Bug Summary

File: llvm/lib/Transforms/IPO/Attributor.cpp
Warning: line 4018, column 27
Called C++ object pointer is null
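
For context, "Called C++ object pointer is null" is the diagnostic the core checkers emit when a member function is invoked through a pointer that is null on some feasible path. The snippet below is a minimal, hypothetical illustration of that pattern; Widget and useWidget are made-up names and this is not the code at line 4018 of Attributor.cpp:

    struct Widget {
      int size() const;
    };

    int useWidget(Widget *W, bool HaveW) {
      Widget *Ptr = nullptr;   // Ptr starts out null.
      if (HaveW)
        Ptr = W;               // Assigned only on one path.
      // On the path where HaveW is false, Ptr is still null here, so the
      // analyzer reports "Called C++ object pointer is null" for this call.
      return Ptr->size();
    }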

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name Attributor.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Transforms/IPO -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-09-002921-48462-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/IPO/Attributor.cpp
1//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements an interprocedural pass that deduces and/or propagates
10// attributes. This is done in an abstract interpretation style fixpoint
11// iteration. See the Attributor.h file comment and the class descriptions in
12// that file for more information.
13//
14//===----------------------------------------------------------------------===//
15
16#include "llvm/Transforms/IPO/Attributor.h"
17
18#include "llvm/ADT/DepthFirstIterator.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/Analysis/CaptureTracking.h"
24#include "llvm/Analysis/EHPersonalities.h"
25#include "llvm/Analysis/GlobalsModRef.h"
26#include "llvm/Analysis/Loads.h"
27#include "llvm/Analysis/MemoryBuiltins.h"
28#include "llvm/Analysis/ValueTracking.h"
29#include "llvm/IR/Argument.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/InstIterator.h"
33#include "llvm/IR/IntrinsicInst.h"
34#include "llvm/Support/CommandLine.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/raw_ostream.h"
37#include "llvm/Transforms/Utils/BasicBlockUtils.h"
38#include "llvm/Transforms/Utils/Local.h"
39
40#include <cassert>
41
42using namespace llvm;
43
44#define DEBUG_TYPE "attributor"
45
46STATISTIC(NumFnWithExactDefinition,
47 "Number of function with exact definitions");
48STATISTIC(NumFnWithoutExactDefinition,
49 "Number of function without exact definitions");
50STATISTIC(NumAttributesTimedOut,
51 "Number of abstract attributes timed out before fixpoint");
52STATISTIC(NumAttributesValidFixpoint,
53 "Number of abstract attributes in a valid fixpoint state");
54STATISTIC(NumAttributesManifested,
55 "Number of abstract attributes manifested in IR");
56STATISTIC(NumAttributesFixedDueToRequiredDependences,
57 "Number of abstract attributes fixed due to required dependences");
58
59// Some helper macros to deal with statistics tracking.
60//
61// Usage:
62// For simple IR attribute tracking overload trackStatistics in the abstract
63// attribute and choose the right STATS_DECLTRACK_********* macro,
64// e.g.,:
65// void trackStatistics() const override {
66// STATS_DECLTRACK_ARG_ATTR(returned)
67// }
68// If there is a single "increment" side one can use the macro
69// STATS_DECLTRACK with a custom message. If there are multiple increment
70// sides, STATS_DECL and STATS_TRACK can also be used separately.
71//
72#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
73 ("Number of " #TYPE " marked '" #NAME "'")
74#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
75#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
76#define STATS_DECL(NAME, TYPE, MSG) \
77 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
78#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
79#define STATS_DECLTRACK(NAME, TYPE, MSG) \
80 { \
81 STATS_DECL(NAME, TYPE, MSG) \
82 STATS_TRACK(NAME, TYPE) \
83 }
84#define STATS_DECLTRACK_ARG_ATTR(NAME) \
85 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
86#define STATS_DECLTRACK_CSARG_ATTR(NAME) \
87 STATS_DECLTRACK(NAME, CSArguments, \
88 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
89#define STATS_DECLTRACK_FN_ATTR(NAME) \
90 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
91#define STATS_DECLTRACK_CS_ATTR(NAME) \
92 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
93#define STATS_DECLTRACK_FNRET_ATTR(NAME) \
94 STATS_DECLTRACK(NAME, FunctionReturn, \
95 BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
96#define STATS_DECLTRACK_CSRET_ATTR(NAME) \
97 STATS_DECLTRACK(NAME, CSReturn, \
98 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
99#define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
100 STATS_DECLTRACK(NAME, Floating, \
101 ("Number of floating values known to be '" #NAME "'"))
102
103// TODO: Determine a good default value.
104//
105// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
106// (when run with the first 5 abstract attributes). The results also indicate
107// that we never reach 32 iterations but always find a fixpoint sooner.
108//
109// This will become more evolved once we perform two interleaved fixpoint
110// iterations: bottom-up and top-down.
111static cl::opt<unsigned>
112 MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
113 cl::desc("Maximal number of fixpoint iterations."),
114 cl::init(32));
115static cl::opt<bool> VerifyMaxFixpointIterations(
116 "attributor-max-iterations-verify", cl::Hidden,
117 cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
118 cl::init(false));
119
120static cl::opt<bool> DisableAttributor(
121 "attributor-disable", cl::Hidden,
122 cl::desc("Disable the attributor inter-procedural deduction pass."),
123 cl::init(true));
124
125static cl::opt<bool> AnnotateDeclarationCallSites(
126 "attributor-annotate-decl-cs", cl::Hidden,
127 cl::desc("Annoate call sites of function declarations."), cl::init(false));
128
129static cl::opt<bool> ManifestInternal(
130 "attributor-manifest-internal", cl::Hidden,
131 cl::desc("Manifest Attributor internal string attributes."),
132 cl::init(false));
133
134static cl::opt<unsigned> DepRecInterval(
135 "attributor-dependence-recompute-interval", cl::Hidden,
136 cl::desc("Number of iterations until dependences are recomputed."),
137 cl::init(4));
138
139static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
140 cl::init(true), cl::Hidden);
141
142static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
143 cl::Hidden);
144
145/// Logic operators for the change status enum class.
146///
147///{
148ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
149 return l == ChangeStatus::CHANGED ? l : r;
150}
151ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
152 return l == ChangeStatus::UNCHANGED ? l : r;
153}
154///}
155
156/// Recursively visit all values that might become \p IRP at some point. This
157/// will be done by looking through cast instructions, selects, phis, and calls
158/// with the "returned" attribute. Once we cannot look through the value any
159/// further, the callback \p VisitValueCB is invoked and passed the current
160/// value, the \p State, and a flag to indicate if we stripped anything. To
161/// limit how much effort is invested, we will never visit more values than
162/// specified by \p MaxValues.
163template <typename AAType, typename StateTy>
164static bool genericValueTraversal(
165 Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
166 const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
167 int MaxValues = 8) {
168
169 const AAIsDead *LivenessAA = nullptr;
170 if (IRP.getAnchorScope())
171 LivenessAA = &A.getAAFor<AAIsDead>(
172 QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
173 /* TrackDependence */ false);
174 bool AnyDead = false;
175
176 // TODO: Use Positions here to allow context sensitivity in VisitValueCB
177 SmallPtrSet<Value *, 16> Visited;
178 SmallVector<Value *, 16> Worklist;
179 Worklist.push_back(&IRP.getAssociatedValue());
180
181 int Iteration = 0;
182 do {
183 Value *V = Worklist.pop_back_val();
184
185 // Check if we should process the current value. To prevent endless
186 // recursion keep a record of the values we followed!
187 if (!Visited.insert(V).second)
188 continue;
189
190 // Make sure we limit the compile time for complex expressions.
191 if (Iteration++ >= MaxValues)
192 return false;
193
194 // Explicitly look through calls with a "returned" attribute if we do
195 // not have a pointer as stripPointerCasts only works on them.
196 Value *NewV = nullptr;
197 if (V->getType()->isPointerTy()) {
198 NewV = V->stripPointerCasts();
199 } else {
200 CallSite CS(V);
201 if (CS && CS.getCalledFunction()) {
202 for (Argument &Arg : CS.getCalledFunction()->args())
203 if (Arg.hasReturnedAttr()) {
204 NewV = CS.getArgOperand(Arg.getArgNo());
205 break;
206 }
207 }
208 }
209 if (NewV && NewV != V) {
210 Worklist.push_back(NewV);
211 continue;
212 }
213
214 // Look through select instructions, visit both potential values.
215 if (auto *SI = dyn_cast<SelectInst>(V)) {
216 Worklist.push_back(SI->getTrueValue());
217 Worklist.push_back(SI->getFalseValue());
218 continue;
219 }
220
221 // Look through phi nodes, visit all live operands.
222 if (auto *PHI = dyn_cast<PHINode>(V)) {
223 assert(LivenessAA &&
224 "Expected liveness in the presence of instructions!");
225 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
226 const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
227 if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
228 AnyDead = true;
229 continue;
230 }
231 Worklist.push_back(PHI->getIncomingValue(u));
232 }
233 continue;
234 }
235
236 // Once a leaf is reached we inform the user through the callback.
237 if (!VisitValueCB(*V, State, Iteration > 1))
238 return false;
239 } while (!Worklist.empty());
240
241 // If we actually used liveness information we have to record a dependence.
242 if (AnyDead)
243 A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
244
245 // All values have been visited.
246 return true;
247}
248
249/// Return true if \p New is equal or worse than \p Old.
250static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
251 if (!Old.isIntAttribute())
252 return true;
253
254 return Old.getValueAsInt() >= New.getValueAsInt();
255}
256
257/// Return true if the information provided by \p Attr was added to the
258/// attribute list \p Attrs. This is only the case if it was not already present
259/// in \p Attrs at the position described by \p PK and \p AttrIdx.
260static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
261 AttributeList &Attrs, int AttrIdx) {
262
263 if (Attr.isEnumAttribute()) {
264 Attribute::AttrKind Kind = Attr.getKindAsEnum();
265 if (Attrs.hasAttribute(AttrIdx, Kind))
266 if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
267 return false;
268 Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
269 return true;
270 }
271 if (Attr.isStringAttribute()) {
272 StringRef Kind = Attr.getKindAsString();
273 if (Attrs.hasAttribute(AttrIdx, Kind))
274 if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
275 return false;
276 Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
277 return true;
278 }
279 if (Attr.isIntAttribute()) {
280 Attribute::AttrKind Kind = Attr.getKindAsEnum();
281 if (Attrs.hasAttribute(AttrIdx, Kind))
282 if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
283 return false;
284 Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
285 Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
286 return true;
287 }
288
289 llvm_unreachable("Expected enum or string attribute!");
290}
291static const Value *getPointerOperand(const Instruction *I) {
292 if (auto *LI = dyn_cast<LoadInst>(I))
293 if (!LI->isVolatile())
294 return LI->getPointerOperand();
295
296 if (auto *SI = dyn_cast<StoreInst>(I))
297 if (!SI->isVolatile())
298 return SI->getPointerOperand();
299
300 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
301 if (!CXI->isVolatile())
302 return CXI->getPointerOperand();
303
304 if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
305 if (!RMWI->isVolatile())
306 return RMWI->getPointerOperand();
307
308 return nullptr;
309}
310static const Value *getBasePointerOfAccessPointerOperand(const Instruction *I,
311 int64_t &BytesOffset,
312 const DataLayout &DL) {
313 const Value *Ptr = getPointerOperand(I);
314 if (!Ptr)
315 return nullptr;
316
317 return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
318 /*AllowNonInbounds*/ false);
319}
320
321ChangeStatus AbstractAttribute::update(Attributor &A) {
322 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
323 if (getState().isAtFixpoint())
324 return HasChanged;
325
326 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
327
328 HasChanged = updateImpl(A);
329
330 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
331 << "\n");
332
333 return HasChanged;
334}
335
336ChangeStatus
337IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
338 const ArrayRef<Attribute> &DeducedAttrs) {
339 Function *ScopeFn = IRP.getAssociatedFunction();
340 IRPosition::Kind PK = IRP.getPositionKind();
341
342 // In the following some generic code that will manifest attributes in
343 // DeducedAttrs if they improve the current IR. Due to the different
344 // annotation positions we use the underlying AttributeList interface.
345
346 AttributeList Attrs;
347 switch (PK) {
348 case IRPosition::IRP_INVALID:
349 case IRPosition::IRP_FLOAT:
350 return ChangeStatus::UNCHANGED;
351 case IRPosition::IRP_ARGUMENT:
352 case IRPosition::IRP_FUNCTION:
353 case IRPosition::IRP_RETURNED:
354 Attrs = ScopeFn->getAttributes();
355 break;
356 case IRPosition::IRP_CALL_SITE:
357 case IRPosition::IRP_CALL_SITE_RETURNED:
358 case IRPosition::IRP_CALL_SITE_ARGUMENT:
359 Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
360 break;
361 }
362
363 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
364 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
365 for (const Attribute &Attr : DeducedAttrs) {
366 if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
367 continue;
368
369 HasChanged = ChangeStatus::CHANGED;
370 }
371
372 if (HasChanged == ChangeStatus::UNCHANGED)
373 return HasChanged;
374
375 switch (PK) {
376 case IRPosition::IRP_ARGUMENT:
377 case IRPosition::IRP_FUNCTION:
378 case IRPosition::IRP_RETURNED:
379 ScopeFn->setAttributes(Attrs);
380 break;
381 case IRPosition::IRP_CALL_SITE:
382 case IRPosition::IRP_CALL_SITE_RETURNED:
383 case IRPosition::IRP_CALL_SITE_ARGUMENT:
384 CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
385 break;
386 case IRPosition::IRP_INVALID:
387 case IRPosition::IRP_FLOAT:
388 break;
389 }
390
391 return HasChanged;
392}
393
394const IRPosition IRPosition::EmptyKey(255);
395const IRPosition IRPosition::TombstoneKey(256);
396
397SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
398 IRPositions.emplace_back(IRP);
399
400 ImmutableCallSite ICS(&IRP.getAnchorValue());
401 switch (IRP.getPositionKind()) {
402 case IRPosition::IRP_INVALID:
403 case IRPosition::IRP_FLOAT:
404 case IRPosition::IRP_FUNCTION:
405 return;
406 case IRPosition::IRP_ARGUMENT:
407 case IRPosition::IRP_RETURNED:
408 IRPositions.emplace_back(
409 IRPosition::function(*IRP.getAssociatedFunction()));
410 return;
411 case IRPosition::IRP_CALL_SITE:
412 assert(ICS && "Expected call site!");
413 // TODO: We need to look at the operand bundles similar to the redirection
414 // in CallBase.
415 if (!ICS.hasOperandBundles())
416 if (const Function *Callee = ICS.getCalledFunction())
417 IRPositions.emplace_back(IRPosition::function(*Callee));
418 return;
419 case IRPosition::IRP_CALL_SITE_RETURNED:
420 assert(ICS && "Expected call site!");
421 // TODO: We need to look at the operand bundles similar to the redirection
422 // in CallBase.
423 if (!ICS.hasOperandBundles()) {
424 if (const Function *Callee = ICS.getCalledFunction()) {
425 IRPositions.emplace_back(IRPosition::returned(*Callee));
426 IRPositions.emplace_back(IRPosition::function(*Callee));
427 }
428 }
429 IRPositions.emplace_back(
430 IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
431 return;
432 case IRPosition::IRP_CALL_SITE_ARGUMENT: {
433 int ArgNo = IRP.getArgNo();
434 assert(ICS && ArgNo >= 0 && "Expected call site!");
435 // TODO: We need to look at the operand bundles similar to the redirection
436 // in CallBase.
437 if (!ICS.hasOperandBundles()) {
438 const Function *Callee = ICS.getCalledFunction();
439 if (Callee && Callee->arg_size() > unsigned(ArgNo))
440 IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
441 if (Callee)
442 IRPositions.emplace_back(IRPosition::function(*Callee));
443 }
444 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
445 return;
446 }
447 }
448}
449
450bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
451 bool IgnoreSubsumingPositions) const {
452 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
453 for (Attribute::AttrKind AK : AKs)
454 if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
455 return true;
456 // The first position returned by the SubsumingPositionIterator is
457 // always the position itself. If we ignore subsuming positions we
458 // are done after the first iteration.
459 if (IgnoreSubsumingPositions)
460 break;
461 }
462 return false;
463}
464
465void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
466 SmallVectorImpl<Attribute> &Attrs) const {
467 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
468 for (Attribute::AttrKind AK : AKs) {
469 const Attribute &Attr = EquivIRP.getAttr(AK);
470 if (Attr.getKindAsEnum() == AK)
471 Attrs.push_back(Attr);
472 }
473}
474
475void IRPosition::verify() {
476 switch (KindOrArgNo) {
477 default:
478 assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
479 assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
480 "Expected call base or argument for positive attribute index!");
481 if (isa<Argument>(AnchorVal)) {
482 assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
483 "Argument number mismatch!");
484 assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
485 "Associated value mismatch!");
486 } else {
487 assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
488 "Call site argument number mismatch!");
489 assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
490 &getAssociatedValue() &&
491 "Associated value mismatch!");
492 }
493 break;
494 case IRP_INVALID:
495 assert(!AnchorVal && "Expected no value for an invalid position!");
496 break;
497 case IRP_FLOAT:
498 assert((!isa<CallBase>(&getAssociatedValue()) &&
499 !isa<Argument>(&getAssociatedValue())) &&
500 "Expected specialized kind for call base and argument values!");
501 break;
502 case IRP_RETURNED:
503 assert(isa<Function>(AnchorVal) &&
504 "Expected function for a 'returned' position!");
505 assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
506 break;
507 case IRP_CALL_SITE_RETURNED:
508 assert((isa<CallBase>(AnchorVal)) &&
509 "Expected call base for 'call site returned' position!");
510 assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
511 break;
512 case IRP_CALL_SITE:
513 assert((isa<CallBase>(AnchorVal)) &&
514 "Expected call base for 'call site function' position!");
515 assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
516 break;
517 case IRP_FUNCTION:
518 assert(isa<Function>(AnchorVal) &&
519 "Expected function for a 'function' position!");
520 assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
521 break;
522 }
523}
524
525namespace {
526/// Helper function to clamp a state \p S of type \p StateType with the
527/// information in \p R and indicate/return if \p S did change (as-in update is
528/// required to be run again).
529template <typename StateType>
530ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
531 auto Assumed = S.getAssumed();
532 S ^= R;
533 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
534 : ChangeStatus::CHANGED;
535}
536
537/// Clamp the information known for all returned values of a function
538/// (identified by \p QueryingAA) into \p S.
539template <typename AAType, typename StateType = typename AAType::StateType>
540static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
541 StateType &S) {
542 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
543 << static_cast<const AbstractAttribute &>(QueryingAA)
544 << " into " << S << "\n");
545
546 assert((QueryingAA.getIRPosition().getPositionKind() ==
547 IRPosition::IRP_RETURNED ||
548 QueryingAA.getIRPosition().getPositionKind() ==
549 IRPosition::IRP_CALL_SITE_RETURNED) &&
550 "Can only clamp returned value states for a function returned or call "
551 "site returned position!");
552
553 // Use an optional state as there might not be any return values and we want
554 // to join (IntegerState::operator&) the state of all there are.
555 Optional<StateType> T;
556
557 // Callback for each possibly returned value.
558 auto CheckReturnValue = [&](Value &RV) -> bool {
559 const IRPosition &RVPos = IRPosition::value(RV);
560 const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
561 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
562 << " @ " << RVPos << "\n");
563 const StateType &AAS = static_cast<const StateType &>(AA.getState());
564 if (T.hasValue())
565 *T &= AAS;
566 else
567 T = AAS;
568 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
569 << "\n");
570 return T->isValidState();
571 };
572
573 if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
574 S.indicatePessimisticFixpoint();
575 else if (T.hasValue())
576 S ^= *T;
577}
578
579/// Helper class to compose two generic deductions.
580template <typename AAType, typename Base, typename StateType,
581 template <typename...> class F, template <typename...> class G>
582struct AAComposeTwoGenericDeduction
583 : public F<AAType, G<AAType, Base, StateType>, StateType> {
584 AAComposeTwoGenericDeduction(const IRPosition &IRP)
585 : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
586
587 /// See AbstractAttribute::updateImpl(...).
588 ChangeStatus updateImpl(Attributor &A) override {
589 ChangeStatus ChangedF =
590 F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
591 ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
592 return ChangedF | ChangedG;
593 }
594};
595
596/// Helper class for generic deduction: return value -> returned position.
597template <typename AAType, typename Base,
598 typename StateType = typename AAType::StateType>
599struct AAReturnedFromReturnedValues : public Base {
600 AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
601
602 /// See AbstractAttribute::updateImpl(...).
603 ChangeStatus updateImpl(Attributor &A) override {
604 StateType S;
605 clampReturnedValueStates<AAType, StateType>(A, *this, S);
606 // TODO: If we know we visited all returned values, thus none are assumed
607 // dead, we can take the known information from the state T.
608 return clampStateAndIndicateChange<StateType>(this->getState(), S);
609 }
610};
611
612/// Clamp the information known at all call sites for a given argument
613/// (identified by \p QueryingAA) into \p S.
614template <typename AAType, typename StateType = typename AAType::StateType>
615static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
616 StateType &S) {
617 LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
618 << static_cast<const AbstractAttribute &>(QueryingAA)
619 << " into " << S << "\n");
620
621 assert(QueryingAA.getIRPosition().getPositionKind() ==
622 IRPosition::IRP_ARGUMENT &&
623 "Can only clamp call site argument states for an argument position!");
624
625 // Use an optional state as there might not be any return values and we want
626 // to join (IntegerState::operator&) the state of all there are.
627 Optional<StateType> T;
628
629 // The argument number which is also the call site argument number.
630 unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
631
632 auto CallSiteCheck = [&](AbstractCallSite ACS) {
633 const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
634 // Check if a corresponding argument was found or if it is not associated
635 // (which can happen for callback calls).
636 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
637 return false;
638
639 const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
640 LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
641 << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
642 const StateType &AAS = static_cast<const StateType &>(AA.getState());
643 if (T.hasValue())
644 *T &= AAS;
645 else
646 T = AAS;
647 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
648 << "\n");
649 return T->isValidState();
650 };
651
652 if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
653 S.indicatePessimisticFixpoint();
654 else if (T.hasValue())
655 S ^= *T;
656}
657
658/// Helper class for generic deduction: call site argument -> argument position.
659template <typename AAType, typename Base,
660 typename StateType = typename AAType::StateType>
661struct AAArgumentFromCallSiteArguments : public Base {
662 AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
663
664 /// See AbstractAttribute::updateImpl(...).
665 ChangeStatus updateImpl(Attributor &A) override {
666 StateType S;
667 clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
668 // TODO: If we know we visited all incoming values, thus none are assumed
669 // dead, we can take the known information from the state T.
670 return clampStateAndIndicateChange<StateType>(this->getState(), S);
671 }
672};
673
674/// Helper class for generic replication: function returned -> cs returned.
675template <typename AAType, typename Base,
676 typename StateType = typename AAType::StateType>
677struct AACallSiteReturnedFromReturned : public Base {
678 AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
679
680 /// See AbstractAttribute::updateImpl(...).
681 ChangeStatus updateImpl(Attributor &A) override {
682 assert(this->getIRPosition().getPositionKind() ==
683 IRPosition::IRP_CALL_SITE_RETURNED &&
684 "Can only wrap function returned positions for call site returned "
685 "positions!");
686 auto &S = this->getState();
687
688 const Function *AssociatedFunction =
689 this->getIRPosition().getAssociatedFunction();
690 if (!AssociatedFunction)
691 return S.indicatePessimisticFixpoint();
692
693 IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
694 const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
695 return clampStateAndIndicateChange(
696 S, static_cast<const typename AAType::StateType &>(AA.getState()));
697 }
698};
699
700/// Helper class for generic deduction using the must-be-executed-context.
701/// The base class is required to provide a `followUse` method:
702
703/// bool followUse(Attributor &A, const Use *U, const Instruction *I)
704/// U - The underlying use.
705/// I - The user of \p U.
706/// `followUse` returns true if the value should be tracked transitively.
707
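// Illustrative sketch (not part of the original source): a derived class such
// as AANonNullImpl (defined later in this file) implements followUse() by
// querying getKnownNonNullAndDerefBytesForUse() for the given use and
// returning its TrackUse flag, so that e.g. casts and all-zero GEPs keep the
// traversal going:
//
//   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
//     bool IsNonNull = false, TrackUse = false;
//     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
//                                        IsNonNull, TrackUse);
//     setKnown(IsNonNull);
//     return TrackUse;
//   }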
708template <typename AAType, typename Base,
709 typename StateType = typename AAType::StateType>
710struct AAFromMustBeExecutedContext : public Base {
711 AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
712
713 void initialize(Attributor &A) override {
714 Base::initialize(A);
715 const IRPosition &IRP = this->getIRPosition();
716 Instruction *CtxI = IRP.getCtxI();
717
718 if (!CtxI)
719 return;
720
721 for (const Use &U : IRP.getAssociatedValue().uses())
722 Uses.insert(&U);
723 }
724
725 /// See AbstractAttribute::updateImpl(...).
726 ChangeStatus updateImpl(Attributor &A) override {
727 auto BeforeState = this->getState();
728 auto &S = this->getState();
729 Instruction *CtxI = this->getIRPosition().getCtxI();
730 if (!CtxI)
731 return ChangeStatus::UNCHANGED;
732
733 MustBeExecutedContextExplorer &Explorer =
734 A.getInfoCache().getMustBeExecutedContextExplorer();
735
736 auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
737 for (unsigned u = 0; u < Uses.size(); ++u) {
738 const Use *U = Uses[u];
739 if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
740 bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
741 if (Found && Base::followUse(A, U, UserI))
742 for (const Use &Us : UserI->uses())
743 Uses.insert(&Us);
744 }
745 }
746
747 return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
748 }
749
750private:
751 /// Container for (transitive) uses of the associated value.
752 SetVector<const Use *> Uses;
753};
754
755template <typename AAType, typename Base,
756 typename StateType = typename AAType::StateType>
757using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
758 AAComposeTwoGenericDeduction<AAType, Base, StateType,
759 AAFromMustBeExecutedContext,
760 AAArgumentFromCallSiteArguments>;
761
762template <typename AAType, typename Base,
763 typename StateType = typename AAType::StateType>
764using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
765 AAComposeTwoGenericDeduction<AAType, Base, StateType,
766 AAFromMustBeExecutedContext,
767 AACallSiteReturnedFromReturned>;
768
769/// -----------------------NoUnwind Function Attribute--------------------------
770
771struct AANoUnwindImpl : AANoUnwind {
772 AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
773
774 const std::string getAsStr() const override {
775 return getAssumed() ? "nounwind" : "may-unwind";
776 }
777
778 /// See AbstractAttribute::updateImpl(...).
779 ChangeStatus updateImpl(Attributor &A) override {
780 auto Opcodes = {
781 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
782 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
783 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
784
785 auto CheckForNoUnwind = [&](Instruction &I) {
786 if (!I.mayThrow())
787 return true;
788
789 if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
790 const auto &NoUnwindAA =
791 A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
792 return NoUnwindAA.isAssumedNoUnwind();
793 }
794 return false;
795 };
796
797 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
798 return indicatePessimisticFixpoint();
799
800 return ChangeStatus::UNCHANGED;
801 }
802};
803
804struct AANoUnwindFunction final : public AANoUnwindImpl {
805 AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
806
807 /// See AbstractAttribute::trackStatistics()
808 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
809};
810
811/// NoUnwind attribute deduction for a call site.
812struct AANoUnwindCallSite final : AANoUnwindImpl {
813 AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
814
815 /// See AbstractAttribute::initialize(...).
816 void initialize(Attributor &A) override {
817 AANoUnwindImpl::initialize(A);
818 Function *F = getAssociatedFunction();
819 if (!F)
820 indicatePessimisticFixpoint();
821 }
822
823 /// See AbstractAttribute::updateImpl(...).
824 ChangeStatus updateImpl(Attributor &A) override {
825 // TODO: Once we have call site specific value information we can provide
826 // call site specific liveness information and then it makes
827 // sense to specialize attributes for call site arguments instead of
828 // redirecting requests to the callee argument.
829 Function *F = getAssociatedFunction();
830 const IRPosition &FnPos = IRPosition::function(*F);
831 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
832 return clampStateAndIndicateChange(
833 getState(),
834 static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
835 }
836
837 /// See AbstractAttribute::trackStatistics()
838 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
839};
840
841/// --------------------- Function Return Values -------------------------------
842
843/// "Attribute" that collects all potential returned values and the return
844/// instructions that they arise from.
845///
846/// If there is a unique returned value R, the manifest method will:
847/// - mark R with the "returned" attribute, if R is an argument.
848class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
849
850 /// Mapping of values potentially returned by the associated function to the
851 /// return instructions that might return them.
852 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
853
854 /// Mapping to remember the number of returned values for a call site such
855 /// that we can avoid updates if nothing changed.
856 DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
857
858 /// Set of unresolved calls returned by the associated function.
859 SmallSetVector<CallBase *, 4> UnresolvedCalls;
860
861 /// State flags
862 ///
863 ///{
864 bool IsFixed = false;
865 bool IsValidState = true;
866 ///}
867
868public:
869 AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
870
871 /// See AbstractAttribute::initialize(...).
872 void initialize(Attributor &A) override {
873 // Reset the state.
874 IsFixed = false;
875 IsValidState = true;
876 ReturnedValues.clear();
877
878 Function *F = getAssociatedFunction();
879 if (!F) {
880 indicatePessimisticFixpoint();
881 return;
882 }
883
884 // The map from instruction opcodes to those instructions in the function.
885 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
886
887 // Look through all arguments, if one is marked as returned we are done.
888 for (Argument &Arg : F->args()) {
889 if (Arg.hasReturnedAttr()) {
890 auto &ReturnInstSet = ReturnedValues[&Arg];
891 for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
892 ReturnInstSet.insert(cast<ReturnInst>(RI));
893
894 indicateOptimisticFixpoint();
895 return;
896 }
897 }
898
899 if (!F->hasExactDefinition())
900 indicatePessimisticFixpoint();
901 }
902
903 /// See AbstractAttribute::manifest(...).
904 ChangeStatus manifest(Attributor &A) override;
905
906 /// See AbstractAttribute::getState(...).
907 AbstractState &getState() override { return *this; }
908
909 /// See AbstractAttribute::getState(...).
910 const AbstractState &getState() const override { return *this; }
911
912 /// See AbstractAttribute::updateImpl(Attributor &A).
913 ChangeStatus updateImpl(Attributor &A) override;
914
915 llvm::iterator_range<iterator> returned_values() override {
916 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
917 }
918
919 llvm::iterator_range<const_iterator> returned_values() const override {
920 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
921 }
922
923 const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
924 return UnresolvedCalls;
925 }
926
927 /// Return the number of potential return values, -1 if unknown.
928 size_t getNumReturnValues() const override {
929 return isValidState() ? ReturnedValues.size() : -1;
930 }
931
932 /// Return an assumed unique return value if a single candidate is found. If
933 /// there cannot be one, return a nullptr. If it is not clear yet, return the
934 /// Optional::NoneType.
935 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
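// Illustrative example (not from the original source): for
//   define i32 @f(i32 %a, i1 %c) {
//     br i1 %c, label %t, label %e
//   t:
//     ret i32 %a
//   e:
//     ret i32 undef
//   }
// the assumed unique return value is %a, since undef can be pretended to equal
// any value; a second non-undef returned value would make the result nullptr.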
936
937 /// See AbstractState::checkForAllReturnedValues(...).
938 bool checkForAllReturnedValuesAndReturnInsts(
939 const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
940 &Pred) const override;
941
942 /// Pretty print the attribute similar to the IR representation.
943 const std::string getAsStr() const override;
944
945 /// See AbstractState::isAtFixpoint().
946 bool isAtFixpoint() const override { return IsFixed; }
947
948 /// See AbstractState::isValidState().
949 bool isValidState() const override { return IsValidState; }
950
951 /// See AbstractState::indicateOptimisticFixpoint(...).
952 ChangeStatus indicateOptimisticFixpoint() override {
953 IsFixed = true;
954 return ChangeStatus::UNCHANGED;
955 }
956
957 ChangeStatus indicatePessimisticFixpoint() override {
958 IsFixed = true;
959 IsValidState = false;
960 return ChangeStatus::CHANGED;
961 }
962};
963
964ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
965 ChangeStatus Changed = ChangeStatus::UNCHANGED;
966
967 // Bookkeeping.
968 assert(isValidState());
969 STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
970 "Number of function with known return values");
971
972 // Check if we have an assumed unique return value that we could manifest.
973 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
974
975 if (!UniqueRV.hasValue() || !UniqueRV.getValue())
976 return Changed;
977
978 // Bookkeeping.
979 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
980 "Number of function with unique return");
981
982 // Callback to replace the uses of CB with the constant C.
983 auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
984 if (CB.getNumUses() == 0 || CB.isMustTailCall())
985 return ChangeStatus::UNCHANGED;
986 CB.replaceAllUsesWith(&C);
987 return ChangeStatus::CHANGED;
988 };
989
990 // If the assumed unique return value is an argument, annotate it.
991 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
992 // TODO: This should be handled differently!
993 this->AnchorVal = UniqueRVArg;
994 this->KindOrArgNo = UniqueRVArg->getArgNo();
995 Changed = IRAttribute::manifest(A);
996 } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
997 // We can replace the returned value with the unique returned constant.
998 Value &AnchorValue = getAnchorValue();
999 if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1000 for (const Use &U : F->uses())
1001 if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1002 if (CB->isCallee(&U)) {
1003 Constant *RVCCast =
1004 CB->getType() == RVC->getType()
1005 ? RVC
1006 : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1007 Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1008 }
1009 } else {
1010 assert(isa<CallBase>(AnchorValue) &&
1011 "Expected a function or call base anchor!");
1012 Constant *RVCCast =
1013 AnchorValue.getType() == RVC->getType()
1014 ? RVC
1015 : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1016 Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1017 }
1018 if (Changed == ChangeStatus::CHANGED)
1019 STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1020 "Number of function returns replaced by constant return");
1021 }
1022
1023 return Changed;
1024}
1025
1026const std::string AAReturnedValuesImpl::getAsStr() const {
1027 return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1028 (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1029 ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1030}
1031
1032Optional<Value *>
1033AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1034 // If checkForAllReturnedValues provides a unique value, ignoring potential
1035 // undef values that can also be present, it is assumed to be the actual
1036 // return value and forwarded to the caller of this method. If there are
1037 // multiple, a nullptr is returned indicating there cannot be a unique
1038 // returned value.
1039 Optional<Value *> UniqueRV;
1040
1041 auto Pred = [&](Value &RV) -> bool {
1042 // If we found a second returned value and neither the current nor the saved
1043 // one is an undef, there is no unique returned value. Undefs are special
1044 // since we can pretend they have any value.
1045 if (UniqueRV.hasValue() && UniqueRV != &RV &&
1046 !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1047 UniqueRV = nullptr;
1048 return false;
1049 }
1050
1051 // Do not overwrite a value with an undef.
1052 if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1053 UniqueRV = &RV;
1054
1055 return true;
1056 };
1057
1058 if (!A.checkForAllReturnedValues(Pred, *this))
1059 UniqueRV = nullptr;
1060
1061 return UniqueRV;
1062}
1063
1064bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1065 const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1066 &Pred) const {
1067 if (!isValidState())
1068 return false;
1069
1070 // Check all returned values but ignore call sites as long as we have not
1071 // encountered an overdefined one during an update.
1072 for (auto &It : ReturnedValues) {
1073 Value *RV = It.first;
1074
1075 CallBase *CB = dyn_cast<CallBase>(RV);
1076 if (CB && !UnresolvedCalls.count(CB))
1077 continue;
1078
1079 if (!Pred(*RV, It.second))
1080 return false;
1081 }
1082
1083 return true;
1084}
1085
1086ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1087 size_t NumUnresolvedCalls = UnresolvedCalls.size();
1088 bool Changed = false;
1089
1090 // State used in the value traversals starting in returned values.
1091 struct RVState {
1092 // The map in which we collect return values -> return instrs.
1093 decltype(ReturnedValues) &RetValsMap;
1094 // The flag to indicate a change.
1095 bool &Changed;
1096 // The return instrs we come from.
1097 SmallSetVector<ReturnInst *, 4> RetInsts;
1098 };
1099
1100 // Callback for a leaf value returned by the associated function.
1101 auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1102 auto Size = RVS.RetValsMap[&Val].size();
1103 RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1104 bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1105 RVS.Changed |= Inserted;
1106 LLVM_DEBUG({
1107 if (Inserted)
1108 dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1109 << " => " << RVS.RetInsts.size() << "\n";
1110 });
1111 return true;
1112 };
1113
1114 // Helper method to invoke the generic value traversal.
1115 auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1116 IRPosition RetValPos = IRPosition::value(RV);
1117 return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1118 RVS, VisitValueCB);
1119 };
1120
1121 // Callback for all "return instructions" live in the associated function.
1122 auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1123 ReturnInst &Ret = cast<ReturnInst>(I);
1124 RVState RVS({ReturnedValues, Changed, {}});
1125 RVS.RetInsts.insert(&Ret);
1126 return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1127 };
1128
1129 // Start by discovering returned values from all live return instructions in
1130 // the associated function.
1131 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1132 return indicatePessimisticFixpoint();
1133
1134 // Once returned values "directly" present in the code are handled we try to
1135 // resolve returned calls.
1136 decltype(ReturnedValues) NewRVsMap;
1137 for (auto &It : ReturnedValues) {
1138 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1139 << " by #" << It.second.size() << " RIs\n");
1140 CallBase *CB = dyn_cast<CallBase>(It.first);
1141 if (!CB || UnresolvedCalls.count(CB))
1142 continue;
1143
1144 if (!CB->getCalledFunction()) {
1145 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1146 << "\n");
1147 UnresolvedCalls.insert(CB);
1148 continue;
1149 }
1150
1151 // TODO: use the function scope once we have call site AAReturnedValues.
1152 const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1153 *this, IRPosition::function(*CB->getCalledFunction()));
1154 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1155 << static_cast<const AbstractAttribute &>(RetValAA)
1156 << "\n");
1157
1158 // Skip dead ends, thus if we do not know anything about the returned
1159 // call we mark it as unresolved and it will stay that way.
1160 if (!RetValAA.getState().isValidState()) {
1161 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1162 << "\n");
1163 UnresolvedCalls.insert(CB);
1164 continue;
1165 }
1166
1167 // Do not try to learn partial information. If the callee has unresolved
1168 // return values we will treat the call as unresolved/opaque.
1169 auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1170 if (!RetValAAUnresolvedCalls.empty()) {
1171 UnresolvedCalls.insert(CB);
1172 continue;
1173 }
1174
1175 // Now check if we can track transitively returned values. If possible, i.e.,
1176 // if all return values can be represented in the current scope, do so.
1177 bool Unresolved = false;
1178 for (auto &RetValAAIt : RetValAA.returned_values()) {
1179 Value *RetVal = RetValAAIt.first;
1180 if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1181 isa<Constant>(RetVal))
1182 continue;
1183 // Anything that did not fit in the above categories cannot be resolved,
1184 // mark the call as unresolved.
1185 LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1186 "cannot be translated: "
1187 << *RetVal << "\n");
1188 UnresolvedCalls.insert(CB);
1189 Unresolved = true;
1190 break;
1191 }
1192
1193 if (Unresolved)
1194 continue;
1195
1196 // Now track transitively returned values.
1197 unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1198 if (NumRetAA == RetValAA.getNumReturnValues()) {
1199 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1200 "changed since it was seen last\n");
1201 continue;
1202 }
1203 NumRetAA = RetValAA.getNumReturnValues();
1204
1205 for (auto &RetValAAIt : RetValAA.returned_values()) {
1206 Value *RetVal = RetValAAIt.first;
1207 if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1208 // Arguments are mapped to call site operands and we begin the traversal
1209 // again.
1210 bool Unused = false;
1211 RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1212 VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1213 continue;
1214 } else if (isa<CallBase>(RetVal)) {
1215 // Call sites are resolved by the callee attribute over time, no need for
1216 // us to do anything.
1217 continue;
1218 } else if (isa<Constant>(RetVal)) {
1219 // Constants are valid everywhere, we can simply take them.
1220 NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1221 continue;
1222 }
1223 }
1224 }
1225
1226 // To avoid modifications to the ReturnedValues map while we iterate over it
1227 // we kept record of potential new entries in a copy map, NewRVsMap.
1228 for (auto &It : NewRVsMap) {
1229 assert(!It.second.empty() && "Entry does not add anything.");
1230 auto &ReturnInsts = ReturnedValues[It.first];
1231 for (ReturnInst *RI : It.second)
1232 if (ReturnInsts.insert(RI)) {
1233 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1234 << *It.first << " => " << *RI << "\n");
1235 Changed = true;
1236 }
1237 }
1238
1239 Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1240 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1241}
1242
1243struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1244 AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1245
1246 /// See AbstractAttribute::trackStatistics()
1247 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1248};
1249
1250/// Returned values information for a call site.
1251struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1252 AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1253
1254 /// See AbstractAttribute::initialize(...).
1255 void initialize(Attributor &A) override {
1256 // TODO: Once we have call site specific value information we can provide
1257 // call site specific liveness information and then it makes
1258 // sense to specialize attributes for call sites instead of
1259 // redirecting requests to the callee.
1260 llvm_unreachable("Abstract attributes for returned values are not "
1261 "supported for call sites yet!");
1262 }
1263
1264 /// See AbstractAttribute::updateImpl(...).
1265 ChangeStatus updateImpl(Attributor &A) override {
1266 return indicatePessimisticFixpoint();
1267 }
1268
1269 /// See AbstractAttribute::trackStatistics()
1270 void trackStatistics() const override {}
1271};
1272
1273/// ------------------------ NoSync Function Attribute -------------------------
1274
1275struct AANoSyncImpl : AANoSync {
1276 AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1277
1278 const std::string getAsStr() const override {
1279 return getAssumed() ? "nosync" : "may-sync";
1280 }
1281
1282 /// See AbstractAttribute::updateImpl(...).
1283 ChangeStatus updateImpl(Attributor &A) override;
1284
1285 /// Helper function used to determine whether an instruction is non-relaxed
1286 /// atomic, i.e., whether an atomic instruction has an ordering other than
1287 /// unordered or monotonic.
1288 static bool isNonRelaxedAtomic(Instruction *I);
1289
1290 /// Helper function used to determine whether an instruction is volatile.
1291 static bool isVolatile(Instruction *I);
1292
1293 /// Helper function used to check if an intrinsic is volatile (memcpy, memmove,
1294 /// memset).
1295 static bool isNoSyncIntrinsic(Instruction *I);
1296};
1297
1298bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1299 if (!I->isAtomic())
1300 return false;
1301
1302 AtomicOrdering Ordering;
1303 switch (I->getOpcode()) {
1304 case Instruction::AtomicRMW:
1305 Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1306 break;
1307 case Instruction::Store:
1308 Ordering = cast<StoreInst>(I)->getOrdering();
1309 break;
1310 case Instruction::Load:
1311 Ordering = cast<LoadInst>(I)->getOrdering();
1312 break;
1313 case Instruction::Fence: {
1314 auto *FI = cast<FenceInst>(I);
1315 if (FI->getSyncScopeID() == SyncScope::SingleThread)
1316 return false;
1317 Ordering = FI->getOrdering();
1318 break;
1319 }
1320 case Instruction::AtomicCmpXchg: {
1321 AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1322 AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1323 // Only if both are relaxed can it be treated as relaxed.
1324 // Otherwise it is non-relaxed.
1325 if (Success != AtomicOrdering::Unordered &&
1326 Success != AtomicOrdering::Monotonic)
1327 return true;
1328 if (Failure != AtomicOrdering::Unordered &&
1329 Failure != AtomicOrdering::Monotonic)
1330 return true;
1331 return false;
1332 }
1333 default:
1334 llvm_unreachable(
1335 "New atomic operations need to be known in the attributor.");
1336 }
1337
1338 // Relaxed.
1339 if (Ordering == AtomicOrdering::Unordered ||
1340 Ordering == AtomicOrdering::Monotonic)
1341 return false;
1342 return true;
1343}
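// Illustrative example (not from the original source): a `load atomic i32,
// i32* %p monotonic` is treated as relaxed and therefore compatible with
// nosync, whereas a seq_cst store or a non-singlethread fence is non-relaxed
// and prevents the nosync deduction.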
1344
1345/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1346/// FIXME: We should improve the handling of intrinsics.
1347bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1348 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1349 switch (II->getIntrinsicID()) {
1350 /// Element wise atomic memory intrinsics can only be unordered,
1351 /// therefore nosync.
1352 case Intrinsic::memset_element_unordered_atomic:
1353 case Intrinsic::memmove_element_unordered_atomic:
1354 case Intrinsic::memcpy_element_unordered_atomic:
1355 return true;
1356 case Intrinsic::memset:
1357 case Intrinsic::memmove:
1358 case Intrinsic::memcpy:
1359 if (!cast<MemIntrinsic>(II)->isVolatile())
1360 return true;
1361 return false;
1362 default:
1363 return false;
1364 }
1365 }
1366 return false;
1367}
1368
1369bool AANoSyncImpl::isVolatile(Instruction *I) {
1370 assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1371 "Calls should not be checked here");
1372
1373 switch (I->getOpcode()) {
1374 case Instruction::AtomicRMW:
1375 return cast<AtomicRMWInst>(I)->isVolatile();
1376 case Instruction::Store:
1377 return cast<StoreInst>(I)->isVolatile();
1378 case Instruction::Load:
1379 return cast<LoadInst>(I)->isVolatile();
1380 case Instruction::AtomicCmpXchg:
1381 return cast<AtomicCmpXchgInst>(I)->isVolatile();
1382 default:
1383 return false;
1384 }
1385}
1386
1387ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1388
1389 auto CheckRWInstForNoSync = [&](Instruction &I) {
1390 /// We are looking for volatile instructions or non-relaxed atomics.
1391 /// FIXME: We should improve the handling of intrinsics.
1392
1393 if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1394 return true;
1395
1396 if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1397 if (ICS.hasFnAttr(Attribute::NoSync))
1398 return true;
1399
1400 const auto &NoSyncAA =
1401 A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1402 if (NoSyncAA.isAssumedNoSync())
1403 return true;
1404 return false;
1405 }
1406
1407 if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1408 return true;
1409
1410 return false;
1411 };
1412
1413 auto CheckForNoSync = [&](Instruction &I) {
1414 // At this point we handled all read/write effects and they are all
1415 // nosync, so they can be skipped.
1416 if (I.mayReadOrWriteMemory())
1417 return true;
1418
1419 // non-convergent and readnone imply nosync.
1420 return !ImmutableCallSite(&I).isConvergent();
1421 };
1422
1423 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1424 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1425 return indicatePessimisticFixpoint();
1426
1427 return ChangeStatus::UNCHANGED;
1428}
1429
1430struct AANoSyncFunction final : public AANoSyncImpl {
1431 AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1432
1433 /// See AbstractAttribute::trackStatistics()
1434 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1435};
1436
1437/// NoSync attribute deduction for a call site.
1438struct AANoSyncCallSite final : AANoSyncImpl {
1439 AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1440
1441 /// See AbstractAttribute::initialize(...).
1442 void initialize(Attributor &A) override {
1443 AANoSyncImpl::initialize(A);
1444 Function *F = getAssociatedFunction();
1445 if (!F)
1446 indicatePessimisticFixpoint();
1447 }
1448
1449 /// See AbstractAttribute::updateImpl(...).
1450 ChangeStatus updateImpl(Attributor &A) override {
1451 // TODO: Once we have call site specific value information we can provide
1452 // call site specific liveness information and then it makes
1453 // sense to specialize attributes for call site arguments instead of
1454 // redirecting requests to the callee argument.
1455 Function *F = getAssociatedFunction();
1456 const IRPosition &FnPos = IRPosition::function(*F);
1457 auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1458 return clampStateAndIndicateChange(
1459 getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1460 }
1461
1462 /// See AbstractAttribute::trackStatistics()
1463 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1464};
1465
1466/// ------------------------ No-Free Attributes ----------------------------
1467
1468struct AANoFreeImpl : public AANoFree {
1469 AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1470
1471 /// See AbstractAttribute::updateImpl(...).
1472 ChangeStatus updateImpl(Attributor &A) override {
1473 auto CheckForNoFree = [&](Instruction &I) {
1474 ImmutableCallSite ICS(&I);
1475 if (ICS.hasFnAttr(Attribute::NoFree))
1476 return true;
1477
1478 const auto &NoFreeAA =
1479 A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1480 return NoFreeAA.isAssumedNoFree();
1481 };
1482
1483 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1484 return indicatePessimisticFixpoint();
1485 return ChangeStatus::UNCHANGED;
1486 }
1487
1488 /// See AbstractAttribute::getAsStr().
1489 const std::string getAsStr() const override {
1490 return getAssumed() ? "nofree" : "may-free";
1491 }
1492};
1493
1494struct AANoFreeFunction final : public AANoFreeImpl {
1495 AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1496
1497 /// See AbstractAttribute::trackStatistics()
1498 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1499};
1500
1501/// NoFree attribute deduction for a call site.
1502struct AANoFreeCallSite final : AANoFreeImpl {
1503 AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1504
1505 /// See AbstractAttribute::initialize(...).
1506 void initialize(Attributor &A) override {
1507 AANoFreeImpl::initialize(A);
1508 Function *F = getAssociatedFunction();
1509 if (!F)
1510 indicatePessimisticFixpoint();
1511 }
1512
1513 /// See AbstractAttribute::updateImpl(...).
1514 ChangeStatus updateImpl(Attributor &A) override {
1515 // TODO: Once we have call site specific value information we can provide
1516 // call site specific liveness information and then it makes
1517 // sense to specialize attributes for call site arguments instead of
1518 // redirecting requests to the callee argument.
1519 Function *F = getAssociatedFunction();
1520 const IRPosition &FnPos = IRPosition::function(*F);
1521 auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1522 return clampStateAndIndicateChange(
1523 getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1524 }
1525
1526 /// See AbstractAttribute::trackStatistics()
1527 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1528};
1529
1530/// NoFree attribute for floating values.
1531struct AANoFreeFloating : AANoFreeImpl {
1532 AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1533
1534 /// See AbstractAttribute::trackStatistics()
1535 void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1536
1537 /// See Abstract Attribute::updateImpl(...).
1538 ChangeStatus updateImpl(Attributor &A) override {
1539 const IRPosition &IRP = getIRPosition();
1540 Function *F = IRP.getAnchorScope();
1541
1542 const AAIsDead &LivenessAA =
1543 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F));
1544
1545 const auto &NoFreeAA =
1546 A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1547 if (NoFreeAA.isAssumedNoFree())
1548 return ChangeStatus::UNCHANGED;
1549
1550 SmallPtrSet<const Use *, 8> Visited;
1551 SmallVector<const Use *, 8> Worklist;
1552
1553 Value &AssociatedValue = getIRPosition().getAssociatedValue();
1554 for (Use &U : AssociatedValue.uses())
1555 Worklist.push_back(&U);
1556
1557 while (!Worklist.empty()) {
1558 const Use *U = Worklist.pop_back_val();
1559 if (!Visited.insert(U).second)
1560 continue;
1561
1562 auto *UserI = U->getUser();
1563 if (!UserI)
1564 continue;
1565
1566 if (LivenessAA.isAssumedDead(cast<Instruction>(UserI)))
1567 continue;
1568
1569 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1570 if (CB->isBundleOperand(U))
1571 return indicatePessimisticFixpoint();
1572 if (!CB->isArgOperand(U))
1573 continue;
1574
1575 unsigned ArgNo = CB->getArgOperandNo(U);
1576
1577 const auto &NoFreeArg = A.getAAFor<AANoFree>(
1578 *this, IRPosition::callsite_argument(*CB, ArgNo));
1579
1580 if (NoFreeArg.isAssumedNoFree())
1581 continue;
1582
1583 return indicatePessimisticFixpoint();
1584 }
1585
1586 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1587 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1588 for (Use &U : UserI->uses())
1589 Worklist.push_back(&U);
1590 continue;
1591 }
1592
1593 // Unknown user.
1594 return indicatePessimisticFixpoint();
1595 }
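// At this point every (transitive) use was either assumed dead, a call
// operand that is not an argument, a call argument assumed nofree, or a
// pointer-propagating instruction (GEP/bitcast/phi/select) whose own uses
// were added to the worklist, so the value is assumed not to be freed.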
1596 return ChangeStatus::UNCHANGED;
1597 }
1598};
1599
1600/// NoFree attribute for a function argument.
1601struct AANoFreeArgument final : AANoFreeFloating {
1602 AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1603
1604 /// See AbstractAttribute::trackStatistics()
1605 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1606};
1607
1608/// NoFree attribute for call site arguments.
1609struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1610 AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1611
1612 /// See AbstractAttribute::updateImpl(...).
1613 ChangeStatus updateImpl(Attributor &A) override {
1614 // TODO: Once we have call site specific value information we can provide
1615 // call site specific liveness information and then it makes
1616 // sense to specialize attributes for call site arguments instead of
1617 // redirecting requests to the callee argument.
1618 Argument *Arg = getAssociatedArgument();
1619 if (!Arg)
1620 return indicatePessimisticFixpoint();
1621 const IRPosition &ArgPos = IRPosition::argument(*Arg);
1622 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1623 return clampStateAndIndicateChange(
1624 getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1625 }
1626
1627 /// See AbstractAttribute::trackStatistics()
1628 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1629};
1630
1631/// NoFree attribute for function return value.
1632struct AANoFreeReturned final : AANoFreeFloating {
1633 AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1634 llvm_unreachable("NoFree is not applicable to function returns!");
1635 }
1636
1637 /// See AbstractAttribute::initialize(...).
1638 void initialize(Attributor &A) override {
1639 llvm_unreachable("NoFree is not applicable to function returns!");
1640 }
1641
1642 /// See AbstractAttribute::updateImpl(...).
1643 ChangeStatus updateImpl(Attributor &A) override {
1644 llvm_unreachable("NoFree is not applicable to function returns!");
1645 }
1646
1647 /// See AbstractAttribute::trackStatistics()
1648 void trackStatistics() const override {}
1649};
1650
1651/// NoFree attribute deduction for a call site return value.
1652struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1653 AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1654
1655 ChangeStatus manifest(Attributor &A) override {
1656 return ChangeStatus::UNCHANGED;
1657 }
1658 /// See AbstractAttribute::trackStatistics()
1659 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1660};
1661
1662/// ------------------------ NonNull Argument Attribute ------------------------
1663static int64_t getKnownNonNullAndDerefBytesForUse(
1664 Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
1665 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1666 TrackUse = false;
1667
1668 const Value *UseV = U->get();
1669 if (!UseV->getType()->isPointerTy())
1670 return 0;
1671
1672 Type *PtrTy = UseV->getType();
1673 const Function *F = I->getFunction();
1674 bool NullPointerIsDefined =
1675 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1676 const DataLayout &DL = A.getInfoCache().getDL();
1677 if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1678 if (ICS.isBundleOperand(U))
1679 return 0;
1680
1681 if (ICS.isCallee(U)) {
1682 IsNonNull |= !NullPointerIsDefined;
1683 return 0;
1684 }
1685
1686 unsigned ArgNo = ICS.getArgumentNo(U);
1687 IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1688 // As long as we only use known information there is no need to track
1689 // dependences here.
1690 auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1691 /* TrackDependence */ false);
1692 IsNonNull |= DerefAA.isKnownNonNull();
1693 return DerefAA.getKnownDereferenceableBytes();
1694 }
1695
1696 // We need to follow common pointer manipulation uses to the accesses they
1697 // feed into. We try to be smart and, for now, avoid looking through things we
1698 // do not like, e.g., non-inbounds GEPs.
1699 if (isa<CastInst>(I)) {
1700 TrackUse = true;
1701 return 0;
1702 }
1703 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1704 if (GEP->hasAllZeroIndices() ||
1705 (GEP->isInBounds() && GEP->hasAllConstantIndices())) {
1706 TrackUse = true;
1707 return 0;
1708 }
1709
1710 int64_t Offset;
1711 if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1712 if (Base == &AssociatedValue && getPointerOperand(I) == UseV) {
1713 int64_t DerefBytes =
1714 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1715
1716 IsNonNull |= !NullPointerIsDefined;
1717 return std::max(int64_t(0), DerefBytes);
1718 }
1719 }
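// Illustrative example (not from the original source): if the associated
// pointer is accessed as `load i64, i64* %p.plus8`, where %p.plus8 is the
// associated value at a constant offset of 8 bytes, then
//   DerefBytes = store size of i64 (8) + Offset (8) = 16
// known dereferenceable bytes, and the access also implies nonnull when null
// is not a defined address in this address space.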
1720 if (const Value *Base =
1721 GetPointerBaseWithConstantOffset(UseV, Offset, DL,
1722 /*AllowNonInbounds*/ false)) {
1723 if (Base == &AssociatedValue) {
1724 // As long as we only use known information there is no need to track
1725 // dependences here.
1726 auto &DerefAA = A.getAAFor<AADereferenceable>(
1727 QueryingAA, IRPosition::value(*Base), /* TrackDependence */ false);
1728 IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
1729 IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
1730 int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
1731 return std::max(int64_t(0), DerefBytes - std::max(int64_t(0), Offset));
1732 }
1733 }
1734
1735 return 0;
1736}
1737
1738struct AANonNullImpl : AANonNull {
1739 AANonNullImpl(const IRPosition &IRP)
1740 : AANonNull(IRP),
1741 NullIsDefined(NullPointerIsDefined(
1742 getAnchorScope(),
1743 getAssociatedValue().getType()->getPointerAddressSpace())) {}
1744
1745 /// See AbstractAttribute::initialize(...).
1746 void initialize(Attributor &A) override {
1747 if (!NullIsDefined &&
1748 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1749 indicateOptimisticFixpoint();
1750 else if (isa<ConstantPointerNull>(getAssociatedValue()))
1751 indicatePessimisticFixpoint();
1752 else
1753 AANonNull::initialize(A);
1754 }
1755
1756 /// See AAFromMustBeExecutedContext
1757 bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1758 bool IsNonNull = false;
1759 bool TrackUse = false;
1760 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1761 IsNonNull, TrackUse);
1762 setKnown(IsNonNull);
1763 return TrackUse;
1764 }
1765
1766 /// See AbstractAttribute::getAsStr().
1767 const std::string getAsStr() const override {
1768 return getAssumed() ? "nonnull" : "may-null";
1769 }
1770
1771 /// Flag to determine if the underlying value can be null and still allow
1772 /// valid accesses.
1773 const bool NullIsDefined;
1774};
1775
1776/// NonNull attribute for a floating value.
1777struct AANonNullFloating
1778 : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1779 using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1780 AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1781
1782 /// See AbstractAttribute::updateImpl(...).
1783 ChangeStatus updateImpl(Attributor &A) override {
1784 ChangeStatus Change = Base::updateImpl(A);
1785 if (isKnownNonNull())
1786 return Change;
1787
1788 if (!NullIsDefined) {
1789 const auto &DerefAA =
1790 A.getAAFor<AADereferenceable>(*this, getIRPosition());
1791 if (DerefAA.getAssumedDereferenceableBytes())
1792 return Change;
1793 }
1794
1795 const DataLayout &DL = A.getDataLayout();
1796
1797 DominatorTree *DT = nullptr;
1798 InformationCache &InfoCache = A.getInfoCache();
1799 if (const Function *Fn = getAnchorScope())
1800 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1801
1802 auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1803 bool Stripped) -> bool {
1804 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1805 if (!Stripped && this == &AA) {
1806 if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1807 T.indicatePessimisticFixpoint();
1808 } else {
1809 // Use abstract attribute information.
1810 const AANonNull::StateType &NS =
1811 static_cast<const AANonNull::StateType &>(AA.getState());
1812 T ^= NS;
1813 }
1814 return T.isValidState();
1815 };
1816
1817 StateType T;
1818 if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1819 T, VisitValueCB))
1820 return indicatePessimisticFixpoint();
1821
1822 return clampStateAndIndicateChange(getState(), T);
1823 }
1824
1825 /// See AbstractAttribute::trackStatistics()
1826 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1827};
1828
1829/// NonNull attribute for function return value.
1830struct AANonNullReturned final
1831 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1832 AANonNullReturned(const IRPosition &IRP)
1833 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1834
1835 /// See AbstractAttribute::trackStatistics()
1836 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1837};
1838
1839/// NonNull attribute for function argument.
1840struct AANonNullArgument final
1841 : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1842 AANonNullImpl> {
1843 AANonNullArgument(const IRPosition &IRP)
1844 : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1845 AANonNullImpl>(
1846 IRP) {}
1847
1848 /// See AbstractAttribute::trackStatistics()
1849 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1850};
1851
1852struct AANonNullCallSiteArgument final : AANonNullFloating {
1853 AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1854
1855 /// See AbstractAttribute::trackStatistics()
1856 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1857};
1858
1859/// NonNull attribute for a call site return position.
1860struct AANonNullCallSiteReturned final
1861 : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1862 AANonNullImpl> {
1863 AANonNullCallSiteReturned(const IRPosition &IRP)
1864 : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1865 AANonNullImpl>(
1866 IRP) {}
1867
1868 /// See AbstractAttribute::trackStatistics()
1869 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1870};
1871
1872/// ------------------------ No-Recurse Attributes ----------------------------
1873
1874struct AANoRecurseImpl : public AANoRecurse {
1875 AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1876
1877 /// See AbstractAttribute::getAsStr()
1878 const std::string getAsStr() const override {
1879 return getAssumed() ? "norecurse" : "may-recurse";
1880 }
1881};
1882
1883struct AANoRecurseFunction final : AANoRecurseImpl {
1884 AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1885
1886 /// See AbstractAttribute::initialize(...).
1887 void initialize(Attributor &A) override {
1888 AANoRecurseImpl::initialize(A);
1889 if (const Function *F = getAnchorScope())
1890 if (A.getInfoCache().getSccSize(*F) == 1)
1891 return;
1892 indicatePessimisticFixpoint();
1893 }
1894
1895 /// See AbstractAttribute::updateImpl(...).
1896 ChangeStatus updateImpl(Attributor &A) override {
1897
1898 auto CheckForNoRecurse = [&](Instruction &I) {
1899 ImmutableCallSite ICS(&I);
1900 if (ICS.hasFnAttr(Attribute::NoRecurse))
1901 return true;
1902
1903 const auto &NoRecurseAA =
1904 A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1905 if (!NoRecurseAA.isAssumedNoRecurse())
1906 return false;
1907
1908 // Recursion to the same function
1909 if (ICS.getCalledFunction() == getAnchorScope())
1910 return false;
1911
1912 return true;
1913 };
1914
1915 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1916 return indicatePessimisticFixpoint();
1917 return ChangeStatus::UNCHANGED;
1918 }
1919
1920 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1921};
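The deduction above, in words: a function in a singleton SCC is norecurse when every call it contains targets a callee that is itself assumed norecurse and is not the function itself. A minimal standalone analogue of that rule, assuming hypothetical names (FuncInfo, deduceNoRecurse) and plain std containers in place of the Attributor machinery:

#include <string>
#include <unordered_map>
#include <vector>

struct FuncInfo {
  std::vector<std::string> Callees; // direct callees, by name
  bool AssumedNoRecurse = false;    // result of a prior deduction or an IR attribute
};

// Returns true if every call in F goes to an assumed-norecurse callee other
// than F itself; otherwise F may (transitively) reach itself again.
static bool deduceNoRecurse(const std::string &F,
                            const std::unordered_map<std::string, FuncInfo> &M) {
  for (const std::string &Callee : M.at(F).Callees) {
    if (Callee == F)
      return false;                      // direct self-recursion
    auto It = M.find(Callee);
    if (It == M.end() || !It->second.AssumedNoRecurse)
      return false;                      // unknown or possibly recursive callee
  }
  return true;
}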
1922
1923/// NoRecurse attribute deduction for a call site.
1924struct AANoRecurseCallSite final : AANoRecurseImpl {
1925 AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1926
1927 /// See AbstractAttribute::initialize(...).
1928 void initialize(Attributor &A) override {
1929 AANoRecurseImpl::initialize(A);
1930 Function *F = getAssociatedFunction();
1931 if (!F)
1932 indicatePessimisticFixpoint();
1933 }
1934
1935 /// See AbstractAttribute::updateImpl(...).
1936 ChangeStatus updateImpl(Attributor &A) override {
1937 // TODO: Once we have call site specific value information we can provide
1938 // call site specific liveness information and then it makes
1939 // sense to specialize attributes for call site arguments instead of
1940 // redirecting requests to the callee argument.
1941 Function *F = getAssociatedFunction();
1942 const IRPosition &FnPos = IRPosition::function(*F);
1943 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1944 return clampStateAndIndicateChange(
1945 getState(),
1946 static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1947 }
1948
1949 /// See AbstractAttribute::trackStatistics()
1950 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1951};
1952
1953/// ------------------------ Will-Return Attributes ----------------------------
1954
1955// Helper function that checks whether a function has any cycle.
1956// TODO: Replace with more efficient code
1957static bool containsCycle(Function &F) {
1958 SmallPtrSet<BasicBlock *, 32> Visited;
1959
1960 // Traverse BB by dfs and check whether successor is already visited.
1961 for (BasicBlock *BB : depth_first(&F)) {
1962 Visited.insert(BB);
1963 for (auto *SuccBB : successors(BB)) {
1964 if (Visited.count(SuccBB))
1965 return true;
1966 }
1967 }
1968 return false;
1969}
1970
1971// Helper function that checks whether the function has a loop which might
1972// become an endless loop.
1973// FIXME: Any cycle is regarded as endless loop for now.
1974// We have to allow some patterns.
1975static bool containsPossiblyEndlessLoop(Function *F) {
1976 return !F || !F->hasExactDefinition() || containsCycle(*F);
1977}
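containsCycle() deliberately over-approximates: any edge into an already visited block counts as a cycle, so a diamond-shaped CFG is flagged as well (the FIXME above notes this). An illustrative standalone analogue of the same walk over a plain adjacency list; Graph and mayContainCycle are names invented for this sketch:

#include <unordered_map>
#include <unordered_set>
#include <vector>

using Graph = std::unordered_map<int, std::vector<int>>;

// Depth-first walk from Entry; report "cycle" as soon as a successor has
// already been visited. Like containsCycle(), this also fires on cross edges,
// which is why callers treat the result only as "possibly endless".
static bool mayContainCycle(const Graph &G, int Entry) {
  std::unordered_set<int> Visited;
  std::vector<int> Stack{Entry};
  while (!Stack.empty()) {
    int N = Stack.back();
    Stack.pop_back();
    if (!Visited.insert(N).second)
      continue;                          // already explored
    auto It = G.find(N);
    if (It == G.end())
      continue;                          // no successors
    for (int Succ : It->second) {
      if (Visited.count(Succ))
        return true;                     // back or cross edge: treated as a cycle
      Stack.push_back(Succ);
    }
  }
  return false;
}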
1978
1979struct AAWillReturnImpl : public AAWillReturn {
1980 AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
1981
1982 /// See AbstractAttribute::initialize(...).
1983 void initialize(Attributor &A) override {
1984 AAWillReturn::initialize(A);
1985
1986 Function *F = getAssociatedFunction();
1987 if (containsPossiblyEndlessLoop(F))
1988 indicatePessimisticFixpoint();
1989 }
1990
1991 /// See AbstractAttribute::updateImpl(...).
1992 ChangeStatus updateImpl(Attributor &A) override {
1993 auto CheckForWillReturn = [&](Instruction &I) {
1994 IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
1995 const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
1996 if (WillReturnAA.isKnownWillReturn())
1997 return true;
1998 if (!WillReturnAA.isAssumedWillReturn())
1999 return false;
2000 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2001 return NoRecurseAA.isAssumedNoRecurse();
2002 };
2003
2004 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2005 return indicatePessimisticFixpoint();
2006
2007 return ChangeStatus::UNCHANGED;
2008 }
2009
2010 /// See AbstractAttribute::getAsStr()
2011 const std::string getAsStr() const override {
2012 return getAssumed() ? "willreturn" : "may-noreturn";
2013 }
2014};
2015
2016struct AAWillReturnFunction final : AAWillReturnImpl {
2017 AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2018
2019 /// See AbstractAttribute::trackStatistics()
2020 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2021};
2022
2023/// WillReturn attribute deduction for a call site.
2024struct AAWillReturnCallSite final : AAWillReturnImpl {
2025 AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2026
2027 /// See AbstractAttribute::initialize(...).
2028 void initialize(Attributor &A) override {
2029 AAWillReturnImpl::initialize(A);
2030 Function *F = getAssociatedFunction();
2031 if (!F)
2032 indicatePessimisticFixpoint();
2033 }
2034
2035 /// See AbstractAttribute::updateImpl(...).
2036 ChangeStatus updateImpl(Attributor &A) override {
2037 // TODO: Once we have call site specific value information we can provide
2038 // call site specific liveness information and then it makes
2039 // sense to specialize attributes for call site arguments instead of
2040 // redirecting requests to the callee argument.
2041 Function *F = getAssociatedFunction();
2042 const IRPosition &FnPos = IRPosition::function(*F);
2043 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2044 return clampStateAndIndicateChange(
2045 getState(),
2046 static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2047 }
2048
2049 /// See AbstractAttribute::trackStatistics()
2050 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2051};
2052
2053/// ------------------------ NoAlias Argument Attribute ------------------------
2054
2055struct AANoAliasImpl : AANoAlias {
2056 AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2057
2058 const std::string getAsStr() const override {
2059 return getAssumed() ? "noalias" : "may-alias";
2060 }
2061};
2062
2063/// NoAlias attribute for a floating value.
2064struct AANoAliasFloating final : AANoAliasImpl {
2065 AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2066
2067 /// See AbstractAttribute::initialize(...).
2068 void initialize(Attributor &A) override {
2069 AANoAliasImpl::initialize(A);
2070 Value &Val = getAssociatedValue();
2071 if (isa<AllocaInst>(Val))
2072 indicateOptimisticFixpoint();
2073 if (isa<ConstantPointerNull>(Val) &&
2074 Val.getType()->getPointerAddressSpace() == 0)
2075 indicateOptimisticFixpoint();
2076 }
2077
2078 /// See AbstractAttribute::updateImpl(...).
2079 ChangeStatus updateImpl(Attributor &A) override {
2080 // TODO: Implement this.
2081 return indicatePessimisticFixpoint();
2082 }
2083
2084 /// See AbstractAttribute::trackStatistics()
2085 void trackStatistics() const override {
2086 STATS_DECLTRACK_FLOATING_ATTR(noalias)
2087 }
2088};
2089
2090/// NoAlias attribute for an argument.
2091struct AANoAliasArgument final
2092 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2093 AANoAliasArgument(const IRPosition &IRP)
2094 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}
2095
2096 /// See AbstractAttribute::trackStatistics()
2097 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2098};
2099
2100struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2101 AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2102
2103 /// See AbstractAttribute::initialize(...).
2104 void initialize(Attributor &A) override {
2105 // See callsite argument attribute and callee argument attribute.
2106 ImmutableCallSite ICS(&getAnchorValue());
2107 if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2108 indicateOptimisticFixpoint();
2109 }
2110
2111 /// See AbstractAttribute::updateImpl(...).
2112 ChangeStatus updateImpl(Attributor &A) override {
2113 // We can deduce "noalias" if the following conditions hold.
2114 // (i) Associated value is assumed to be noalias in the definition.
2115 // (ii) Associated value is assumed to be no-capture in all the uses
2116 // possibly executed before this callsite.
2117 // (iii) There is no other pointer argument which could alias with the
2118 // value.
2119
2120 const Value &V = getAssociatedValue();
2121 const IRPosition IRP = IRPosition::value(V);
2122
2123 // (i) Check whether noalias holds in the definition.
2124
2125 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
2126
2127 if (!NoAliasAA.isAssumedNoAlias())
2128 return indicatePessimisticFixpoint();
2129
2130 LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
2131 << " is assumed NoAlias in the definition\n");
2132
2133 // (ii) Check whether the value is captured in the scope using AANoCapture.
2134 // FIXME: This is conservative though, it is better to look at CFG and
2135 // check only uses possibly executed before this callsite.
2136
2137 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2138 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2139 LLVM_DEBUG(
2140 dbgs() << "[Attributor][AANoAliasCSArg] " << V
2141 << " cannot be noalias as it is potentially captured\n");
2142 return indicatePessimisticFixpoint();
2143 }
2144
2145 // (iii) Check there is no other pointer argument which could alias with the
2146 // value.
2147 ImmutableCallSite ICS(&getAnchorValue());
2148 for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2149 if (getArgNo() == (int)i)
2150 continue;
2151 const Value *ArgOp = ICS.getArgOperand(i);
2152 if (!ArgOp->getType()->isPointerTy())
2153 continue;
2154
2155 if (const Function *F = getAnchorScope()) {
2156 if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2157 bool IsAliasing = AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2158 LLVM_DEBUG(dbgs()
2159 << "[Attributor][NoAliasCSArg] Check alias between "
2160 "callsite arguments "
2161 << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
2162 << getAssociatedValue() << " " << *ArgOp << " => "
2163 << (IsAliasing ? "" : "no-") << "alias \n");
2164
2165 if (IsAliasing)
2166 continue;
2167 }
2168 }
2169 return indicatePessimisticFixpoint();
2170 }
2171
2172 return ChangeStatus::UNCHANGED;
2173 }
2174
2175 /// See AbstractAttribute::trackStatistics()
2176 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2177};
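The three numbered conditions in updateImpl() collapse into one predicate over three facts. A simplified editorial sketch, where ArgFacts and its fields stand in for the AANoAlias, AANoCapture, and AAResults queries used above:

#include <vector>

struct ArgFacts {
  bool AssumedNoAliasAtDef;      // (i)  noalias holds for the underlying value
  bool AssumedNoCaptureMaybeRet; // (ii) captured at most via the return value
  std::vector<bool> NoAliasWithOtherPtrArgs; // (iii) one entry per other pointer argument
};

// The call-site argument can only keep the optimistic "noalias" assumption if
// all three facts hold; any failure drives the state to the pessimistic fixpoint.
static bool mayDeduceNoAliasForCallSiteArg(const ArgFacts &F) {
  if (!F.AssumedNoAliasAtDef)
    return false;
  if (!F.AssumedNoCaptureMaybeRet)
    return false;
  for (bool NoAlias : F.NoAliasWithOtherPtrArgs)
    if (!NoAlias)
      return false;
  return true;
}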
2178
2179/// NoAlias attribute for function return value.
2180struct AANoAliasReturned final : AANoAliasImpl {
2181 AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2182
2183 /// See AbstractAttribute::updateImpl(...).
2184 virtual ChangeStatus updateImpl(Attributor &A) override {
2185
2186 auto CheckReturnValue = [&](Value &RV) -> bool {
2187 if (Constant *C = dyn_cast<Constant>(&RV))
2188 if (C->isNullValue() || isa<UndefValue>(C))
2189 return true;
2190
2191 /// For now, we can only deduce noalias if we have call sites.
2192 /// FIXME: add more support.
2193 ImmutableCallSite ICS(&RV);
2194 if (!ICS)
2195 return false;
2196
2197 const IRPosition &RVPos = IRPosition::value(RV);
2198 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2199 if (!NoAliasAA.isAssumedNoAlias())
2200 return false;
2201
2202 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2203 return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2204 };
2205
2206 if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2207 return indicatePessimisticFixpoint();
2208
2209 return ChangeStatus::UNCHANGED;
2210 }
2211
2212 /// See AbstractAttribute::trackStatistics()
2213 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2214};
2215
2216/// NoAlias attribute deduction for a call site return value.
2217struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2218 AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2219
2220 /// See AbstractAttribute::initialize(...).
2221 void initialize(Attributor &A) override {
2222 AANoAliasImpl::initialize(A);
2223 Function *F = getAssociatedFunction();
2224 if (!F)
2225 indicatePessimisticFixpoint();
2226 }
2227
2228 /// See AbstractAttribute::updateImpl(...).
2229 ChangeStatus updateImpl(Attributor &A) override {
2230 // TODO: Once we have call site specific value information we can provide
2231 // call site specific liveness information and then it makes
2232 // sense to specialize attributes for call site arguments instead of
2233 // redirecting requests to the callee argument.
2234 Function *F = getAssociatedFunction();
2235 const IRPosition &FnPos = IRPosition::returned(*F);
2236 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2237 return clampStateAndIndicateChange(
2238 getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2239 }
2240
2241 /// See AbstractAttribute::trackStatistics()
2242 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2243};
2244
2245/// -------------------AAIsDead Function Attribute-----------------------
2246
2247struct AAIsDeadValueImpl : public AAIsDead {
2248 AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2249
2250 /// See AAIsDead::isAssumedDead().
2251 bool isAssumedDead() const override { return getAssumed(); }
2252
2253 /// See AAIsDead::isAssumedDead(BasicBlock *).
2254 bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2255
2256 /// See AAIsDead::isKnownDead(BasicBlock *).
2257 bool isKnownDead(const BasicBlock *BB) const override { return false; }
2258
2259 /// See AAIsDead::isAssumedDead(Instruction *I).
2260 bool isAssumedDead(const Instruction *I) const override {
2261 return I == getCtxI() && isAssumedDead();
2262 }
2263
2264 /// See AAIsDead::isKnownDead(Instruction *I).
2265 bool isKnownDead(const Instruction *I) const override {
2266 return I == getCtxI() && getKnown();
2267 }
2268
2269 /// See AbstractAttribute::getAsStr().
2270 const std::string getAsStr() const override {
2271 return isAssumedDead() ? "assumed-dead" : "assumed-live";
2272 }
2273};
2274
2275struct AAIsDeadFloating : public AAIsDeadValueImpl {
2276 AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2277
2278 /// See AbstractAttribute::initialize(...).
2279 void initialize(Attributor &A) override {
2280 if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2281 if (!wouldInstructionBeTriviallyDead(I))
2282 indicatePessimisticFixpoint();
2283 if (isa<UndefValue>(getAssociatedValue()))
2284 indicatePessimisticFixpoint();
2285 }
2286
2287 /// See AbstractAttribute::updateImpl(...).
2288 ChangeStatus updateImpl(Attributor &A) override {
2289 auto UsePred = [&](const Use &U, bool &Follow) {
2290 Instruction *UserI = cast<Instruction>(U.getUser());
2291 if (CallSite CS = CallSite(UserI)) {
2292 if (!CS.isArgOperand(&U))
2293 return false;
2294 const IRPosition &CSArgPos =
2295 IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2296 const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2297 return CSArgIsDead.isAssumedDead();
2298 }
2299 if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2300 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2301 const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2302 return RetIsDeadAA.isAssumedDead();
2303 }
2304 Follow = true;
2305 return wouldInstructionBeTriviallyDead(UserI);
2306 };
2307
2308 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2309 return indicatePessimisticFixpoint();
2310 return ChangeStatus::UNCHANGED;
2311 }
2312
2313 /// See AbstractAttribute::manifest(...).
2314 ChangeStatus manifest(Attributor &A) override {
2315 Value &V = getAssociatedValue();
2316 if (auto *I = dyn_cast<Instruction>(&V))
2317 if (wouldInstructionBeTriviallyDead(I)) {
2318 A.deleteAfterManifest(*I);
2319 return ChangeStatus::CHANGED;
2320 }
2321
2322 if (V.use_empty())
2323 return ChangeStatus::UNCHANGED;
2324
2325 UndefValue &UV = *UndefValue::get(V.getType());
2326 bool AnyChange = false;
2327 for (Use &U : V.uses())
2328 AnyChange |= A.changeUseAfterManifest(U, UV);
2329 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2330 }
2331
2332 /// See AbstractAttribute::trackStatistics()
2333 void trackStatistics() const override {
2334 STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2335 }
2336};
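The UsePred walk above declares a floating value dead only if every transitive use is uninteresting: a call-site argument that is itself dead, a return whose value is dead, or a user that would be trivially dead (in which case that user's own uses are followed). A condensed standalone sketch of that all-uses check; the User type and its fields are invented for illustration:

#include <vector>

struct User {
  bool IsDeadCallArgOrDeadReturn; // cases answered by separate AAIsDead queries
  bool WouldBeTriviallyDead;      // case where the walk keeps following the chain
  std::vector<User *> Users;      // users of this user's own result
};

// Mirrors the structure of the UsePred walk: the value stays assumed dead only
// if *every* transitive use is provably uninteresting.
static bool allUsesDead(const std::vector<User *> &Uses) {
  for (User *U : Uses) {
    if (U->IsDeadCallArgOrDeadReturn)
      continue;                        // this use is dead by a separate deduction
    if (!U->WouldBeTriviallyDead)
      return false;                    // a real (live) use was found
    if (!allUsesDead(U->Users))        // Follow = true: recurse into its users
      return false;
  }
  return true;
}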
2337
2338struct AAIsDeadArgument : public AAIsDeadFloating {
2339 AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2340
2341 /// See AbstractAttribute::initialize(...).
2342 void initialize(Attributor &A) override {
2343 if (!getAssociatedFunction()->hasExactDefinition())
2344 indicatePessimisticFixpoint();
2345 }
2346
2347 /// See AbstractAttribute::trackStatistics()
2348 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2349};
2350
2351struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2352 AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2353
2354 /// See AbstractAttribute::initialize(...).
2355 void initialize(Attributor &A) override {
2356 if (isa<UndefValue>(getAssociatedValue()))
2357 indicatePessimisticFixpoint();
2358 }
2359
2360 /// See AbstractAttribute::updateImpl(...).
2361 ChangeStatus updateImpl(Attributor &A) override {
2362 // TODO: Once we have call site specific value information we can provide
2363 // call site specific liveness information and then it makes
2364 // sense to specialize attributes for call site arguments instead of
2365 // redirecting requests to the callee argument.
2366 Argument *Arg = getAssociatedArgument();
2367 if (!Arg)
2368 return indicatePessimisticFixpoint();
2369 const IRPosition &ArgPos = IRPosition::argument(*Arg);
2370 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2371 return clampStateAndIndicateChange(
2372 getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2373 }
2374
2375 /// See AbstractAttribute::manifest(...).
2376 ChangeStatus manifest(Attributor &A) override {
2377 CallBase &CB = cast<CallBase>(getAnchorValue());
2378 Use &U = CB.getArgOperandUse(getArgNo());
2379 assert(!isa<UndefValue>(U.get()) &&
2380 "Expected undef values to be filtered out!");
2381 UndefValue &UV = *UndefValue::get(U->getType());
2382 if (A.changeUseAfterManifest(U, UV))
2383 return ChangeStatus::CHANGED;
2384 return ChangeStatus::UNCHANGED;
2385 }
2386
2387 /// See AbstractAttribute::trackStatistics()
2388 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2389};
2390
2391struct AAIsDeadReturned : public AAIsDeadValueImpl {
2392 AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2393
2394 /// See AbstractAttribute::updateImpl(...).
2395 ChangeStatus updateImpl(Attributor &A) override {
2396
2397 auto PredForCallSite = [&](AbstractCallSite ACS) {
2398 if (ACS.isCallbackCall())
2399 return false;
2400 const IRPosition &CSRetPos =
2401 IRPosition::callsite_returned(ACS.getCallSite());
2402 const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2403 return RetIsDeadAA.isAssumedDead();
2404 };
2405
2406 if (!A.checkForAllCallSites(PredForCallSite, *this, true))
2407 return indicatePessimisticFixpoint();
2408
2409 return ChangeStatus::UNCHANGED;
2410 }
2411
2412 /// See AbstractAttribute::manifest(...).
2413 ChangeStatus manifest(Attributor &A) override {
2414 // TODO: Rewrite the signature to return void?
2415 bool AnyChange = false;
2416 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2417 auto RetInstPred = [&](Instruction &I) {
2418 ReturnInst &RI = cast<ReturnInst>(I);
2419 if (!isa<UndefValue>(RI.getReturnValue()))
2420 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2421 return true;
2422 };
2423 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2424 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2425 }
2426
2427 /// See AbstractAttribute::trackStatistics()
2428 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2429};
2430
2431struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2432 AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2433
2434 /// See AbstractAttribute::initialize(...).
2435 void initialize(Attributor &A) override {}
2436
2437 /// See AbstractAttribute::trackStatistics()
2438 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
2439};
2440
2441struct AAIsDeadFunction : public AAIsDead {
2442 AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2443
2444 /// See AbstractAttribute::initialize(...).
2445 void initialize(Attributor &A) override {
2446 const Function *F = getAssociatedFunction();
2447 if (F && !F->isDeclaration()) {
2448 ToBeExploredFrom.insert(&F->getEntryBlock().front());
2449 assumeLive(A, F->getEntryBlock());
2450 }
2451 }
2452
2453 /// See AbstractAttribute::getAsStr().
2454 const std::string getAsStr() const override {
2455 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2456 std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
2457 std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2458 std::to_string(KnownDeadEnds.size()) + "]";
2459 }
2460
2461 /// See AbstractAttribute::manifest(...).
2462 ChangeStatus manifest(Attributor &A) override {
2463 assert(getState().isValidState() &&
2464 "Attempted to manifest an invalid state!");
2465
2466 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2467 Function &F = *getAssociatedFunction();
2468
2469 if (AssumedLiveBlocks.empty()) {
2470 A.deleteAfterManifest(F);
2471 return ChangeStatus::CHANGED;
2472 }
2473
2474 // Flag to determine if we can change an invoke to a call assuming the
2475 // callee is nounwind. This is not possible if the personality of the
2476 // function allows catching asynchronous exceptions.
2477 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2478
2479 KnownDeadEnds.set_union(ToBeExploredFrom);
2480 for (const Instruction *DeadEndI : KnownDeadEnds) {
2481 auto *CB = dyn_cast<CallBase>(DeadEndI);
2482 if (!CB)
2483 continue;
2484 const auto &NoReturnAA =
2485 A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2486 bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2487 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2488 continue;
2489 Instruction *I = const_cast<Instruction *>(DeadEndI);
2490 BasicBlock *BB = I->getParent();
2491 Instruction *SplitPos = I->getNextNode();
2492 // TODO: mark stuff before unreachable instructions as dead.
2493
2494 if (auto *II = dyn_cast<InvokeInst>(I)) {
2495 // If we keep the invoke the split position is at the beginning of the
2496 // normal destination block (it invokes a noreturn function after all).
2497 BasicBlock *NormalDestBB = II->getNormalDest();
2498 SplitPos = &NormalDestBB->front();
2499
2500 /// Invoke is replaced with a call and unreachable is placed after it if
2501 /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
2502 /// and only place an unreachable in the normal successor.
2503 if (Invoke2CallAllowed) {
2504 if (II->getCalledFunction()) {
2505 const IRPosition &IPos = IRPosition::callsite_function(*II);
2506 const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2507 if (AANoUnw.isAssumedNoUnwind()) {
2508 LLVM_DEBUG(dbgs()
2509 << "[AAIsDead] Replace invoke with call inst\n");
2510 CallInst *CI = createCallMatchingInvoke(II);
2511 CI->insertBefore(II);
2512 CI->takeName(II);
2513 II->replaceAllUsesWith(CI);
2514
2515 // If this is a nounwind + mayreturn invoke we only remove the unwind edge.
2516 // This is done by moving the invoke into a new and dead block and connecting
2517 // the normal destination of the invoke with a branch that follows the call
2518 // replacement we created above.
2519 if (MayReturn) {
2520 BasicBlock *NewDeadBB = SplitBlock(BB, II, nullptr, nullptr, nullptr, ".i2c");
2521 assert(isa<BranchInst>(BB->getTerminator()) &&
2522 BB->getTerminator()->getNumSuccessors() == 1 &&
2523 BB->getTerminator()->getSuccessor(0) == NewDeadBB);
2524 new UnreachableInst(I->getContext(), NewDeadBB);
2525 BB->getTerminator()->setOperand(0, NormalDestBB);
2526 A.deleteAfterManifest(*II);
2527 continue;
2528 }
2529
2530 // We do not need an invoke (II) but instead want a call followed
2531 // by an unreachable. However, we do not remove II as other
2532 // abstract attributes might have it cached as part of their
2533 // results. Given that we modify the CFG anyway, we simply keep II
2534 // around but in a new dead block. To avoid II being live through
2535 // a different edge we have to ensure the block we place it in is
2536 // only reached from the current block of II and then not reached
2537 // at all when we insert the unreachable.
2538 SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
2539 SplitPos = CI->getNextNode();
2540 }
2541 }
2542 }
2543
2544 if (SplitPos == &NormalDestBB->front()) {
2545 // If this is an invoke of a noreturn function the edge to the normal
2546 // destination block is dead but not necessarily the block itself.
2547 // TODO: We need to move to an edge based system during deduction and
2548 // also manifest.
2549 assert(!NormalDestBB->isLandingPad() &&
2550 "Expected the normal destination not to be a landingpad!");
2551 if (NormalDestBB->getUniquePredecessor() == BB) {
2552 assumeLive(A, *NormalDestBB);
2553 } else {
2554 BasicBlock *SplitBB =
2555 SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2556 // The split block is live even if it contains only an unreachable
2557 // instruction at the end.
2558 assumeLive(A, *SplitBB);
2559 SplitPos = SplitBB->getTerminator();
2560 HasChanged = ChangeStatus::CHANGED;
2561 }
2562 }
2563 }
2564
2565 if (isa_and_nonnull<UnreachableInst>(SplitPos))
2566 continue;
2567
2568 BB = SplitPos->getParent();
2569 SplitBlock(BB, SplitPos);
2570 changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
2571 HasChanged = ChangeStatus::CHANGED;
2572 }
2573
2574 for (BasicBlock &BB : F)
2575 if (!AssumedLiveBlocks.count(&BB))
2576 A.deleteAfterManifest(BB);
2577
2578 return HasChanged;
2579 }
2580
2581 /// See AbstractAttribute::updateImpl(...).
2582 ChangeStatus updateImpl(Attributor &A) override;
2583
2584 /// See AbstractAttribute::trackStatistics()
2585 void trackStatistics() const override {}
2586
2587 /// Returns true if the function is assumed dead.
2588 bool isAssumedDead() const override { return false; }
2589
2590 /// See AAIsDead::isAssumedDead(BasicBlock *).
2591 bool isAssumedDead(const BasicBlock *BB) const override {
2592 assert(BB->getParent() == getAssociatedFunction() &&
2593 "BB must be in the same anchor scope function.");
2594
2595 if (!getAssumed())
2596 return false;
2597 return !AssumedLiveBlocks.count(BB);
2598 }
2599
2600 /// See AAIsDead::isKnownDead(BasicBlock *).
2601 bool isKnownDead(const BasicBlock *BB) const override {
2602 return getKnown() && isAssumedDead(BB);
2603 }
2604
2605 /// See AAIsDead::isAssumedDead(Instruction *I).
2606 bool isAssumedDead(const Instruction *I) const override {
2607 assert(I->getParent()->getParent() == getAssociatedFunction() &&
2608 "Instruction must be in the same anchor scope function.");
2609
2610 if (!getAssumed())
2611 return false;
2612
2613 // If it is not in AssumedLiveBlocks then it is for sure dead.
2614 // Otherwise, it can still be after a noreturn call in a live block.
2615 if (!AssumedLiveBlocks.count(I->getParent()))
2616 return true;
2617
2618 // If it is not after a liveness barrier it is live.
2619 const Instruction *PrevI = I->getPrevNode();
2620 while (PrevI) {
2621 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2622 return true;
2623 PrevI = PrevI->getPrevNode();
2624 }
2625 return false;
2626 }
2627
2628 /// See AAIsDead::isKnownDead(Instruction *I).
2629 bool isKnownDead(const Instruction *I) const override {
2630 return getKnown() && isAssumedDead(I);
2631 }
2632
2633 /// Determine if \p F might catch asynchronous exceptions.
2634 static bool mayCatchAsynchronousExceptions(const Function &F) {
2635 return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2636 }
2637
2638 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2639 /// that internal functions called from \p BB should now be looked at.
2640 bool assumeLive(Attributor &A, const BasicBlock &BB) {
2641 if (!AssumedLiveBlocks.insert(&BB).second)
2642 return false;
2643
2644 // We assume that all of BB is (probably) live now and if there are calls to
2645 // internal functions we will assume that those are now live as well. This
2646 // is a performance optimization for blocks with calls to a lot of internal
2647 // functions. It can however cause dead functions to be treated as live.
2648 for (const Instruction &I : BB)
2649 if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2650 if (const Function *F = ICS.getCalledFunction())
2651 if (F->hasLocalLinkage())
2652 A.markLiveInternalFunction(*F);
2653 return true;
2654 }
2655
2656 /// Collection of instructions that need to be explored again, e.g., we
2657 /// did assume they do not transfer control to (one of their) successors.
2658 SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2659
2660 /// Collection of instructions that are known to not transfer control.
2661 SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2662
2663 /// Collection of all assumed live BasicBlocks.
2664 DenseSet<const BasicBlock *> AssumedLiveBlocks;
2665};
2666
2667static bool
2668identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2669 AbstractAttribute &AA,
2670 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2671 const IRPosition &IPos = IRPosition::callsite_function(CB);
2672
2673 const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2674 if (NoReturnAA.isAssumedNoReturn())
2675 return !NoReturnAA.isKnownNoReturn();
2676 if (CB.isTerminator())
2677 AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
2678 else
2679 AliveSuccessors.push_back(CB.getNextNode());
2680 return false;
2681}
2682
2683static bool
2684identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
2685 AbstractAttribute &AA,
2686 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2687 bool UsedAssumedInformation =
2688 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
2689
2690 // First, determine if we can change an invoke to a call assuming the
2691 // callee is nounwind. This is not possible if the personality of the
2692 // function allows catching asynchronous exceptions.
2693 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
2694 AliveSuccessors.push_back(&II.getUnwindDest()->front());
2695 } else {
2696 const IRPosition &IPos = IRPosition::callsite_function(II);
2697 const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
2698 if (AANoUnw.isAssumedNoUnwind()) {
2699 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
2700 } else {
2701 AliveSuccessors.push_back(&II.getUnwindDest()->front());
2702 }
2703 }
2704 return UsedAssumedInformation;
2705}
2706
2707static Optional<ConstantInt *>
2708getAssumedConstant(Attributor &A, const Value &V, AbstractAttribute &AA,
2709 bool &UsedAssumedInformation) {
2710 const auto &ValueSimplifyAA =
2711 A.getAAFor<AAValueSimplify>(AA, IRPosition::value(V));
2712 Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
2713 UsedAssumedInformation |= !ValueSimplifyAA.isKnown();
2714 if (!SimplifiedV.hasValue())
2715 return llvm::None;
2716 if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue()))
2717 return llvm::None;
2718 return dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
2719}
2720
2721static bool
2722identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
2723 AbstractAttribute &AA,
2724 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2725 bool UsedAssumedInformation = false;
2726 if (BI.getNumSuccessors() == 1) {
2727 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2728 } else {
2729 Optional<ConstantInt *> CI =
2730 getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
2731 if (!CI.hasValue()) {
2732 // No value yet, assume both edges are dead.
2733 } else if (CI.getValue()) {
2734 const BasicBlock *SuccBB =
2735 BI.getSuccessor(1 - CI.getValue()->getZExtValue());
2736 AliveSuccessors.push_back(&SuccBB->front());
2737 } else {
2738 AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2739 AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
2740 UsedAssumedInformation = false;
2741 }
2742 }
2743 return UsedAssumedInformation;
2744}
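For a two-way branch, the routine above keeps only the edge selected by the assumed-constant condition; if no simplified value is available yet, both edges stay dead for now, and if the condition is not a constant, both edges become alive. A small standalone sketch of that selection (the CondInfo enum and pickAliveSuccessors are illustrative names):

#include <vector>

enum class CondInfo { Unknown, KnownTrue, KnownFalse, NotConstant };

// Successor indices of a two-way branch: index 0 is taken when the condition
// is true, index 1 when it is false (matching the operand order of a BranchInst).
static std::vector<int> pickAliveSuccessors(CondInfo C) {
  switch (C) {
  case CondInfo::Unknown:
    return {};        // no simplified value yet: optimistically keep both edges dead
  case CondInfo::KnownTrue:
    return {0};       // only the "true" edge can be reached
  case CondInfo::KnownFalse:
    return {1};       // only the "false" edge can be reached
  case CondInfo::NotConstant:
    return {0, 1};    // nothing known: both edges are alive
  }
  return {0, 1};
}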
2745
2746static bool
2747identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
2748 AbstractAttribute &AA,
2749 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2750 bool UsedAssumedInformation = false;
2751 Optional<ConstantInt *> CI =
2752 getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
2753 if (!CI.hasValue()) {
2754 // No value yet, assume all edges are dead.
2755 } else if (CI.getValue()) {
2756 for (auto &CaseIt : SI.cases()) {
2757 if (CaseIt.getCaseValue() == CI.getValue()) {
2758 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
2759 return UsedAssumedInformation;
2760 }
2761 }
2762 AliveSuccessors.push_back(&SI.getDefaultDest()->front());
2763 return UsedAssumedInformation;
2764 } else {
2765 for (const BasicBlock *SuccBB : successors(SI.getParent()))
2766 AliveSuccessors.push_back(&SuccBB->front());
2767 }
2768 return UsedAssumedInformation;
2769}
2770
2771ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
2772 ChangeStatus Change = ChangeStatus::UNCHANGED;
2773
2774 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
2775 << getAssociatedFunction()->size() << "] BBs and "
2776 << ToBeExploredFrom.size() << " exploration points and "
2777 << KnownDeadEnds.size() << " known dead ends\n");
2778
2779 // Copy and clear the list of instructions we need to explore from. It is
2780 // refilled with instructions the next update has to look at.
2781 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
2782 ToBeExploredFrom.end());
2783 decltype(ToBeExploredFrom) NewToBeExploredFrom;
2784
2785 SmallVector<const Instruction *, 8> AliveSuccessors;
2786 while (!Worklist.empty()) {
2787 const Instruction *I = Worklist.pop_back_val();
2788 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
2789
2790 AliveSuccessors.clear();
2791
2792 bool UsedAssumedInformation = false;
2793 switch (I->getOpcode()) {
2794 // TODO: look for (assumed) UB to backwards propagate "deadness".
2795 default:
2796 if (I->isTerminator()) {
2797 for (const BasicBlock *SuccBB : successors(I->getParent()))
2798 AliveSuccessors.push_back(&SuccBB->front());
2799 } else {
2800 AliveSuccessors.push_back(I->getNextNode());
2801 }
2802 break;
2803 case Instruction::Call:
2804 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
2805 *this, AliveSuccessors);
2806 break;
2807 case Instruction::Invoke:
2808 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
2809 *this, AliveSuccessors);
2810 break;
2811 case Instruction::Br:
2812 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
2813 *this, AliveSuccessors);
2814 break;
2815 case Instruction::Switch:
2816 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
2817 *this, AliveSuccessors);
2818 break;
2819 }
2820
2821 if (UsedAssumedInformation) {
2822 NewToBeExploredFrom.insert(I);
2823 } else {
2824 Change = ChangeStatus::CHANGED;
2825 if (AliveSuccessors.empty() ||
2826 (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
2827 KnownDeadEnds.insert(I);
2828 }
2829
2830 LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
2831 << AliveSuccessors.size() << " UsedAssumedInformation: "
2832 << UsedAssumedInformation << "\n");
2833
2834 for (const Instruction *AliveSuccessor : AliveSuccessors) {
2835 if (!I->isTerminator()) {
2836 assert(AliveSuccessors.size() == 1 &&
2837 "Non-terminator expected to have a single successor!");
2838 Worklist.push_back(AliveSuccessor);
2839 } else {
2840 if (assumeLive(A, *AliveSuccessor->getParent()))
2841 Worklist.push_back(AliveSuccessor);
2842 }
2843 }
2844 }
2845
2846 ToBeExploredFrom = std::move(NewToBeExploredFrom);
2847
2848 // If we know everything is live there is no need to query for liveness.
2849 // Instead, indicating a pessimistic fixpoint will cause the state to be
2850 // "invalid" and all queries to be answered conservatively without lookups.
2851 // To be in this state we have to (1) have finished the exploration, (2) not
2852 // have ruled any unreachable code dead, and (3) not have discovered any
2853 // non-trivial dead end.
2854 if (ToBeExploredFrom.empty() &&
2855 getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
2856 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
2857 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
2858 }))
2859 return indicatePessimisticFixpoint();
2860 return Change;
2861}
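The update is a forward worklist exploration: start at the entry instruction, ask each reached instruction which successors can be alive, and re-queue any instruction whose answer still rests on assumed (not yet known) information. An editorial standalone sketch of that skeleton over a toy node graph; Node, exploreLive, and the fields are invented:

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<const Node *> AliveSuccessors; // already pruned, e.g. by pickAliveSuccessors
  bool UsesAssumedInfo = false;              // answer may change in a later update
};

// Returns the set of nodes assumed live; nodes whose successor set still
// depends on assumptions are re-queued, mirroring NewToBeExploredFrom above.
static std::unordered_set<const Node *>
exploreLive(const Node *Entry, std::vector<const Node *> &ReexploreNext) {
  std::unordered_set<const Node *> Live;
  std::vector<const Node *> Worklist{Entry};
  while (!Worklist.empty()) {
    const Node *N = Worklist.back();
    Worklist.pop_back();
    if (!Live.insert(N).second)
      continue;                       // already assumed live
    if (N->UsesAssumedInfo)
      ReexploreNext.push_back(N);     // look at this one again next update
    for (const Node *Succ : N->AliveSuccessors)
      Worklist.push_back(Succ);
  }
  return Live;
}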
2862
2863/// Liveness information for a call site.
2864struct AAIsDeadCallSite final : AAIsDeadFunction {
2865 AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
2866
2867 /// See AbstractAttribute::initialize(...).
2868 void initialize(Attributor &A) override {
2869 // TODO: Once we have call site specific value information we can provide
2870 // call site specific liveness information and then it makes
2871 // sense to specialize attributes for call sites instead of
2872 // redirecting requests to the callee.
2873 llvm_unreachable("Abstract attributes for liveness are not "
2874 "supported for call sites yet!");
2875 }
2876
2877 /// See AbstractAttribute::updateImpl(...).
2878 ChangeStatus updateImpl(Attributor &A) override {
2879 return indicatePessimisticFixpoint();
2880 }
2881
2882 /// See AbstractAttribute::trackStatistics()
2883 void trackStatistics() const override {}
2884};
2885
2886/// -------------------- Dereferenceable Argument Attribute --------------------
2887
2888template <>
2889ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
2890 const DerefState &R) {
2891 ChangeStatus CS0 =
2892 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
2893 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
2894 return CS0 | CS1;
2895}
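The specialization simply clamps the two sub-states independently and merges the change results, with CHANGED absorbing UNCHANGED. A tiny editorial sketch of that merge rule (the enum mirrors the real ChangeStatus only in spirit):

enum class ChangeStatus { UNCHANGED, CHANGED };

// Combining two change results: the whole state changed if either part did.
static ChangeStatus operator|(ChangeStatus A, ChangeStatus B) {
  return (A == ChangeStatus::CHANGED || B == ChangeStatus::CHANGED)
             ? ChangeStatus::CHANGED
             : ChangeStatus::UNCHANGED;
}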
2896
2897struct AADereferenceableImpl : AADereferenceable {
2898 AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
2899 using StateType = DerefState;
2900
2901 void initialize(Attributor &A) override {
2902 SmallVector<Attribute, 4> Attrs;
2903 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
2904 Attrs);
2905 for (const Attribute &Attr : Attrs)
2906 takeKnownDerefBytesMaximum(Attr.getValueAsInt());
2907
2908 NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
2909
2910 const IRPosition &IRP = this->getIRPosition();
2911 bool IsFnInterface = IRP.isFnInterfaceKind();
2912 const Function *FnScope = IRP.getAnchorScope();
2913 if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
2914 indicatePessimisticFixpoint();
2915 }
2916
2917 /// See AbstractAttribute::getState()
2918 /// {
2919 StateType &getState() override { return *this; }
2920 const StateType &getState() const override { return *this; }
2921 /// }
2922
2923 /// See AAFromMustBeExecutedContext
2924 bool followUse(Attributor &A, const Use *U, const Instruction *I) {
2925 bool IsNonNull = false;
2926 bool TrackUse = false;
2927 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
2928 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
2929 takeKnownDerefBytesMaximum(DerefBytes);
2930 return TrackUse;
2931 }
2932
2933 void getDeducedAttributes(LLVMContext &Ctx,
2934 SmallVectorImpl<Attribute> &Attrs) const override {
2935 // TODO: Add *_globally support
2936 if (isAssumedNonNull())
2937 Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
2938 Ctx, getAssumedDereferenceableBytes()));
2939 else
2940 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
2941 Ctx, getAssumedDereferenceableBytes()));
2942 }
2943
2944 /// See AbstractAttribute::getAsStr().
2945 const std::string getAsStr() const override {
2946 if (!getAssumedDereferenceableBytes())
2947 return "unknown-dereferenceable";
2948 return std::string("dereferenceable") +
2949 (isAssumedNonNull() ? "" : "_or_null") +
2950 (isAssumedGlobal() ? "_globally" : "") + "<" +
2951 std::to_string(getKnownDereferenceableBytes()) + "-" +
2952 std::to_string(getAssumedDereferenceableBytes()) + ">";
2953 }
2954};
2955
2956/// Dereferenceable attribute for a floating value.
2957struct AADereferenceableFloating
2958 : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
2959 using Base =
2960 AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
2961 AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
2962
2963 /// See AbstractAttribute::updateImpl(...).
2964 ChangeStatus updateImpl(Attributor &A) override {
2965 ChangeStatus Change = Base::updateImpl(A);
2966
2967 const DataLayout &DL = A.getDataLayout();
2968
2969 auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
2970 unsigned IdxWidth =
2971 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
2972 APInt Offset(IdxWidth, 0);
2973 const Value *Base =
2974 V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
2975
2976 const auto &AA =
2977 A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
2978 int64_t DerefBytes = 0;
2979 if (!Stripped && this == &AA) {
2980 // Use IR information if we did not strip anything.
2981 // TODO: track globally.
2982 bool CanBeNull;
2983 DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
2984 T.GlobalState.indicatePessimisticFixpoint();
2985 } else {
2986 const DerefState &DS = static_cast<const DerefState &>(AA.getState());
2987 DerefBytes = DS.DerefBytesState.getAssumed();
2988 T.GlobalState &= DS.GlobalState;
2989 }
2990
2991 // For now we do not try to "increase" dereferenceability due to negative
2992 // indices as we first have to come up with code to deal with loops and
2993 // with overflows of the dereferenceable bytes.
2994 int64_t OffsetSExt = Offset.getSExtValue();
2995 if (OffsetSExt < 0)
2996 OffsetSExt = 0;
2997
2998 T.takeAssumedDerefBytesMinimum(
2999 std::max(int64_t(0), DerefBytes - OffsetSExt));
3000
3001 if (this == &AA) {
3002 if (!Stripped) {
3003 // If nothing was stripped IR information is all we got.
3004 T.takeKnownDerefBytesMaximum(
3005 std::max(int64_t(0), DerefBytes - OffsetSExt));
3006 T.indicatePessimisticFixpoint();
3007 } else if (OffsetSExt > 0) {
3008 // If something was stripped but there is circular reasoning we look
3009 // for the offset. If it is positive we basically decrease the
3010 // dereferenceable bytes in a circular loop now, which will simply
3011 // drive them down to the known value in a very slow way which we
3012 // can accelerate.
3013 T.indicatePessimisticFixpoint();
3014 }
3015 }
3016
3017 return T.isValidState();
3018 };
3019
3020 DerefState T;
3021 if (!genericValueTraversal<AADereferenceable, DerefState>(
3022 A, getIRPosition(), *this, T, VisitValueCB))
3023 return indicatePessimisticFixpoint();
3024
3025 return Change | clampStateAndIndicateChange(getState(), T);
3026 }
3027
3028 /// See AbstractAttribute::trackStatistics()
3029 void trackStatistics() const override {
3030     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3031 }
3032};
3033
3034/// Dereferenceable attribute for a return value.
3035struct AADereferenceableReturned final
3036 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3037 DerefState> {
3038 AADereferenceableReturned(const IRPosition &IRP)
3039 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3040 DerefState>(IRP) {}
3041
3042 /// See AbstractAttribute::trackStatistics()
3043 void trackStatistics() const override {
3044     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3045 }
3046};
3047
3048/// Dereferenceable attribute for an argument
3049struct AADereferenceableArgument final
3050 : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3051 AADereferenceable, AADereferenceableImpl, DerefState> {
3052 using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3053 AADereferenceable, AADereferenceableImpl, DerefState>;
3054 AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3055
3056 /// See AbstractAttribute::trackStatistics()
3057 void trackStatistics() const override {
3058     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3059 }
3060};
3061
3062/// Dereferenceable attribute for a call site argument.
3063struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3064 AADereferenceableCallSiteArgument(const IRPosition &IRP)
3065 : AADereferenceableFloating(IRP) {}
3066
3067 /// See AbstractAttribute::trackStatistics()
3068 void trackStatistics() const override {
3069     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3070 }
3071};
3072
3073/// Dereferenceable attribute deduction for a call site return value.
3074struct AADereferenceableCallSiteReturned final
3075 : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3076 AADereferenceable, AADereferenceableImpl> {
3077 using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3078 AADereferenceable, AADereferenceableImpl>;
3079 AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3080
3081 /// See AbstractAttribute::trackStatistics()
3082 void trackStatistics() const override {
3083     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3084 }
3085};
3086
3087// ------------------------ Align Argument Attribute ------------------------
3088
3089struct AAAlignImpl : AAAlign {
3090 AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3091
3092 /// See AbstractAttribute::initialize(...).
3093 void initialize(Attributor &A) override {
3094 SmallVector<Attribute, 4> Attrs;
3095 getAttrs({Attribute::Alignment}, Attrs);
3096 for (const Attribute &Attr : Attrs)
3097 takeKnownMaximum(Attr.getValueAsInt());
3098
3099 if (getIRPosition().isFnInterfaceKind() &&
3100 (!getAssociatedFunction() ||
3101 !getAssociatedFunction()->hasExactDefinition()))
3102 indicatePessimisticFixpoint();
3103 }
3104
3105 /// See AbstractAttribute::manifest(...).
3106 ChangeStatus manifest(Attributor &A) override {
3107 ChangeStatus Changed = ChangeStatus::UNCHANGED;
3108
3109 // Check for users that allow alignment annotations.
3110 Value &AnchorVal = getIRPosition().getAnchorValue();
3111 for (const Use &U : AnchorVal.uses()) {
3112 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3113 if (SI->getPointerOperand() == &AnchorVal)
3114 if (SI->getAlignment() < getAssumedAlign()) {
3115             STATS_DECLTRACK(AAAlign, Store,
3116                             "Number of times alignment added to a store");
3117 SI->setAlignment(Align(getAssumedAlign()));
3118 Changed = ChangeStatus::CHANGED;
3119 }
3120 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3121 if (LI->getPointerOperand() == &AnchorVal)
3122 if (LI->getAlignment() < getAssumedAlign()) {
3123 LI->setAlignment(Align(getAssumedAlign()));
3124           STATS_DECLTRACK(AAAlign, Load,
3125                           "Number of times alignment added to a load");
3126 Changed = ChangeStatus::CHANGED;
3127 }
3128 }
3129 }
3130
3131 return AAAlign::manifest(A) | Changed;
3132 }
3133
3134   // TODO: Provide a helper to determine the implied ABI alignment and check, in
3135   //       the existing manifest method and a new one for AAAlignImpl, that value
3136   //       to avoid making the alignment explicit if it did not improve.
3137
3138 /// See AbstractAttribute::getDeducedAttributes
3139 virtual void
3140 getDeducedAttributes(LLVMContext &Ctx,
3141 SmallVectorImpl<Attribute> &Attrs) const override {
3142 if (getAssumedAlign() > 1)
3143 Attrs.emplace_back(
3144 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3145 }
3146
3147 /// See AbstractAttribute::getAsStr().
3148 const std::string getAsStr() const override {
3149 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3150 "-" + std::to_string(getAssumedAlign()) + ">")
3151 : "unknown-align";
3152 }
3153};
3154
3155/// Align attribute for a floating value.
3156struct AAAlignFloating : AAAlignImpl {
3157 AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}
3158
3159 /// See AbstractAttribute::updateImpl(...).
3160 ChangeStatus updateImpl(Attributor &A) override {
3161 const DataLayout &DL = A.getDataLayout();
3162
3163 auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3164 bool Stripped) -> bool {
3165 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3166 if (!Stripped && this == &AA) {
3167 // Use only IR information if we did not strip anything.
3168 const MaybeAlign PA = V.getPointerAlignment(DL);
3169 T.takeKnownMaximum(PA ? PA->value() : 0);
3170 T.indicatePessimisticFixpoint();
3171 } else {
3172 // Use abstract attribute information.
3173 const AAAlign::StateType &DS =
3174 static_cast<const AAAlign::StateType &>(AA.getState());
3175 T ^= DS;
3176 }
3177 return T.isValidState();
3178 };
3179
3180 StateType T;
3181 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3182 VisitValueCB))
3183 return indicatePessimisticFixpoint();
3184
3185     // TODO: If we know we visited all incoming values, and thus none are assumed
3186 // dead, we can take the known information from the state T.
3187 return clampStateAndIndicateChange(getState(), T);
3188 }
3189
3190 /// See AbstractAttribute::trackStatistics()
3191   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3192};
3193
3194/// Align attribute for function return value.
3195struct AAAlignReturned final
3196 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3197 AAAlignReturned(const IRPosition &IRP)
3198 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3199
3200 /// See AbstractAttribute::trackStatistics()
3201   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3202};
3203
3204/// Align attribute for function argument.
3205struct AAAlignArgument final
3206 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3207 AAAlignArgument(const IRPosition &IRP)
3208 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}
3209
3210 /// See AbstractAttribute::trackStatistics()
3211   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3212};
3213
3214struct AAAlignCallSiteArgument final : AAAlignFloating {
3215 AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3216
3217 /// See AbstractAttribute::manifest(...).
3218 ChangeStatus manifest(Attributor &A) override {
3219 return AAAlignImpl::manifest(A);
3220 }
3221
3222 /// See AbstractAttribute::trackStatistics()
3223   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3224};
3225
3226/// Align attribute deduction for a call site return value.
3227struct AAAlignCallSiteReturned final : AAAlignImpl {
3228 AAAlignCallSiteReturned(const IRPosition &IRP) : AAAlignImpl(IRP) {}
3229
3230 /// See AbstractAttribute::initialize(...).
3231 void initialize(Attributor &A) override {
3232 AAAlignImpl::initialize(A);
3233 Function *F = getAssociatedFunction();
3234 if (!F)
3235 indicatePessimisticFixpoint();
3236 }
3237
3238 /// See AbstractAttribute::updateImpl(...).
3239 ChangeStatus updateImpl(Attributor &A) override {
3240 // TODO: Once we have call site specific value information we can provide
3241 // call site specific liveness information and then it makes
3242 // sense to specialize attributes for call sites arguments instead of
3243 // redirecting requests to the callee argument.
3244 Function *F = getAssociatedFunction();
3245 const IRPosition &FnPos = IRPosition::returned(*F);
3246 auto &FnAA = A.getAAFor<AAAlign>(*this, FnPos);
3247 return clampStateAndIndicateChange(
3248 getState(), static_cast<const AAAlign::StateType &>(FnAA.getState()));
3249 }
3250
3251 /// See AbstractAttribute::trackStatistics()
3252   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3253};
3254
3255/// ------------------ Function No-Return Attribute ----------------------------
3256struct AANoReturnImpl : public AANoReturn {
3257 AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3258
3259 /// See AbstractAttribute::initialize(...).
3260 void initialize(Attributor &A) override {
3261 AANoReturn::initialize(A);
3262 Function *F = getAssociatedFunction();
3263 if (!F)
3264 indicatePessimisticFixpoint();
3265 }
3266
3267 /// See AbstractAttribute::getAsStr().
3268 const std::string getAsStr() const override {
3269 return getAssumed() ? "noreturn" : "may-return";
3270 }
3271
3272 /// See AbstractAttribute::updateImpl(Attributor &A).
3273 virtual ChangeStatus updateImpl(Attributor &A) override {
3274 auto CheckForNoReturn = [](Instruction &) { return false; };
3275 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3276 {(unsigned)Instruction::Ret}))
3277 return indicatePessimisticFixpoint();
3278 return ChangeStatus::UNCHANGED;
3279 }
3280};
3281
3282struct AANoReturnFunction final : AANoReturnImpl {
3283 AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3284
3285 /// See AbstractAttribute::trackStatistics()
3286   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3287};
3288
3289/// NoReturn attribute deduction for call sites.
3290struct AANoReturnCallSite final : AANoReturnImpl {
3291 AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3292
3293 /// See AbstractAttribute::updateImpl(...).
3294 ChangeStatus updateImpl(Attributor &A) override {
3295 // TODO: Once we have call site specific value information we can provide
3296 // call site specific liveness information and then it makes
3297 // sense to specialize attributes for call sites arguments instead of
3298 // redirecting requests to the callee argument.
3299 Function *F = getAssociatedFunction();
3300 const IRPosition &FnPos = IRPosition::function(*F);
3301 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3302 return clampStateAndIndicateChange(
3303 getState(),
3304 static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3305 }
3306
3307 /// See AbstractAttribute::trackStatistics()
3308   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3309};
3310
3311/// ----------------------- Variable Capturing ---------------------------------
3312
3313/// A class to hold the state for no-capture attributes.
3314struct AANoCaptureImpl : public AANoCapture {
3315 AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3316
3317 /// See AbstractAttribute::initialize(...).
3318 void initialize(Attributor &A) override {
3319 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3320 indicateOptimisticFixpoint();
3321 return;
3322 }
3323 Function *AnchorScope = getAnchorScope();
3324 if (isFnInterfaceKind() &&
3325 (!AnchorScope || !AnchorScope->hasExactDefinition())) {
3326 indicatePessimisticFixpoint();
3327 return;
3328 }
3329
3330 // You cannot "capture" null in the default address space.
3331 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3332 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3333 indicateOptimisticFixpoint();
3334 return;
3335 }
3336
3337 const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3338
3339 // Check what state the associated function can actually capture.
3340 if (F)
3341 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3342 else
3343 indicatePessimisticFixpoint();
3344 }
3345
3346 /// See AbstractAttribute::updateImpl(...).
3347 ChangeStatus updateImpl(Attributor &A) override;
3348
3349 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3350 virtual void
3351 getDeducedAttributes(LLVMContext &Ctx,
3352 SmallVectorImpl<Attribute> &Attrs) const override {
3353 if (!isAssumedNoCaptureMaybeReturned())
3354 return;
3355
3356 if (getArgNo() >= 0) {
3357 if (isAssumedNoCapture())
3358 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3359 else if (ManifestInternal)
3360 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3361 }
3362 }
3363
3364 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3365 /// depending on the ability of the function associated with \p IRP to capture
3366 /// state in memory and through "returning/throwing", respectively.
3367 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3368 const Function &F,
3369 BitIntegerState &State) {
3370 // TODO: Once we have memory behavior attributes we should use them here.
3371
3372 // If we know we cannot communicate or write to memory, we do not care about
3373 // ptr2int anymore.
3374 if (F.onlyReadsMemory() && F.doesNotThrow() &&
3375 F.getReturnType()->isVoidTy()) {
3376 State.addKnownBits(NO_CAPTURE);
3377 return;
3378 }
3379
3380     // A function cannot capture state in memory if it only reads memory; it can,
3381     // however, return/throw state, and the state might be influenced by the
3382 // pointer value, e.g., loading from a returned pointer might reveal a bit.
3383 if (F.onlyReadsMemory())
3384 State.addKnownBits(NOT_CAPTURED_IN_MEM);
3385
3386     // A function cannot communicate state back if it does not throw
3387     // exceptions and does not return values.
3388 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3389 State.addKnownBits(NOT_CAPTURED_IN_RET);
3390
3391 // Check existing "returned" attributes.
3392 int ArgNo = IRP.getArgNo();
3393 if (F.doesNotThrow() && ArgNo >= 0) {
3394 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3395 if (F.hasParamAttribute(u, Attribute::Returned)) {
3396 if (u == unsigned(ArgNo))
3397 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3398 else if (F.onlyReadsMemory())
3399 State.addKnownBits(NO_CAPTURE);
3400 else
3401 State.addKnownBits(NOT_CAPTURED_IN_RET);
3402 break;
3403 }
3404 }
3405 }
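  // Illustrative example (hypothetical signatures): for a readonly, nounwind
  // function returning void, the early exit above adds NO_CAPTURE as known;
  // for a readonly function that does return a value, only NOT_CAPTURED_IN_MEM
  // becomes known, since the return value may still leak information derived
  // from the pointer.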
3406
3407 /// See AbstractState::getAsStr().
3408 const std::string getAsStr() const override {
3409 if (isKnownNoCapture())
3410 return "known not-captured";
3411 if (isAssumedNoCapture())
3412 return "assumed not-captured";
3413 if (isKnownNoCaptureMaybeReturned())
3414 return "known not-captured-maybe-returned";
3415 if (isAssumedNoCaptureMaybeReturned())
3416 return "assumed not-captured-maybe-returned";
3417 return "assumed-captured";
3418 }
3419};
3420
3421/// Attributor-aware capture tracker.
3422struct AACaptureUseTracker final : public CaptureTracker {
3423
3424 /// Create a capture tracker that can lookup in-flight abstract attributes
3425 /// through the Attributor \p A.
3426 ///
3427 /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3428 /// search is stopped. If a use leads to a return instruction,
3429 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3430 /// If a use leads to a ptr2int which may capture the value,
3431 /// \p CapturedInInteger is set. If a use is found that is currently assumed
3432 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3433 /// set. All values in \p PotentialCopies are later tracked as well. For every
3434 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3435 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3436 /// conservatively set to true.
3437 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3438 const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3439 SmallVectorImpl<const Value *> &PotentialCopies,
3440 unsigned &RemainingUsesToExplore)
3441 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3442 PotentialCopies(PotentialCopies),
3443 RemainingUsesToExplore(RemainingUsesToExplore) {}
3444
3445   /// Determine if \p V may be captured. *Also updates the state!*
3446 bool valueMayBeCaptured(const Value *V) {
3447 if (V->getType()->isPointerTy()) {
3448 PointerMayBeCaptured(V, this);
3449 } else {
3450 State.indicatePessimisticFixpoint();
3451 }
3452 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3453 }
3454
3455 /// See CaptureTracker::tooManyUses().
3456 void tooManyUses() override {
3457 State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3458 }
3459
3460 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3461 if (CaptureTracker::isDereferenceableOrNull(O, DL))
3462 return true;
3463 const auto &DerefAA =
3464 A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
3465 return DerefAA.getAssumedDereferenceableBytes();
3466 }
3467
3468 /// See CaptureTracker::captured(...).
3469 bool captured(const Use *U) override {
3470 Instruction *UInst = cast<Instruction>(U->getUser());
3471     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3472                       << "\n");
3474 // Because we may reuse the tracker multiple times we keep track of the
3475 // number of explored uses ourselves as well.
3476 if (RemainingUsesToExplore-- == 0) {
3477       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3478 return isCapturedIn(/* Memory */ true, /* Integer */ true,
3479 /* Return */ true);
3480 }
3481
3482 // Deal with ptr2int by following uses.
3483 if (isa<PtrToIntInst>(UInst)) {
3484       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3485 return valueMayBeCaptured(UInst);
3486 }
3487
3488 // Explicitly catch return instructions.
3489 if (isa<ReturnInst>(UInst))
3490 return isCapturedIn(/* Memory */ false, /* Integer */ false,
3491 /* Return */ true);
3492
3493 // For now we only use special logic for call sites. However, the tracker
3494 // itself knows about a lot of other non-capturing cases already.
3495 CallSite CS(UInst);
3496 if (!CS || !CS.isArgOperand(U))
3497 return isCapturedIn(/* Memory */ true, /* Integer */ true,
3498 /* Return */ true);
3499
3500 unsigned ArgNo = CS.getArgumentNo(U);
3501 const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3502     // If we have an abstract no-capture attribute for the argument we can use
3503 // it to justify a non-capture attribute here. This allows recursion!
3504 auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3505 if (ArgNoCaptureAA.isAssumedNoCapture())
3506 return isCapturedIn(/* Memory */ false, /* Integer */ false,
3507 /* Return */ false);
3508 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3509 addPotentialCopy(CS);
3510 return isCapturedIn(/* Memory */ false, /* Integer */ false,
3511 /* Return */ false);
3512 }
3513
3514     // Lastly, we could not find a reason why no-capture can be assumed, so we don't.
3515 return isCapturedIn(/* Memory */ true, /* Integer */ true,
3516 /* Return */ true);
3517 }
3518
3519 /// Register \p CS as potential copy of the value we are checking.
3520 void addPotentialCopy(CallSite CS) {
3521 PotentialCopies.push_back(CS.getInstruction());
3522 }
3523
3524 /// See CaptureTracker::shouldExplore(...).
3525 bool shouldExplore(const Use *U) override {
3526 // Check liveness.
3527 return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
3528 }
3529
3530 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
3531 /// \p CapturedInRet, then return the appropriate value for use in the
3532 /// CaptureTracker::captured() interface.
3533 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
3534 bool CapturedInRet) {
3535     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
3536                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
3537 if (CapturedInMem)
3538 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
3539 if (CapturedInInt)
3540 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
3541 if (CapturedInRet)
3542 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
3543 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3544 }
3545
3546private:
3547 /// The attributor providing in-flight abstract attributes.
3548 Attributor &A;
3549
3550 /// The abstract attribute currently updated.
3551 AANoCapture &NoCaptureAA;
3552
3553 /// The abstract liveness state.
3554 const AAIsDead &IsDeadAA;
3555
3556 /// The state currently updated.
3557 AANoCapture::StateType &State;
3558
3559 /// Set of potential copies of the tracked value.
3560 SmallVectorImpl<const Value *> &PotentialCopies;
3561
3562 /// Global counter to limit the number of explored uses.
3563 unsigned &RemainingUsesToExplore;
3564};
3565
3566ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
3567 const IRPosition &IRP = getIRPosition();
3568 const Value *V =
3569 getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
3570 if (!V)
3571 return indicatePessimisticFixpoint();
3572
3573 const Function *F =
3574 getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
3575   assert(F && "Expected a function!");
3576 const IRPosition &FnPos = IRPosition::function(*F);
3577 const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos);
3578
3579 AANoCapture::StateType T;
3580
3581 // Readonly means we cannot capture through memory.
3582 const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3583 if (FnMemAA.isAssumedReadOnly()) {
3584 T.addKnownBits(NOT_CAPTURED_IN_MEM);
3585 if (FnMemAA.isKnownReadOnly())
3586 addKnownBits(NOT_CAPTURED_IN_MEM);
3587 }
3588
3589   // Make sure all returned values are different from the underlying value.
3590 // TODO: we could do this in a more sophisticated way inside
3591 // AAReturnedValues, e.g., track all values that escape through returns
3592 // directly somehow.
3593 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
3594 bool SeenConstant = false;
3595 for (auto &It : RVAA.returned_values()) {
3596 if (isa<Constant>(It.first)) {
3597 if (SeenConstant)
3598 return false;
3599 SeenConstant = true;
3600 } else if (!isa<Argument>(It.first) ||
3601 It.first == getAssociatedArgument())
3602 return false;
3603 }
3604 return true;
3605 };
3606
3607 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, FnPos);
3608 if (NoUnwindAA.isAssumedNoUnwind()) {
3609 bool IsVoidTy = F->getReturnType()->isVoidTy();
3610 const AAReturnedValues *RVAA =
3611 IsVoidTy ? nullptr : &A.getAAFor<AAReturnedValues>(*this, FnPos);
3612 if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
3613 T.addKnownBits(NOT_CAPTURED_IN_RET);
3614 if (T.isKnown(NOT_CAPTURED_IN_MEM))
3615 return ChangeStatus::UNCHANGED;
3616 if (NoUnwindAA.isKnownNoUnwind() &&
3617 (IsVoidTy || RVAA->getState().isAtFixpoint())) {
3618 addKnownBits(NOT_CAPTURED_IN_RET);
3619 if (isKnown(NOT_CAPTURED_IN_MEM))
3620 return indicateOptimisticFixpoint();
3621 }
3622 }
3623 }
3624
3625 // Use the CaptureTracker interface and logic with the specialized tracker,
3626 // defined in AACaptureUseTracker, that can look at in-flight abstract
3627 // attributes and directly updates the assumed state.
3628 SmallVector<const Value *, 4> PotentialCopies;
3629 unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
3630 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
3631 RemainingUsesToExplore);
3632
3633 // Check all potential copies of the associated value until we can assume
3634 // none will be captured or we have to assume at least one might be.
3635 unsigned Idx = 0;
3636 PotentialCopies.push_back(V);
3637 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
3638 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
3639
3640 AANoCapture::StateType &S = getState();
3641 auto Assumed = S.getAssumed();
3642 S.intersectAssumedBits(T.getAssumed());
3643 if (!isAssumedNoCaptureMaybeReturned())
3644 return indicatePessimisticFixpoint();
3645 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
3646 : ChangeStatus::CHANGED;
3647}
3648
3649/// NoCapture attribute for function arguments.
3650struct AANoCaptureArgument final : AANoCaptureImpl {
3651 AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3652
3653 /// See AbstractAttribute::trackStatistics()
3654   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
3655};
3656
3657/// NoCapture attribute for call site arguments.
3658struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
3659 AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3660
3661 /// See AbstractAttribute::updateImpl(...).
3662 ChangeStatus updateImpl(Attributor &A) override {
3663 // TODO: Once we have call site specific value information we can provide
3664 // call site specific liveness information and then it makes
3665 // sense to specialize attributes for call sites arguments instead of
3666 // redirecting requests to the callee argument.
3667 Argument *Arg = getAssociatedArgument();
3668 if (!Arg)
3669 return indicatePessimisticFixpoint();
3670 const IRPosition &ArgPos = IRPosition::argument(*Arg);
3671 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
3672 return clampStateAndIndicateChange(
3673 getState(),
3674 static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3675 }
3676
3677 /// See AbstractAttribute::trackStatistics()
3678   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
3679};
3680
3681/// NoCapture attribute for floating values.
3682struct AANoCaptureFloating final : AANoCaptureImpl {
3683 AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3684
3685 /// See AbstractAttribute::trackStatistics()
3686 void trackStatistics() const override {
3687     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
3688 }
3689};
3690
3691/// NoCapture attribute for function return value.
3692struct AANoCaptureReturned final : AANoCaptureImpl {
3693 AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
3694     llvm_unreachable("NoCapture is not applicable to function returns!");
3695 }
3696
3697 /// See AbstractAttribute::initialize(...).
3698 void initialize(Attributor &A) override {
3699     llvm_unreachable("NoCapture is not applicable to function returns!");
3700 }
3701
3702 /// See AbstractAttribute::updateImpl(...).
3703 ChangeStatus updateImpl(Attributor &A) override {
3704     llvm_unreachable("NoCapture is not applicable to function returns!");
3705 }
3706
3707 /// See AbstractAttribute::trackStatistics()
3708 void trackStatistics() const override {}
3709};
3710
3711/// NoCapture attribute deduction for a call site return value.
3712struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
3713 AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3714
3715 /// See AbstractAttribute::trackStatistics()
3716 void trackStatistics() const override {
3717     STATS_DECLTRACK_CSRET_ATTR(nocapture)
3718 }
3719};
3720
3721/// ------------------ Value Simplify Attribute ----------------------------
3722struct AAValueSimplifyImpl : AAValueSimplify {
3723 AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
3724
3725 /// See AbstractAttribute::getAsStr().
3726 const std::string getAsStr() const override {
3727 return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
3728 : "not-simple";
3729 }
3730
3731 /// See AbstractAttribute::trackStatistics()
3732 void trackStatistics() const override {}
3733
3734 /// See AAValueSimplify::getAssumedSimplifiedValue()
3735 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
3736 if (!getAssumed())
3737 return const_cast<Value *>(&getAssociatedValue());
3738 return SimplifiedAssociatedValue;
3739 }
3740 void initialize(Attributor &A) override {}
3741
3742   /// Helper function for querying AAValueSimplify and updating the candidate.
3743 /// \param QueryingValue Value trying to unify with SimplifiedValue
3744 /// \param AccumulatedSimplifiedValue Current simplification result.
3745 static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
3746 Value &QueryingValue,
3747 Optional<Value *> &AccumulatedSimplifiedValue) {
3748 // FIXME: Add a typecast support.
3749
3750 auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
3751 QueryingAA, IRPosition::value(QueryingValue));
3752
3753 Optional<Value *> QueryingValueSimplified =
3754 ValueSimpifyAA.getAssumedSimplifiedValue(A);
3755
3756 if (!QueryingValueSimplified.hasValue())
3757 return true;
3758
3759 if (!QueryingValueSimplified.getValue())
3760 return false;
3761
3762 Value &QueryingValueSimplifiedUnwrapped =
3763 *QueryingValueSimplified.getValue();
3764
3765 if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
3766 return true;
3767
3768 if (AccumulatedSimplifiedValue.hasValue())
3769 return AccumulatedSimplifiedValue == QueryingValueSimplified;
3770
3771     LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
3772                       << " is assumed to be "
3773                       << QueryingValueSimplifiedUnwrapped << "\n");
3775 AccumulatedSimplifiedValue = QueryingValueSimplified;
3776 return true;
3777 }
3778
3779 /// See AbstractAttribute::manifest(...).
3780 ChangeStatus manifest(Attributor &A) override {
3781 ChangeStatus Changed = ChangeStatus::UNCHANGED;
3782
3783 if (!SimplifiedAssociatedValue.hasValue() ||
3784 !SimplifiedAssociatedValue.getValue())
3785 return Changed;
3786
3787 if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
3788 // We can replace the AssociatedValue with the constant.
3789 Value &V = getAssociatedValue();
3790 if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
3791         LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
3792                           << "\n");
3793 V.replaceAllUsesWith(C);
3794 Changed = ChangeStatus::CHANGED;
3795 }
3796 }
3797
3798 return Changed | AAValueSimplify::manifest(A);
3799 }
3800
3801protected:
3802 // An assumed simplified value. Initially, it is set to Optional::None, which
3803   // means that the value is not clear under the current assumption. If in the
3804   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
3805   // returns the original associated value.
3806 Optional<Value *> SimplifiedAssociatedValue;
3807};
3808
3809struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
3810 AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3811
3812 void initialize(Attributor &A) override {
3813 AAValueSimplifyImpl::initialize(A);
3814 if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
3815 indicatePessimisticFixpoint();
3816 if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
3817 /* IgnoreSubsumingPositions */ true))
3818 indicatePessimisticFixpoint();
3819 }
3820
3821 /// See AbstractAttribute::updateImpl(...).
3822 ChangeStatus updateImpl(Attributor &A) override {
3823     // Byval is only replaceable if it is readonly; otherwise we would write into
3824 // the replaced value and not the copy that byval creates implicitly.
3825 Argument *Arg = getAssociatedArgument();
3826 if (Arg->hasByValAttr()) {
3827 const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
3828 if (!MemAA.isAssumedReadOnly())
3829 return indicatePessimisticFixpoint();
3830 }
3831
3832 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3833
3834 auto PredForCallSite = [&](AbstractCallSite ACS) {
3835 // Check if we have an associated argument or not (which can happen for
3836 // callback calls).
3837 Value *ArgOp = ACS.getCallArgOperand(getArgNo());
3838 if (!ArgOp)
3839 return false;
3840       // We can only propagate thread-independent values through callbacks.
3841       // This is different from direct/indirect call sites because for them we
3842       // know the thread executing the caller and callee is the same. For
3843       // callbacks this is not guaranteed, thus a thread-dependent value could
3844 // be different for the caller and callee, making it invalid to propagate.
3845 if (ACS.isCallbackCall())
3846         if (auto *C = dyn_cast<Constant>(ArgOp))
3847 if (C->isThreadDependent())
3848 return false;
3849 return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
3850 };
3851
3852 if (!A.checkForAllCallSites(PredForCallSite, *this, true))
3853 return indicatePessimisticFixpoint();
3854
3855     // If a candidate was found in this update, return CHANGED.
3856 return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3857 ? ChangeStatus::UNCHANGED
3858 : ChangeStatus ::CHANGED;
3859 }
3860
3861 /// See AbstractAttribute::trackStatistics()
3862 void trackStatistics() const override {
3863     STATS_DECLTRACK_ARG_ATTR(value_simplify)
3864 }
3865};
3866
3867struct AAValueSimplifyReturned : AAValueSimplifyImpl {
3868 AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3869
3870 /// See AbstractAttribute::updateImpl(...).
3871 ChangeStatus updateImpl(Attributor &A) override {
3872 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3873
3874 auto PredForReturned = [&](Value &V) {
3875 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3876 };
3877
3878 if (!A.checkForAllReturnedValues(PredForReturned, *this))
3879 return indicatePessimisticFixpoint();
3880
3881     // If a candidate was found in this update, return CHANGED.
3882 return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3883 ? ChangeStatus::UNCHANGED
3884 : ChangeStatus ::CHANGED;
3885 }
3886 /// See AbstractAttribute::trackStatistics()
3887 void trackStatistics() const override {
3888     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
3889 }
3890};
3891
3892struct AAValueSimplifyFloating : AAValueSimplifyImpl {
3893 AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3894
3895 /// See AbstractAttribute::initialize(...).
3896 void initialize(Attributor &A) override {
3897 Value &V = getAnchorValue();
3898
3899     // TODO: add other stuff
3900 if (isa<Constant>(V) || isa<UndefValue>(V))
3901 indicatePessimisticFixpoint();
3902 }
3903
3904 /// See AbstractAttribute::updateImpl(...).
3905 ChangeStatus updateImpl(Attributor &A) override {
3906 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3907
3908 auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
3909 auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
3910 if (!Stripped && this == &AA) {
3911 // TODO: Look the instruction and check recursively.
3912         LLVM_DEBUG(
3913             dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
3914                    << V << "\n");
3915 indicatePessimisticFixpoint();
3916 return false;
3917 }
3918 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3919 };
3920
3921 if (!genericValueTraversal<AAValueSimplify, BooleanState>(
3922 A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
3923 VisitValueCB))
3924 return indicatePessimisticFixpoint();
3925
3926     // If a candidate was found in this update, return CHANGED.
3927
3928 return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3929 ? ChangeStatus::UNCHANGED
3930 : ChangeStatus ::CHANGED;
3931 }
3932
3933 /// See AbstractAttribute::trackStatistics()
3934 void trackStatistics() const override {
3935     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
3936 }
3937};
3938
3939struct AAValueSimplifyFunction : AAValueSimplifyImpl {
3940 AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3941
3942 /// See AbstractAttribute::initialize(...).
3943 void initialize(Attributor &A) override {
3944 SimplifiedAssociatedValue = &getAnchorValue();
3945 indicateOptimisticFixpoint();
3946 }
3947 /// See AbstractAttribute::initialize(...).
3948 ChangeStatus updateImpl(Attributor &A) override {
3949     llvm_unreachable(
3950         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
3951 }
3952 /// See AbstractAttribute::trackStatistics()
3953 void trackStatistics() const override {
3954     STATS_DECLTRACK_FN_ATTR(value_simplify)
3955 }
3956};
3957
3958struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
3959 AAValueSimplifyCallSite(const IRPosition &IRP)
3960 : AAValueSimplifyFunction(IRP) {}
3961 /// See AbstractAttribute::trackStatistics()
3962 void trackStatistics() const override {
3963     STATS_DECLTRACK_CS_ATTR(value_simplify)
3964 }
3965};
3966
3967struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
3968 AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
3969 : AAValueSimplifyReturned(IRP) {}
3970
3971 void trackStatistics() const override {
3972     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
3973 }
3974};
3975struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
3976 AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
3977 : AAValueSimplifyFloating(IRP) {}
3978
3979 void trackStatistics() const override {
3980     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
3981 }
3982};
3983
3984/// ----------------------- Heap-To-Stack Conversion ---------------------------
3985struct AAHeapToStackImpl : public AAHeapToStack {
3986 AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
3987
3988 const std::string getAsStr() const override {
3989 return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
3990 }
3991
3992 ChangeStatus manifest(Attributor &A) override {
3993     assert(getState().isValidState() &&
3994            "Attempted to manifest an invalid state!");
1. Assuming the condition is true
2. '?' condition is true
3995
3996 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3997 Function *F = getAssociatedFunction();
3998 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
3999
4000 for (Instruction *MallocCall : MallocCalls) {
3. Assuming '__begin2' is not equal to '__end2'
4001 // This malloc cannot be replaced.
4002 if (BadMallocCalls.count(MallocCall))
4. Assuming the condition is false
5. Taking false branch
4003 continue;
4004
4005 for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4006       LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4007 A.deleteAfterManifest(*FreeCall);
4008 HasChanged = ChangeStatus::CHANGED;
4009 }
4010
4011     LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4012                       << "\n");
6. Assuming 'DebugFlag' is false
7. Loop condition is false.  Exiting loop
4013
4014 Constant *Size;
4015 if (isCallocLikeFn(MallocCall, TLI)) {
8. Assuming the condition is true
9. Taking true branch
4016 auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
10. The object is a 'ConstantInt'
4017 auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
11. Assuming the object is not a 'ConstantInt'
12. 'SizeT' initialized to a null pointer value
4018 APInt TotalSize = SizeT->getValue() * Num->getValue();
13. Called C++ object pointer is null
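        // A minimal sketch of how this path could be guarded (hypothetical; the
        // choice to skip the call when the size operand is not a ConstantInt is
        // an assumption, not taken from this report):
        //   auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
        //   if (!SizeT)
        //     continue; // bail out instead of dereferencing a null SizeT
        //   APInt TotalSize = SizeT->getValue() * Num->getValue();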
4019 Size =
4020 ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4021 } else {
4022 Size = cast<ConstantInt>(MallocCall->getOperand(0));
4023 }
4024
4025 unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4026 Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4027 Size, "", MallocCall->getNextNode());
4028
4029 if (AI->getType() != MallocCall->getType())
4030 AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4031 AI->getNextNode());
4032
4033 MallocCall->replaceAllUsesWith(AI);
4034
4035 if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4036 auto *NBB = II->getNormalDest();
4037 BranchInst::Create(NBB, MallocCall->getParent());
4038 A.deleteAfterManifest(*MallocCall);
4039 } else {
4040 A.deleteAfterManifest(*MallocCall);
4041 }
4042
4043 if (isCallocLikeFn(MallocCall, TLI)) {
4044 auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4045 AI->getNextNode());
4046 Value *Ops[] = {
4047 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4048 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4049
4050 Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4051 Module *M = F->getParent();
4052 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4053 CallInst::Create(Fn, Ops, "", BI->getNextNode());
4054 }
4055 HasChanged = ChangeStatus::CHANGED;
4056 }
4057
4058 return HasChanged;
4059 }
4060
4061 /// Collection of all malloc calls in a function.
4062 SmallSetVector<Instruction *, 4> MallocCalls;
4063
4064 /// Collection of malloc calls that cannot be converted.
4065 DenseSet<const Instruction *> BadMallocCalls;
4066
4067 /// A map for each malloc call to the set of associated free calls.
4068 DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4069
4070 ChangeStatus updateImpl(Attributor &A) override;
4071};
4072
4073ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4074 const Function *F = getAssociatedFunction();
4075 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4076
4077 MustBeExecutedContextExplorer &Explorer =
4078 A.getInfoCache().getMustBeExecutedContextExplorer();
4079
4080 auto FreeCheck = [&](Instruction &I) {
4081 const auto &Frees = FreesForMalloc.lookup(&I);
4082 if (Frees.size() != 1)
4083 return false;
4084 Instruction *UniqueFree = *Frees.begin();
4085 return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4086 };
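  // Illustrative example (assumed code shapes): a malloc whose single free is
  // found by the explorer in the must-be-executed context following it passes
  // FreeCheck; a malloc with two conditional frees fails the
  // Frees.size() != 1 test above.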
4087
4088 auto UsesCheck = [&](Instruction &I) {
4089 bool ValidUsesOnly = true;
4090 bool MustUse = true;
4091
4092 SmallPtrSet<const Use *, 8> Visited;
4093 SmallVector<const Use *, 8> Worklist;
4094
4095 for (Use &U : I.uses())
4096 Worklist.push_back(&U);
4097
4098 while (!Worklist.empty()) {
4099 const Use *U = Worklist.pop_back_val();
4100 if (!Visited.insert(U).second)
4101 continue;
4102
4103 auto *UserI = U->getUser();
4104
4105 if (isa<LoadInst>(UserI))
4106 continue;
4107 if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4108 if (SI->getValueOperand() == U->get()) {
4109           LLVM_DEBUG(dbgs()
4110                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4111 ValidUsesOnly = false;
4112 } else {
4113 // A store into the malloc'ed memory is fine.
4114 }
4115 continue;
4116 }
4117
4118       // NOTE: Right now, if a function that has the malloc pointer as an argument
4119 // frees memory, we assume that the malloc pointer is freed.
4120
4121 // TODO: Add nofree callsite argument attribute to indicate that pointer
4122 // argument is not freed.
4123 if (auto *CB = dyn_cast<CallBase>(UserI)) {
4124 if (!CB->isArgOperand(U))
4125 continue;
4126
4127 if (CB->isLifetimeStartOrEnd())
4128 continue;
4129
4130         // Record the free call for this malloc.
4131 if (isFreeCall(UserI, TLI)) {
4132 if (MustUse) {
4133 FreesForMalloc[&I].insert(
4134 cast<Instruction>(const_cast<User *>(UserI)));
4135 } else {
4136             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4137                               << *UserI << "\n");
4138 ValidUsesOnly = false;
4139 }
4140 continue;
4141 }
4142
4143         // If a function does not free memory, we are fine.
4144 const auto &NoFreeAA =
4145 A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(*CB));
4146
4147 unsigned ArgNo = CB->getArgOperandNo(U);
4148 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4149 *this, IRPosition::callsite_argument(*CB, ArgNo));
4150
4151 if (!NoCaptureAA.isAssumedNoCapture() || !NoFreeAA.isAssumedNoFree()) {
4152           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4153 ValidUsesOnly = false;
4154 }
4155 continue;
4156 }
4157
4158 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4159 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4160 MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4161 for (Use &U : UserI->uses())
4162 Worklist.push_back(&U);
4163 continue;
4164 }
4165
4166       // Unknown user for which we cannot track uses further (in a way that
4167 // makes sense).
4168       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4169 ValidUsesOnly = false;
4170 }
4171 return ValidUsesOnly;
4172 };
4173
4174 auto MallocCallocCheck = [&](Instruction &I) {
4175 if (BadMallocCalls.count(&I))
4176 return true;
4177
4178 bool IsMalloc = isMallocLikeFn(&I, TLI);
4179 bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4180 if (!IsMalloc && !IsCalloc) {
4181 BadMallocCalls.insert(&I);
4182 return true;
4183 }
4184
4185 if (IsMalloc) {
4186 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4187 if (Size->getValue().sle(MaxHeapToStackSize))
4188 if (UsesCheck(I) || FreeCheck(I)) {
4189 MallocCalls.insert(&I);
4190 return true;
4191 }
4192 } else if (IsCalloc) {
4193 bool Overflow = false;
4194 if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4195 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4196 if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4197 .sle(MaxHeapToStackSize))
4198 if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4199 MallocCalls.insert(&I);
4200 return true;
4201 }
4202 }
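    // Illustrative example (values are assumptions): for a call like
    // calloc(16, 8) both operands are ConstantInt, 16 * 8 = 128 does not
    // overflow, and if the product is within MaxHeapToStackSize and UsesCheck
    // or FreeCheck succeeds, the call is recorded in MallocCalls.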
4203
4204 BadMallocCalls.insert(&I);
4205 return true;
4206 };
4207
4208 size_t NumBadMallocs = BadMallocCalls.size();
4209
4210 A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4211
4212 if (NumBadMallocs != BadMallocCalls.size())
4213 return ChangeStatus::CHANGED;
4214
4215 return ChangeStatus::UNCHANGED;
4216}
4217
4218struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4219 AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4220
4221 /// See AbstractAttribute::trackStatistics()
4222 void trackStatistics() const override {
4223     STATS_DECL(MallocCalls, Function,
4224                "Number of malloc calls converted to allocas");
4225 for (auto *C : MallocCalls)
4226 if (!BadMallocCalls.count(C))
4227 ++BUILD_STAT_NAME(MallocCalls, Function);
4228 }
4229};
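As a rough, standalone illustration (not part of Attributor.cpp; the function names below are invented), the rewrite this attribute enables replaces a small, constant-size malloc/free pair whose uses are all visible with a stack buffer:

// Standalone sketch only; it mirrors the source-level effect of the
// heap-to-stack rewrite on a bounded allocation confined to one function.
#include <cstdio>
#include <cstdlib>

// Before: heap allocation with a matching free on every path.
static int sumHeap(const int *In, int N) {
  int *Tmp = static_cast<int *>(std::malloc(4 * sizeof(int))); // small, constant size
  if (!Tmp)
    return 0;
  int Sum = 0;
  for (int I = 0; I < N && I < 4; ++I) {
    Tmp[I] = In[I];
    Sum += Tmp[I];
  }
  std::free(Tmp); // the only free, and it always sees this malloc
  return Sum;
}

// After: the allocation lives on the stack and the free disappears.
static int sumStack(const int *In, int N) {
  int Tmp[4] = {}; // stand-in for the alloca the pass would introduce
  int Sum = 0;
  for (int I = 0; I < N && I < 4; ++I) {
    Tmp[I] = In[I];
    Sum += Tmp[I];
  }
  return Sum;
}

int main() {
  int Data[4] = {1, 2, 3, 4};
  std::printf("%d %d\n", sumHeap(Data, 4), sumStack(Data, 4));
  return 0;
}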
4230
4231/// -------------------- Memory Behavior Attributes ----------------------------
4232/// Includes read-none, read-only, and write-only.
4233/// ----------------------------------------------------------------------------
4234struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
4235 AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
4236
4237 /// See AbstractAttribute::initialize(...).
4238 void initialize(Attributor &A) override {
4239 intersectAssumedBits(BEST_STATE);
4240 getKnownStateFromValue(getIRPosition(), getState());
4241 IRAttribute::initialize(A);
4242 }
4243
4244 /// Return the memory behavior information encoded in the IR for \p IRP.
4245 static void getKnownStateFromValue(const IRPosition &IRP,
4246 BitIntegerState &State) {
4247 SmallVector<Attribute, 2> Attrs;
4248 IRP.getAttrs(AttrKinds, Attrs);
4249 for (const Attribute &Attr : Attrs) {
4250 switch (Attr.getKindAsEnum()) {
4251 case Attribute::ReadNone:
4252 State.addKnownBits(NO_ACCESSES);
4253 break;
4254 case Attribute::ReadOnly:
4255 State.addKnownBits(NO_WRITES);
4256 break;
4257 case Attribute::WriteOnly:
4258 State.addKnownBits(NO_READS);
4259 break;
4260 default:
4261 llvm_unreachable("Unexpected attribute!");
4262 }
4263 }
4264
4265 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
4266 if (!I->mayReadFromMemory())
4267 State.addKnownBits(NO_READS);
4268 if (!I->mayWriteToMemory())
4269 State.addKnownBits(NO_WRITES);
4270 }
4271 }
4272
4273 /// See AbstractAttribute::getDeducedAttributes(...).
4274 void getDeducedAttributes(LLVMContext &Ctx,
4275 SmallVectorImpl<Attribute> &Attrs) const override {
4276 assert(Attrs.size() == 0);
4277 if (isAssumedReadNone())
4278 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
4279 else if (isAssumedReadOnly())
4280 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
4281 else if (isAssumedWriteOnly())
4282 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
4283 assert(Attrs.size() <= 1);
4284 }
4285
4286 /// See AbstractAttribute::manifest(...).
4287 ChangeStatus manifest(Attributor &A) override {
4288 const IRPosition &IRP = getIRPosition();
4289
4290 // Check if we would improve the existing attributes first.
4291 SmallVector<Attribute, 4> DeducedAttrs;
4292 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
4293 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
4294 return IRP.hasAttr(Attr.getKindAsEnum(),
4295 /* IgnoreSubsumingPositions */ true);
4296 }))
4297 return ChangeStatus::UNCHANGED;
4298
4299 // Clear existing attributes.
4300 IRP.removeAttrs(AttrKinds);
4301
4302 // Use the generic manifest method.
4303 return IRAttribute::manifest(A);
4304 }
4305
4306 /// See AbstractState::getAsStr().
4307 const std::string getAsStr() const override {
4308 if (isAssumedReadNone())
4309 return "readnone";
4310 if (isAssumedReadOnly())
4311 return "readonly";
4312 if (isAssumedWriteOnly())
4313 return "writeonly";
4314 return "may-read/write";
4315 }
4316
4317 /// The set of IR attributes AAMemoryBehavior deals with.
4318 static const Attribute::AttrKind AttrKinds[3];
4319};
4320
4321const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
4322 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
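Conceptually, these three attribute kinds form a two-bit lattice: NO_READS and NO_WRITES start out assumed and can only ever be removed, and readnone is simply both bits. The following self-contained sketch (illustrative names, not the actual BitIntegerState machinery) shows that encoding and the intersection used when merging with a callee's state:

// Illustrative sketch of the read/write bit lattice; not the real
// AAMemoryBehavior state, just the same idea in a few lines.
#include <cassert>
#include <cstdint>
#include <cstdio>

enum : uint8_t {
  NO_READS = 1 << 0,
  NO_WRITES = 1 << 1,
  NO_ACCESSES = NO_READS | NO_WRITES, // "readnone"
};

struct MemBehaviorState {
  uint8_t Assumed = NO_ACCESSES; // start optimistic: readnone

  // Seeing a read removes the NO_READS bit, a write removes NO_WRITES.
  void observeRead() { Assumed &= ~NO_READS; }
  void observeWrite() { Assumed &= ~NO_WRITES; }

  // Combining with another state keeps only the bits both agree on.
  void intersect(uint8_t OtherAssumed) { Assumed &= OtherAssumed; }

  const char *str() const {
    if (Assumed == NO_ACCESSES) return "readnone";
    if (Assumed & NO_WRITES) return "readonly";
    if (Assumed & NO_READS) return "writeonly";
    return "may-read/write";
  }
};

int main() {
  MemBehaviorState S;
  S.observeRead();              // a load was seen
  assert(S.Assumed == NO_WRITES);
  std::printf("%s\n", S.str()); // prints "readonly"
  S.intersect(NO_READS);        // a callee is at best writeonly
  std::printf("%s\n", S.str()); // prints "may-read/write"
  return 0;
}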
4323
4324/// Memory behavior attribute for a floating value.
4325struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
4326 AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4327
4328 /// See AbstractAttribute::initialize(...).
4329 void initialize(Attributor &A) override {
4330 AAMemoryBehaviorImpl::initialize(A);
4331 // Initialize the use vector with all direct uses of the associated value.
4332 for (const Use &U : getAssociatedValue().uses())
4333 Uses.insert(&U);
4334 }
4335
4336 /// See AbstractAttribute::updateImpl(...).
4337 ChangeStatus updateImpl(Attributor &A) override;
4338
4339 /// See AbstractAttribute::trackStatistics()
4340 void trackStatistics() const override {
4341 if (isAssumedReadNone())
4342 STATS_DECLTRACK_FLOATING_ATTR(readnone)
4343 else if (isAssumedReadOnly())
4344 STATS_DECLTRACK_FLOATING_ATTR(readonly)
4345 else if (isAssumedWriteOnly())
4346 STATS_DECLTRACK_FLOATING_ATTR(writeonly)
4347 }
4348
4349private:
4350 /// Return true if users of \p UserI might access the underlying
4351 /// variable/location described by \p U and should therefore be analyzed.
4352 bool followUsersOfUseIn(Attributor &A, const Use *U,
4353 const Instruction *UserI);
4354
4355 /// Update the state according to the effect of use \p U in \p UserI.
4356 void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
4357
4358protected:
4359 /// Container for (transitive) uses of the associated argument.
4360 SetVector<const Use *> Uses;
4361};
4362
4363/// Memory behavior attribute for function argument.
4364struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
4365 AAMemoryBehaviorArgument(const IRPosition &IRP)
4366 : AAMemoryBehaviorFloating(IRP) {}
4367
4368 /// See AbstractAttribute::initialize(...).
4369 void initialize(Attributor &A) override {
4370 AAMemoryBehaviorFloating::initialize(A);
4371
4372 // Initialize the use vector with all direct uses of the associated value.
4373 Argument *Arg = getAssociatedArgument();
4374 if (!Arg || !Arg->getParent()->hasExactDefinition())
4375 indicatePessimisticFixpoint();
4376 }
4377
4378 ChangeStatus manifest(Attributor &A) override {
4379 // TODO: From readattrs.ll: "inalloca parameters are always
4380 // considered written"
4381 if (hasAttr({Attribute::InAlloca})) {
4382 removeKnownBits(NO_WRITES);
4383 removeAssumedBits(NO_WRITES);
4384 }
4385 return AAMemoryBehaviorFloating::manifest(A);
4386 }
4387
4388 /// See AbstractAttribute::trackStatistics()
4389 void trackStatistics() const override {
4390 if (isAssumedReadNone())
4391 STATS_DECLTRACK_ARG_ATTR(readnone)
4392 else if (isAssumedReadOnly())
4393 STATS_DECLTRACK_ARG_ATTR(readonly)
4394 else if (isAssumedWriteOnly())
4395 STATS_DECLTRACK_ARG_ATTR(writeonly)
4396 }
4397};
4398
4399struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
4400 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
4401 : AAMemoryBehaviorArgument(IRP) {}
4402
4403 /// See AbstractAttribute::updateImpl(...).
4404 ChangeStatus updateImpl(Attributor &A) override {
4405 // TODO: Once we have call site specific value information we can provide
4406 // call site specific liveness information and then it makes
4407 // sense to specialize attributes for call sites arguments instead of
4408 // redirecting requests to the callee argument.
4409 Argument *Arg = getAssociatedArgument();
4410 const IRPosition &ArgPos = IRPosition::argument(*Arg);
4411 auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4412 return clampStateAndIndicateChange(
4413 getState(),
4414 static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
4415 }
4416
4417 /// See AbstractAttribute::trackStatistics()
4418 void trackStatistics() const override {
4419 if (isAssumedReadNone())
4420 STATS_DECLTRACK_CSARG_ATTR(readnone)
4421 else if (isAssumedReadOnly())
4422 STATS_DECLTRACK_CSARG_ATTR(readonly)
4423 else if (isAssumedWriteOnly())
4424 STATS_DECLTRACK_CSARG_ATTR(writeonly)
4425 }
4426};
4427
4428/// Memory behavior attribute for a call site return position.
4429struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
4430 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
4431 : AAMemoryBehaviorFloating(IRP) {}
4432
4433 /// See AbstractAttribute::manifest(...).
4434 ChangeStatus manifest(Attributor &A) override {
4435 // We do not annotate returned values.
4436 return ChangeStatus::UNCHANGED;
4437 }
4438
4439 /// See AbstractAttribute::trackStatistics()
4440 void trackStatistics() const override {}
4441};
4442
4443/// An AA to represent the memory behavior function attributes.
4444struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
4445 AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4446
4447 /// See AbstractAttribute::updateImpl(Attributor &A).
4448 virtual ChangeStatus updateImpl(Attributor &A) override;
4449
4450 /// See AbstractAttribute::manifest(...).
4451 ChangeStatus manifest(Attributor &A) override {
4452 Function &F = cast<Function>(getAnchorValue());
4453 if (isAssumedReadNone()) {
4454 F.removeFnAttr(Attribute::ArgMemOnly);
4455 F.removeFnAttr(Attribute::InaccessibleMemOnly);
4456 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4457 }
4458 return AAMemoryBehaviorImpl::manifest(A);
4459 }
4460
4461 /// See AbstractAttribute::trackStatistics()
4462 void trackStatistics() const override {
4463 if (isAssumedReadNone())
4464 STATS_DECLTRACK_FN_ATTR(readnone)
4465 else if (isAssumedReadOnly())
4466 STATS_DECLTRACK_FN_ATTR(readonly)
4467 else if (isAssumedWriteOnly())
4468 STATS_DECLTRACK_FN_ATTR(writeonly)
4469 }
4470};
4471
4472/// AAMemoryBehavior attribute for call sites.
4473struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
4474 AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4475
4476 /// See AbstractAttribute::initialize(...).
4477 void initialize(Attributor &A) override {
4478 AAMemoryBehaviorImpl::initialize(A);
4479 Function *F = getAssociatedFunction();
4480 if (!F || !F->hasExactDefinition())
4481 indicatePessimisticFixpoint();
4482 }
4483
4484 /// See AbstractAttribute::updateImpl(...).
4485 ChangeStatus updateImpl(Attributor &A) override {
4486 // TODO: Once we have call site specific value information we can provide
4487 // call site specific liveness information and then it makes
4488 // sense to specialize attributes for call sites arguments instead of
4489 // redirecting requests to the callee argument.
4490 Function *F = getAssociatedFunction();
4491 const IRPosition &FnPos = IRPosition::function(*F);
4492 auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4493 return clampStateAndIndicateChange(
4494 getState(),
4495 static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
4496 }
4497
4498 /// See AbstractAttribute::trackStatistics()
4499 void trackStatistics() const override {
4500 if (isAssumedReadNone())
4501 STATS_DECLTRACK_CS_ATTR(readnone)
4502 else if (isAssumedReadOnly())
4503 STATS_DECLTRACK_CS_ATTR(readonly)
4504 else if (isAssumedWriteOnly())
4505 STATS_DECLTRACK_CS_ATTR(writeonly)
4506 }
4507};
4508} // namespace
4509
4510ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
4511
4512 // The current assumed state used to determine a change.
4513 auto AssumedState = getAssumed();
4514
4515 auto CheckRWInst = [&](Instruction &I) {
4516 // If the instruction has its own memory behavior state, use it to restrict
4517 // the local state. No further analysis is required as the other memory
4518 // state is as optimistic as it gets.
4519 if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4520 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4521 *this, IRPosition::callsite_function(ICS));
4522 intersectAssumedBits(MemBehaviorAA.getAssumed());
4523 return !isAtFixpoint();
4524 }
4525
4526 // Remove access kind modifiers if necessary.
4527 if (I.mayReadFromMemory())
4528 removeAssumedBits(NO_READS);
4529 if (I.mayWriteToMemory())
4530 removeAssumedBits(NO_WRITES);
4531 return !isAtFixpoint();
4532 };
4533
4534 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
4535 return indicatePessimisticFixpoint();
4536
4537 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4538 : ChangeStatus::UNCHANGED;
4539}
4540
4541ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
4542
4543 const IRPosition &IRP = getIRPosition();
4544 const IRPosition &FnPos = IRPosition::function_scope(IRP);
4545 AAMemoryBehavior::StateType &S = getState();
4546
4547 // First, check the function scope. We take the known information and we avoid
4548 // work if the assumed information implies the current assumed information for
4549 // this attribute.
4550 const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4551 S.addKnownBits(FnMemAA.getKnown());
4552 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
4553 return ChangeStatus::UNCHANGED;
4554
4555 // Make sure the value is not captured (except through "return"); if
4556 // it is, any information derived would be irrelevant anyway as we cannot
4557 // check the potential aliases introduced by the capture. However, no need
4558 // to fall back to anything less optimistic than the function state.
4559 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
4560 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4561 S.intersectAssumedBits(FnMemAA.getAssumed());
4562 return ChangeStatus::CHANGED;
4563 }
4564
4565 // The current assumed state used to determine a change.
4566 auto AssumedState = S.getAssumed();
4567
4568 // Liveness information to exclude dead users.
4569 // TODO: Take the FnPos once we have call site specific liveness information.
4570 const auto &LivenessAA = A.getAAFor<AAIsDead>(
4571 *this, IRPosition::function(*IRP.getAssociatedFunction()));
4572
4573 // Visit and expand uses until all are analyzed or a fixpoint is reached.
4574 for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
4575 const Use *U = Uses[i];
4576 Instruction *UserI = cast<Instruction>(U->getUser());
4577 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
4578 << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
4579 << "]\n");
4580 if (LivenessAA.isAssumedDead(UserI))
4581 continue;
4582
4583 // Check if the users of UserI should also be visited.
4584 if (followUsersOfUseIn(A, U, UserI))
4585 for (const Use &UserIUse : UserI->uses())
4586 Uses.insert(&UserIUse);
4587
4588 // If UserI might touch memory we analyze the use in detail.
4589 if (UserI->mayReadOrWriteMemory())
4590 analyzeUseIn(A, U, UserI);
4591 }
4592
4593 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4594 : ChangeStatus::UNCHANGED;
4595}
4596
4597bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
4598 const Instruction *UserI) {
4599 // The loaded value is unrelated to the pointer argument, no need to
4600 // follow the users of the load.
4601 if (isa<LoadInst>(UserI))
4602 return false;
4603
4604 // By default we follow all uses assuming UserI might leak information on U;
4605 // we have special handling for call site operands though.
4606 ImmutableCallSite ICS(UserI);
4607 if (!ICS || !ICS.isArgOperand(U))
4608 return true;
4609
4610 // If the use is a call argument known not to be captured, the users of
4611 // the call do not need to be visited because they have to be unrelated to
4612 // the input. Note that this check is not trivial even though we disallow
4613 // general capturing of the underlying argument. The reason is that the
4614 // call might capture the argument "through return", which we allow and for which we
4615 // need to check call users.
4616 unsigned ArgNo = ICS.getArgumentNo(U);
4617 const auto &ArgNoCaptureAA =
4618 A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
4619 return !ArgNoCaptureAA.isAssumedNoCapture();
4620}
4621
4622void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
4623 const Instruction *UserI) {
4624 assert(UserI->mayReadOrWriteMemory());
4625
4626 switch (UserI->getOpcode()) {
4627 default:
4628 // TODO: Handle all atomics and other side-effect operations we know of.
4629 break;
4630 case Instruction::Load:
4631 // Loads cause the NO_READS property to disappear.
4632 removeAssumedBits(NO_READS);
4633 return;
4634
4635 case Instruction::Store:
4636 // Stores cause the NO_WRITES property to disappear if the use is the
4637 // pointer operand. Note that we do assume that capturing was taken care of
4638 // somewhere else.
4639 if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
4640 removeAssumedBits(NO_WRITES);
4641 return;
4642
4643 case Instruction::Call:
4644 case Instruction::CallBr:
4645 case Instruction::Invoke: {
4646 // For call sites we look at the argument memory behavior attribute (this
4647 // could be recursive!) in order to restrict our own state.
4648 ImmutableCallSite ICS(UserI);
4649
4650 // Give up on operand bundles.
4651 if (ICS.isBundleOperand(U)) {
4652 indicatePessimisticFixpoint();
4653 return;
4654 }
4655
4656 // Calling a function does read the function pointer, maybe write it if the
4657 // function is self-modifying.
4658 if (ICS.isCallee(U)) {
4659 removeAssumedBits(NO_READS);
4660 break;
4661 }
4662
4663 // Adjust the possible access behavior based on the information on the
4664 // argument.
4665 unsigned ArgNo = ICS.getArgumentNo(U);
4666 const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
4667 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4668 // "assumed" has at most the same bits as the MemBehaviorAA assumed
4669 // and at least "known".
4670 intersectAssumedBits(MemBehaviorAA.getAssumed());
4671 return;
4672 }
4673 };
4674
4675 // Generally, look at the "may-properties" and adjust the assumed state if we
4676 // did not trigger special handling before.
4677 if (UserI->mayReadFromMemory())
4678 removeAssumedBits(NO_READS);
4679 if (UserI->mayWriteToMemory())
4680 removeAssumedBits(NO_WRITES);
4681}
4682
4683/// ----------------------------------------------------------------------------
4684/// Attributor
4685/// ----------------------------------------------------------------------------
4686
4687bool Attributor::isAssumedDead(const AbstractAttribute &AA,
4688 const AAIsDead *LivenessAA) {
4689 const Instruction *CtxI = AA.getIRPosition().getCtxI();
4690 if (!CtxI)
4691 return false;
4692
4693 // TODO: Find a good way to utilize fine and coarse grained liveness
4694 // information.
4695 if (!LivenessAA)
4696 LivenessAA =
4697 &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
4698 /* TrackDependence */ false);
4699
4700 // Don't check liveness for AAIsDead.
4701 if (&AA == LivenessAA)
4702 return false;
4703
4704 if (!LivenessAA->isAssumedDead(CtxI))
4705 return false;
4706
4707 // We actually used liveness information so we have to record a dependence.
4708 recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
4709
4710 return true;
4711}
4712
4713bool Attributor::checkForAllUses(
4714 const function_ref<bool(const Use &, bool &)> &Pred,
4715 const AbstractAttribute &QueryingAA, const Value &V) {
4716 const IRPosition &IRP = QueryingAA.getIRPosition();
4717 SmallVector<const Use *, 16> Worklist;
4718 SmallPtrSet<const Use *, 16> Visited;
4719
4720 for (const Use &U : V.uses())
4721 Worklist.push_back(&U);
4722
4723 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
4724 << " initial uses to check\n");
4725
4726 if (Worklist.empty())
4727 return true;
4728
4729 bool AnyDead = false;
4730 const Function *ScopeFn = IRP.getAnchorScope();
4731 const auto *LivenessAA =
4732 ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
4733 /* TrackDependence */ false)
4734 : nullptr;
4735
4736 while (!Worklist.empty()) {
4737 const Use *U = Worklist.pop_back_val();
4738 if (!Visited.insert(U).second)
4739 continue;
4740 LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
4741 if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
4742 if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
4743 LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
4744 << static_cast<const AbstractAttribute &>(*LivenessAA)
4745 << "\n");
4746 AnyDead = true;
4747 continue;
4748 }
4749
4750 bool Follow = false;
4751 if (!Pred(*U, Follow))
4752 return false;
4753 if (!Follow)
4754 continue;
4755 for (const Use &UU : U->getUser()->uses())
4756 Worklist.push_back(&UU);
4757 }
4758
4759 if (AnyDead)
4760 recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4761
4762 return true;
4763}
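Stripped of liveness handling and LLVM types, checkForAllUses is a plain worklist traversal over transitive uses with a visited set and a per-use "Follow" decision. A minimal standalone sketch of that pattern, with a made-up Node type standing in for IR values, might look like:

// Generic worklist walk over transitive uses; simplified stand-in for
// Attributor::checkForAllUses (no liveness filtering, plain pointers).
#include <functional>
#include <set>
#include <vector>

struct Node {
  std::vector<Node *> Users; // who uses this value
};

// Returns false as soon as the predicate rejects a use; the predicate sets
// Follow to request that the user's own users be visited as well.
static bool checkForAllUses(Node &V,
                            const std::function<bool(Node &, bool &)> &Pred) {
  std::vector<Node *> Worklist(V.Users.begin(), V.Users.end());
  std::set<Node *> Visited;

  while (!Worklist.empty()) {
    Node *U = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(U).second)
      continue; // already checked

    bool Follow = false;
    if (!Pred(*U, Follow))
      return false;
    if (!Follow)
      continue;
    for (Node *UU : U->Users)
      Worklist.push_back(UU);
  }
  return true;
}

int main() {
  Node A, B, C;
  A.Users = {&B};
  B.Users = {&C};
  // Accept everything and always follow: visits B, then C.
  bool Ok = checkForAllUses(A, [](Node &, bool &Follow) {
    Follow = true;
    return true;
  });
  return Ok ? 0 : 1;
}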
4764
4765bool Attributor::checkForAllCallSites(
4766 const function_ref<bool(AbstractCallSite)> &Pred,
4767 const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
4768 // We can try to determine information from
4769 // the call sites. However, this is only possible if all call sites are
4770 // known, hence the function has internal linkage.
4771 const IRPosition &IRP = QueryingAA.getIRPosition();
4772 const Function *AssociatedFunction = IRP.getAssociatedFunction();
4773 if (!AssociatedFunction) {
4774 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
4775 << "\n");
4776 return false;
4777 }
4778
4779 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
4780 &QueryingAA);
4781}
4782
4783bool Attributor::checkForAllCallSites(
4784 const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
4785 bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
4786 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
4787 LLVM_DEBUG(
4788 dbgs()
4789 << "[Attributor] Function " << Fn.getName()
4790 << " has no internal linkage, hence not all call sites are known\n");
4791 return false;
4792 }
4793
4794 for (const Use &U : Fn.uses()) {
4795 AbstractCallSite ACS(&U);
4796 if (!ACS) {
4797 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
4798 << " has non call site use " << *U.get() << " in "
4799 << *U.getUser() << "\n");
4800 // BlockAddress users are allowed.
4801 if (isa<BlockAddress>(U.getUser()))
4802 continue;
4803 return false;
4804 }
4805
4806 Instruction *I = ACS.getInstruction();
4807 Function *Caller = I->getFunction();
4808
4809 const auto *LivenessAA =
4810 lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
4811 /* TrackDependence */ false);
4812
4813 // Skip dead calls.
4814 if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4815 // We actually used liveness information so we have to record a
4816 // dependence.
4817 if (QueryingAA)
4818 recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
4819 continue;
4820 }
4821
4822 const Use *EffectiveUse =
4823 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
4824 if (!ACS.isCallee(EffectiveUse)) {
4825 if (!RequireAllCallSites)
4826 continue;
4827 LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
4828 << " is an invalid use of " << Fn.getName() << "\n");
4829 return false;
4830 }
4831
4832 if (Pred(ACS))
4833 continue;
4834
4835 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
4836 << *ACS.getInstruction() << "\n");
4837 return false;
4838 }
4839
4840 return true;
4841}
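The essence of this overload is "prove the predicate for every visible call site, and refuse when completeness is required but the function could have callers we cannot see". A toy standalone sketch of that check (invented ToyFunction/ToyCallSite types, not the real AbstractCallSite API) follows:

// Toy model of Attributor::checkForAllCallSites: apply a predicate to every
// known call site and fail if completeness is required but cannot be proven.
#include <cstdio>
#include <functional>
#include <vector>

struct ToyCallSite {
  bool IsDead = false;
};

struct ToyFunction {
  bool HasLocalLinkage = false;       // internal linkage => all callers visible
  std::vector<ToyCallSite> CallSites; // the call sites we can see
};

static bool checkForAllCallSites(const ToyFunction &Fn,
                                 const std::function<bool(const ToyCallSite &)> &Pred,
                                 bool RequireAllCallSites) {
  // Without internal linkage there may be callers we cannot see.
  if (RequireAllCallSites && !Fn.HasLocalLinkage)
    return false;

  for (const ToyCallSite &CS : Fn.CallSites) {
    if (CS.IsDead)
      continue; // dead calls do not constrain the result
    if (!Pred(CS))
      return false;
  }
  return true;
}

int main() {
  ToyFunction F;
  F.HasLocalLinkage = true;
  F.CallSites = {{/*IsDead=*/false}, {/*IsDead=*/true}};
  // A predicate that rejects every live call site; the dead-function cleanup
  // uses this pattern to prove a function has no live callers at all.
  bool NoLiveCallers =
      checkForAllCallSites(F, [](const ToyCallSite &) { return false; }, true);
  std::printf("no live callers: %s\n", NoLiveCallers ? "yes" : "no");
  return 0;
}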
4842
4843bool Attributor::checkForAllReturnedValuesAndReturnInsts(
4844 const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
4845 &Pred,
4846 const AbstractAttribute &QueryingAA) {
4847
4848 const IRPosition &IRP = QueryingAA.getIRPosition();
4849 // Since we need to provide return instructions we have to have an exact
4850 // definition.
4851 const Function *AssociatedFunction = IRP.getAssociatedFunction();
4852 if (!AssociatedFunction)
4853 return false;
4854
4855 // If this is a call site query we use the call site specific return values
4856 // and liveness information.
4857 // TODO: use the function scope once we have call site AAReturnedValues.
4858 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4859 const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4860 if (!AARetVal.getState().isValidState())
4861 return false;
4862
4863 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
4864}
4865
4866bool Attributor::checkForAllReturnedValues(
4867 const function_ref<bool(Value &)> &Pred,
4868 const AbstractAttribute &QueryingAA) {
4869
4870 const IRPosition &IRP = QueryingAA.getIRPosition();
4871 const Function *AssociatedFunction = IRP.getAssociatedFunction();
4872 if (!AssociatedFunction)
4873 return false;
4874
4875 // TODO: use the function scope once we have call site AAReturnedValues.
4876 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4877 const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4878 if (!AARetVal.getState().isValidState())
4879 return false;
4880
4881 return AARetVal.checkForAllReturnedValuesAndReturnInsts(
4882 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
4883 return Pred(RV);
4884 });
4885}
4886
4887static bool
4888checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
4889 const function_ref<bool(Instruction &)> &Pred,
4890 const AAIsDead *LivenessAA, bool &AnyDead,
4891 const ArrayRef<unsigned> &Opcodes) {
4892 for (unsigned Opcode : Opcodes) {
4893 for (Instruction *I : OpcodeInstMap[Opcode]) {
4894 // Skip dead instructions.
4895 if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4896 AnyDead = true;
4897 continue;
4898 }
4899
4900 if (!Pred(*I))
4901 return false;
4902 }
4903 }
4904 return true;
4905}
4906
4907bool Attributor::checkForAllInstructions(
4908 const llvm::function_ref<bool(Instruction &)> &Pred,
4909 const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
4910
4911 const IRPosition &IRP = QueryingAA.getIRPosition();
4912 // Since we need to provide instructions we have to have an exact definition.
4913 const Function *AssociatedFunction = IRP.getAssociatedFunction();
4914 if (!AssociatedFunction)
4915 return false;
4916
4917 // TODO: use the function scope once we have call site AAReturnedValues.
4918 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4919 const auto &LivenessAA =
4920 getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4921 bool AnyDead = false;
4922
4923 auto &OpcodeInstMap =
4924 InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
4925 if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
4926 Opcodes))
4927 return false;
4928
4929 // If we actually used liveness information we have to record a dependence.
4930 if (AnyDead)
4931 recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4932
4933 return true;
4934}
4935
4936bool Attributor::checkForAllReadWriteInstructions(
4937 const llvm::function_ref<bool(Instruction &)> &Pred,
4938 AbstractAttribute &QueryingAA) {
4939
4940 const Function *AssociatedFunction =
4941 QueryingAA.getIRPosition().getAssociatedFunction();
4942 if (!AssociatedFunction)
4943 return false;
4944
4945 // TODO: use the function scope once we have call site AAReturnedValues.
4946 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4947 const auto &LivenessAA =
4948 getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4949 bool AnyDead = false;
4950
4951 for (Instruction *I :
4952 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
4953 // Skip dead instructions.
4954 if (LivenessAA.isAssumedDead(I)) {
4955 AnyDead = true;
4956 continue;
4957 }
4958
4959 if (!Pred(*I))
4960 return false;
4961 }
4962
4963 // If we actually used liveness information we have to record a dependence.
4964 if (AnyDead)
4965 recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4966
4967 return true;
4968}
4969
4970ChangeStatus Attributor::run(Module &M) {
4971 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
4972 << AllAbstractAttributes.size()
4973 << " abstract attributes.\n");
4974
4975 // Now that all abstract attributes are collected and initialized we start
4976 // the abstract analysis.
4977
4978 unsigned IterationCounter = 1;
4979
4980 SmallVector<AbstractAttribute *, 64> ChangedAAs;
4981 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
4982 Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
4983
4984 bool RecomputeDependences = false;
4985
4986 do {
4987 // Remember the size to determine new attributes.
4988 size_t NumAAs = AllAbstractAttributes.size();
4989 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
4990 << ", Worklist size: " << Worklist.size() << "\n");
4991
4992 // For invalid AAs we can fix dependent AAs that have a required dependence,
4993 // thereby folding long dependence chains in a single step without the need
4994 // to run updates.
4995 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
4996 AbstractAttribute *InvalidAA = InvalidAAs[u];
4997 auto &QuerriedAAs = QueryMap[InvalidAA];
4998 LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
4999 << QuerriedAAs.RequiredAAs.size() << "/"
5000 << QuerriedAAs.OptionalAAs.size()
5001 << " required/optional dependences\n");
5002 for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
5003 AbstractState &DOIAAState = DepOnInvalidAA->getState();
5004 DOIAAState.indicatePessimisticFixpoint();
5005 ++NumAttributesFixedDueToRequiredDependences;
5006 assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
5007 if (!DOIAAState.isValidState())
5008 InvalidAAs.insert(DepOnInvalidAA);
5009 }
5010 if (!RecomputeDependences)
5011 Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5012 QuerriedAAs.OptionalAAs.end());
5013 }
5014
5015 // If dependences (=QueryMap) are recomputed we have to look at all abstract
5016 // attributes again, regardless of what changed in the last iteration.
5017 if (RecomputeDependences) {
5018 LLVM_DEBUG(
5019 dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
5020 QueryMap.clear();
5021 ChangedAAs.clear();
5022 Worklist.insert(AllAbstractAttributes.begin(),
5023 AllAbstractAttributes.end());
5024 }
5025
5026 // Add all abstract attributes that are potentially dependent on one that
5027 // changed to the work list.
5028 for (AbstractAttribute *ChangedAA : ChangedAAs) {
5029 auto &QuerriedAAs = QueryMap[ChangedAA];
5030 Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5031 QuerriedAAs.OptionalAAs.end());
5032 Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
5033 QuerriedAAs.RequiredAAs.end());
5034 }
5035
5036 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
5037 << ", Worklist+Dependent size: " << Worklist.size()
5038 << "\n");
5039
5040 // Reset the changed and invalid set.
5041 ChangedAAs.clear();
5042 InvalidAAs.clear();
5043
5044 // Update all abstract attributes in the work list and record the ones that
5045 // changed.
5046 for (AbstractAttribute *AA : Worklist)
5047 if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
5048 QueriedNonFixAA = false;
5049 if (AA->update(*this) == ChangeStatus::CHANGED) {
5050 ChangedAAs.push_back(AA);
5051 if (!AA->getState().isValidState())
5052 InvalidAAs.insert(AA);
5053 } else if (!QueriedNonFixAA) {
5054 // If the attribute did not query any non-fix information, the state
5055 // will not change and we can indicate that right away.
5056 AA->getState().indicateOptimisticFixpoint();
5057 }
5058 }
5059
5060 // Check if we recompute the dependences in the next iteration.
5061 RecomputeDependences = (DepRecomputeInterval > 0 &&
5062 IterationCounter % DepRecomputeInterval == 0);
5063
5064 // Add attributes to the changed set if they have been created in the last
5065 // iteration.
5066 ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
5067 AllAbstractAttributes.end());
5068
5069 // Reset the work list and repopulate with the changed abstract attributes.
5070 // Note that dependent ones are added above.
5071 Worklist.clear();
5072 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
5073
5074 } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
5075 VerifyMaxFixpointIterations));
5076
5077 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
5078 << IterationCounter << "/" << MaxFixpointIterations
5079 << " iterations\n");
5080
5081 size_t NumFinalAAs = AllAbstractAttributes.size();
5082
5083 // Reset abstract arguments not settled in a sound fixpoint by now. This
5084 // happens when we stopped the fixpoint iteration early. Note that only the
5085 // ones marked as "changed" *and* the ones transitively depending on them
5086 // need to be reverted to a pessimistic state. Others might not be in a
5087 // fixpoint state but we can use the optimistic results for them anyway.
5088 SmallPtrSet<AbstractAttribute *, 32> Visited;
5089 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
5090 AbstractAttribute *ChangedAA = ChangedAAs[u];
5091 if (!Visited.insert(ChangedAA).second)
5092 continue;
5093
5094 AbstractState &State = ChangedAA->getState();
5095 if (!State.isAtFixpoint()) {
5096 State.indicatePessimisticFixpoint();
5097
5098 NumAttributesTimedOut++;
5099 }
5100
5101 auto &QuerriedAAs = QueryMap[ChangedAA];
5102 ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
5103 QuerriedAAs.OptionalAAs.end());
5104 ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
5105 QuerriedAAs.RequiredAAs.end());
5106 }
5107
5108 LLVM_DEBUG({
5109 if (!Visited.empty())
5110 dbgs() << "\n[Attributor] Finalized " << Visited.size()
5111 << " abstract attributes.\n";
5112 });
5113
5114 unsigned NumManifested = 0;
5115 unsigned NumAtFixpoint = 0;
5116 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
5117 for (AbstractAttribute *AA : AllAbstractAttributes) {
5118 AbstractState &State = AA->getState();
5119
5120 // If a fixpoint was not already reached, we can now take the
5121 // optimistic state. This is correct because we enforced a pessimistic one
5122 // on abstract attributes that were transitively dependent on a changed one
5123 // already above.
5124 if (!State.isAtFixpoint())
5125 State.indicateOptimisticFixpoint();
5126
5127 // If the state is invalid, we do not try to manifest it.
5128 if (!State.isValidState())
5129 continue;
5130
5131 // Skip dead code.
5132 if (isAssumedDead(*AA, nullptr))
5133 continue;
5134 // Manifest the state and record if we changed the IR.
5135 ChangeStatus LocalChange = AA->manifest(*this);
5136 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
5137 AA->trackStatistics();
5138
5139 ManifestChange = ManifestChange | LocalChange;
5140
5141 NumAtFixpoint++;
5142 NumManifested += (LocalChange == ChangeStatus::CHANGED);
5143 }
5144
5145 (void)NumManifested;
5146 (void)NumAtFixpoint;
5147 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
5148 << " arguments while " << NumAtFixpoint
5149 << " were in a valid fixpoint state\n");
5150
5151 NumAttributesManifested += NumManifested;
5152 NumAttributesValidFixpoint += NumAtFixpoint;
5153
5154 (void)NumFinalAAs;
5155 assert(
5156 NumFinalAAs == AllAbstractAttributes.size() &&
5157 "Expected the final number of abstract attributes to remain unchanged!");
5158
5159 // Delete stuff at the end to avoid invalid references and to keep a nice order.
5160 {
5161 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
5162 << ToBeDeletedFunctions.size() << " functions and "
5163 << ToBeDeletedBlocks.size() << " blocks and "
5164 << ToBeDeletedInsts.size() << " instructions and "
5165 << ToBeChangedUses.size() << " uses\n");
5166
5167 SmallVector<Instruction *, 32> DeadInsts;
5168 SmallVector<Instruction *, 32> TerminatorsToFold;
5169 SmallVector<Instruction *, 32> UnreachablesToInsert;
5170
5171 for (auto &It : ToBeChangedUses) {
5172 Use *U = It.first;
5173 Value *NewV = It.second;
5174 Value *OldV = U->get();
5175 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
5176 << " instead of " << *OldV << "\n");
5177 U->set(NewV);
5178 if (Instruction *I = dyn_cast<Instruction>(OldV))
5179 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
5180 isInstructionTriviallyDead(I)) {
5181 DeadInsts.push_back(I);
5182 }
5183 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
5184 Instruction *UserI = cast<Instruction>(U->getUser());
5185 if (isa<UndefValue>(NewV)) {
5186 UnreachablesToInsert.push_back(UserI);
5187 } else {
5188 TerminatorsToFold.push_back(UserI);
5189 }
5190 }
5191 }
5192 for (Instruction *I : UnreachablesToInsert)
5193 changeToUnreachable(I, /* UseLLVMTrap */ false);
5194 for (Instruction *I : TerminatorsToFold)
5195 ConstantFoldTerminator(I->getParent());
5196
5197 for (Instruction *I : ToBeDeletedInsts) {
5198 I->replaceAllUsesWith(UndefValue::get(I->getType()));
5199 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
5200 DeadInsts.push_back(I);
5201 else
5202 I->eraseFromParent();
5203 }
5204
5205 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
5206
5207 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
5208 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
5209 ToBeDeletedBBs.reserve(NumDeadBlocks);
5210 ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
5211 // Actually we do not delete the blocks but squash them into a single
5212 // unreachable instruction; untangling branches that jump here is something
5213 // we need to do in a more generic way.
5214 DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
5215 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
5216 BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
5217 }
5218
5219 STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
5220 for (Function *Fn : ToBeDeletedFunctions) {
5221 Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
5222 Fn->eraseFromParent();
5223 STATS_TRACK(AAIsDead, Function);
5224 }
5225
5226 // Identify dead internal functions and delete them. This happens outside
5227 // the other fixpoint analysis as we might treat potentially dead functions
5228 // as live to lower the number of iterations. If they happen to be dead, the
5229 // below fixpoint loop will identify and eliminate them.
5230 SmallVector<Function *, 8> InternalFns;
5231 for (Function &F : M)
5232 if (F.hasLocalLinkage())
5233 InternalFns.push_back(&F);
5234
5235 bool FoundDeadFn = true;
5236 while (FoundDeadFn) {
5237 FoundDeadFn = false;
5238 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
5239 Function *F = InternalFns[u];
5240 if (!F)
5241 continue;
5242
5243 if (!checkForAllCallSites([](AbstractCallSite ACS) { return false; },
5244 *F, true, nullptr))
5245 continue;
5246
5247 STATS_TRACK(AAIsDead, Function);
5248 ToBeDeletedFunctions.insert(F);
5249 F->deleteBody();
5250 F->replaceAllUsesWith(UndefValue::get(F->getType()));
5251 F->eraseFromParent();
5252 InternalFns[u] = nullptr;
5253 FoundDeadFn = true;
5254 }
5255 }
5256 }
5257
5258 if (VerifyMaxFixpointIterations &&
5259 IterationCounter != MaxFixpointIterations) {
5260 errs() << "\n[Attributor] Fixpoint iteration done after: "
5261 << IterationCounter << "/" << MaxFixpointIterations
5262 << " iterations\n";
5263 llvm_unreachable("The fixpoint was not reached with exactly the number of "
5264 "specified iterations!");
5265 }
5266
5267 return ManifestChange;
5268}
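The control flow of run() is a bounded worklist fixpoint: update, requeue what changed (plus its dependents), stop at the iteration budget, and force anything still unsettled to its pessimistic state before manifesting. A compact standalone sketch of just that skeleton, with toy attributes and without the dependence map, liveness, or IR mutation, is shown below:

// Toy fixpoint driver in the spirit of Attributor::run: iterate updates until
// nothing changes or the iteration budget is exhausted, then force a
// pessimistic fixpoint on whatever is still in flux.
#include <cstdio>
#include <vector>

struct ToyAA {
  int Assumed = 100;      // optimistic assumption
  int Known = 0;          // pessimistic fallback
  bool AtFixpoint = false;

  // One update step; returns true if the assumed state changed.
  bool update() {
    if (AtFixpoint)
      return false;
    int Old = Assumed;
    Assumed = (Assumed > Known) ? Assumed - 1 : Known; // converge downwards
    if (Assumed == Known)
      AtFixpoint = true;
    return Assumed != Old;
  }

  void indicatePessimisticFixpoint() {
    Assumed = Known;
    AtFixpoint = true;
  }
};

int main() {
  const unsigned MaxFixpointIterations = 32;
  std::vector<ToyAA> AAs(3);
  AAs[1].Known = 97; // this one settles within the budget

  std::vector<ToyAA *> Worklist;
  for (ToyAA &AA : AAs)
    Worklist.push_back(&AA);

  unsigned Iteration = 0;
  while (!Worklist.empty() && Iteration++ < MaxFixpointIterations) {
    std::vector<ToyAA *> Changed;
    for (ToyAA *AA : Worklist)
      if (AA->update())
        Changed.push_back(AA); // a real driver would also requeue dependents
    Worklist = Changed;
  }

  // Anything that did not settle is reverted to its pessimistic state.
  for (ToyAA &AA : AAs)
    if (!AA.AtFixpoint)
      AA.indicatePessimisticFixpoint();

  for (const ToyAA &AA : AAs)
    std::printf("assumed=%d\n", AA.Assumed);
  return 0;
}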
5269
5270void Attributor::initializeInformationCache(Function &F) {
5271
5272 // Walk all instructions to find interesting instructions that might be
5273 // queried by abstract attributes during their initialization or update.
5274 // This has to happen before we create attributes.
5275 auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
5276 auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
5277
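// Descriptive note (added by the editor): two per-function caches are filled
// here; InstOpcodeMap groups instructions by opcode for the opcodes handled in
// the switch below, and ReadOrWriteInsts records every instruction that may
// read or write memory. Abstract attributes query these caches instead of
// rescanning the function.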
5278 for (Instruction &I : instructions(&F)) {
5279 bool IsInterestingOpcode = false;
5280
5281 // To allow easy access to all instructions in a function with a given
5282 // opcode we store them in the InfoCache. As not all opcodes are interesting
5283 // to concrete attributes we only cache the ones that are, as identified in
5284 // the following switch.
5285 // Note: There are no concrete attributes now so this is initially empty.
5286 switch (I.getOpcode()) {
5287 default:
5288 assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
5289 "New call site/base instruction type needs to be known in the "
5290 "Attributor.");
5291 break;
5292 case Instruction::Load:
5293 // The alignment of a pointer is interesting for loads.
5294 case Instruction::Store:
5295 // The alignment of a pointer is interesting for stores.
5296 case Instruction::Call:
5297 case Instruction::CallBr:
5298 case Instruction::Invoke:
5299 case Instruction::CleanupRet:
5300 case Instruction::CatchSwitch:
5301 case Instruction::Resume:
5302 case Instruction::Ret:
5303 IsInterestingOpcode = true;
5304 }
5305 if (IsInterestingOpcode)
5306 InstOpcodeMap[I.getOpcode()].push_back(&I);
5307 if (I.mayReadOrWriteMemory())
5308 ReadOrWriteInsts.push_back(&I);
5309 }
5310}
5311
5312void Attributor::recordDependence(const AbstractAttribute &FromAA,
5313 const AbstractAttribute &ToAA,
5314 DepClassTy DepClass) {
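// Descriptive note (added by the editor): if FromAA is already at a fixpoint
// it will never change again, so there is no reason to record that ToAA
// depends on it. Otherwise ToAA is remembered as a required or optional
// dependence; the distinction presumably controls how aggressively ToAA is
// invalidated or re-updated when FromAA changes.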
5315 if (FromAA.getState().isAtFixpoint())
5316 return;
5317
5318 if (DepClass == DepClassTy::REQUIRED)
5319 QueryMap[&FromAA].RequiredAAs.insert(
5320 const_cast<AbstractAttribute *>(&ToAA));
5321 else
5322 QueryMap[&FromAA].OptionalAAs.insert(
5323 const_cast<AbstractAttribute *>(&ToAA));
5324 QueriedNonFixAA = true;
5325}
5326
5327void Attributor::identifyDefaultAbstractAttributes(Function &F) {
5328 if (!VisitedFunctions.insert(&F).second)
5329 return;
5330 if (F.isDeclaration())
5331 return;
5332
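// Descriptive note (added by the editor): attributes are seeded per IR
// position. FPos below denotes the function itself; positions for the
// returned value and each argument are created further down, e.g. for
// `define i32* @f(i32* %p)` there would be positions for @f, its return
// value, and the argument %p.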
5333 IRPosition FPos = IRPosition::function(F);
5334
5335 // Check for dead BasicBlocks in every function.
5336 // We need dead instruction detection because we do not want to deal with
5337 // broken IR in which SSA rules do not apply.
5338 getOrCreateAAFor<AAIsDead>(FPos);
5339
5340 // Every function might be "will-return".
5341 getOrCreateAAFor<AAWillReturn>(FPos);
5342
5343 // Every function can be nounwind.
5344 getOrCreateAAFor<AANoUnwind>(FPos);
5345
5346 // Every function might be marked "nosync".
5347 getOrCreateAAFor<AANoSync>(FPos);
5348
5349 // Every function might be "no-free".
5350 getOrCreateAAFor<AANoFree>(FPos);
5351
5352 // Every function might be "no-return".
5353 getOrCreateAAFor<AANoReturn>(FPos);
5354
5355 // Every function might be "no-recurse".
5356 getOrCreateAAFor<AANoRecurse>(FPos);
5357
5358 // Every function might be "readnone/readonly/writeonly/...".
5359 getOrCreateAAFor<AAMemoryBehavior>(FPos);
5360
5361 // Every function might be applicable for Heap-To-Stack conversion.
5362 if (EnableHeapToStack)
5363 getOrCreateAAFor<AAHeapToStack>(FPos);
5364
5365 // Return attributes are only appropriate if the return type is non-void.
5366 Type *ReturnType = F.getReturnType();
5367 if (!ReturnType->isVoidTy()) {
5368 // Argument attribute "returned" --- Create only one per function even
5369 // though it is an argument attribute.
5370 getOrCreateAAFor<AAReturnedValues>(FPos);
5371
5372 IRPosition RetPos = IRPosition::returned(F);
5373
5374 // Every returned value might be dead.
5375 getOrCreateAAFor<AAIsDead>(RetPos);
5376
5377 // Every returned value might be simplified.
5378 getOrCreateAAFor<AAValueSimplify>(RetPos);
5379
5380 if (ReturnType->isPointerTy()) {
5381
5382 // Every function with pointer return type might be marked align.
5383 getOrCreateAAFor<AAAlign>(RetPos);
5384
5385 // Every function with pointer return type might be marked nonnull.
5386 getOrCreateAAFor<AANonNull>(RetPos);
5387
5388 // Every function with pointer return type might be marked noalias.
5389 getOrCreateAAFor<AANoAlias>(RetPos);
5390
5391 // Every function with pointer return type might be marked
5392 // dereferenceable.
5393 getOrCreateAAFor<AADereferenceable>(RetPos);
5394 }
5395 }
5396
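// Descriptive note (added by the editor): each argument gets a value
// simplification attribute; pointer arguments additionally get the full set
// of pointer attributes (nonnull, noalias, dereferenceable, align, nocapture,
// memory behavior, nofree), mirroring the returned-value handling above.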
5397 for (Argument &Arg : F.args()) {
5398 IRPosition ArgPos = IRPosition::argument(Arg);
5399
5400 // Every argument might be simplified.
5401 getOrCreateAAFor<AAValueSimplify>(ArgPos);
5402
5403 if (Arg.getType()->isPointerTy()) {
5404 // Every argument with pointer type might be marked nonnull.
5405 getOrCreateAAFor<AANonNull>(ArgPos);
5406
5407 // Every argument with pointer type might be marked noalias.
5408 getOrCreateAAFor<AANoAlias>(ArgPos);
5409
5410 // Every argument with pointer type might be marked dereferenceable.
5411 getOrCreateAAFor<AADereferenceable>(ArgPos);
5412
5413 // Every argument with pointer type might be marked align.
5414 getOrCreateAAFor<AAAlign>(ArgPos);
5415
5416 // Every argument with pointer type might be marked nocapture.
5417 getOrCreateAAFor<AANoCapture>(ArgPos);
5418
5419 // Every argument with pointer type might be marked
5420 // "readnone/readonly/writeonly/..."
5421 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
5422
5423 // Every argument with pointer type might be marked nofree.