File: | llvm/lib/Transforms/IPO/AttributorAttributes.cpp |
Warning: | line 4989, column 9 Value stored to 'HasChanged' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // See the Attributor.h file comment and the class descriptions in that file for |
10 | // more information. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "llvm/Transforms/IPO/Attributor.h" |
15 | |
16 | #include "llvm/ADT/SCCIterator.h" |
17 | #include "llvm/ADT/SmallPtrSet.h" |
18 | #include "llvm/ADT/Statistic.h" |
19 | #include "llvm/Analysis/AliasAnalysis.h" |
20 | #include "llvm/Analysis/AssumeBundleQueries.h" |
21 | #include "llvm/Analysis/AssumptionCache.h" |
22 | #include "llvm/Analysis/CaptureTracking.h" |
23 | #include "llvm/Analysis/LazyValueInfo.h" |
24 | #include "llvm/Analysis/MemoryBuiltins.h" |
25 | #include "llvm/Analysis/ScalarEvolution.h" |
26 | #include "llvm/Analysis/TargetTransformInfo.h" |
27 | #include "llvm/Analysis/ValueTracking.h" |
28 | #include "llvm/IR/IRBuilder.h" |
29 | #include "llvm/IR/Instruction.h" |
30 | #include "llvm/IR/IntrinsicInst.h" |
31 | #include "llvm/IR/NoFolder.h" |
32 | #include "llvm/Support/CommandLine.h" |
33 | #include "llvm/Transforms/IPO/ArgumentPromotion.h" |
34 | #include "llvm/Transforms/Utils/Local.h" |
35 | |
36 | #include <cassert> |
37 | |
38 | using namespace llvm; |
39 | |
40 | #define DEBUG_TYPE"attributor" "attributor" |
41 | |
42 | static cl::opt<bool> ManifestInternal( |
43 | "attributor-manifest-internal", cl::Hidden, |
44 | cl::desc("Manifest Attributor internal string attributes."), |
45 | cl::init(false)); |
46 | |
47 | static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128), |
48 | cl::Hidden); |
49 | |
50 | template <> |
51 | unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0; |
52 | |
53 | static cl::opt<unsigned, true> MaxPotentialValues( |
54 | "attributor-max-potential-values", cl::Hidden, |
55 | cl::desc("Maximum number of potential values to be " |
56 | "tracked for each position."), |
57 | cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues), |
58 | cl::init(7)); |
59 | |
60 | STATISTIC(NumAAs, "Number of abstract attributes created")static llvm::Statistic NumAAs = {"attributor", "NumAAs", "Number of abstract attributes created" }; |
61 | |
62 | // Some helper macros to deal with statistics tracking. |
63 | // |
64 | // Usage: |
65 | // For simple IR attribute tracking overload trackStatistics in the abstract |
66 | // attribute and choose the right STATS_DECLTRACK_********* macro, |
67 | // e.g.,: |
68 | // void trackStatistics() const override { |
69 | // STATS_DECLTRACK_ARG_ATTR(returned) |
70 | // } |
71 | // If there is a single "increment" side one can use the macro |
72 | // STATS_DECLTRACK with a custom message. If there are multiple increment |
73 | // sides, STATS_DECL and STATS_TRACK can also be used separately. |
74 | // |
75 | #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)("Number of " "TYPE" " marked '" "NAME" "'") \ |
76 | ("Number of " #TYPE " marked '" #NAME "'") |
77 | #define BUILD_STAT_NAME(NAME, TYPE)NumIRTYPE_NAME NumIR##TYPE##_##NAME |
78 | #define STATS_DECL_(NAME, MSG)static llvm::Statistic NAME = {"attributor", "NAME", MSG}; STATISTIC(NAME, MSG)static llvm::Statistic NAME = {"attributor", "NAME", MSG}; |
79 | #define STATS_DECL(NAME, TYPE, MSG)static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; \ |
80 | STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG)static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; |
81 | #define STATS_TRACK(NAME, TYPE)++(NumIRTYPE_NAME); ++(BUILD_STAT_NAME(NAME, TYPE)NumIRTYPE_NAME); |
82 | #define STATS_DECLTRACK(NAME, TYPE, MSG){ static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; ++(NumIRTYPE_NAME); } \ |
83 | { \ |
84 | STATS_DECL(NAME, TYPE, MSG)static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; \ |
85 | STATS_TRACK(NAME, TYPE)++(NumIRTYPE_NAME); \ |
86 | } |
87 | #define STATS_DECLTRACK_ARG_ATTR(NAME){ static llvm::Statistic NumIRArguments_NAME = {"attributor", "NumIRArguments_NAME", ("Number of " "arguments" " marked '" "NAME" "'")};; ++(NumIRArguments_NAME); } \ |
88 | STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME)){ static llvm::Statistic NumIRArguments_NAME = {"attributor", "NumIRArguments_NAME", ("Number of " "arguments" " marked '" "NAME" "'")};; ++(NumIRArguments_NAME); } |
89 | #define STATS_DECLTRACK_CSARG_ATTR(NAME){ static llvm::Statistic NumIRCSArguments_NAME = {"attributor" , "NumIRCSArguments_NAME", ("Number of " "call site arguments" " marked '" "NAME" "'")};; ++(NumIRCSArguments_NAME); } \ |
90 | STATS_DECLTRACK(NAME, CSArguments, \{ static llvm::Statistic NumIRCSArguments_NAME = {"attributor" , "NumIRCSArguments_NAME", ("Number of " "call site arguments" " marked '" "NAME" "'")};; ++(NumIRCSArguments_NAME); } |
91 | BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME)){ static llvm::Statistic NumIRCSArguments_NAME = {"attributor" , "NumIRCSArguments_NAME", ("Number of " "call site arguments" " marked '" "NAME" "'")};; ++(NumIRCSArguments_NAME); } |
92 | #define STATS_DECLTRACK_FN_ATTR(NAME){ static llvm::Statistic NumIRFunction_NAME = {"attributor", "NumIRFunction_NAME" , ("Number of " "functions" " marked '" "NAME" "'")};; ++(NumIRFunction_NAME ); } \ |
93 | STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME)){ static llvm::Statistic NumIRFunction_NAME = {"attributor", "NumIRFunction_NAME" , ("Number of " "functions" " marked '" "NAME" "'")};; ++(NumIRFunction_NAME ); } |
94 | #define STATS_DECLTRACK_CS_ATTR(NAME){ static llvm::Statistic NumIRCS_NAME = {"attributor", "NumIRCS_NAME" , ("Number of " "call site" " marked '" "NAME" "'")};; ++(NumIRCS_NAME ); } \ |
95 | STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME)){ static llvm::Statistic NumIRCS_NAME = {"attributor", "NumIRCS_NAME" , ("Number of " "call site" " marked '" "NAME" "'")};; ++(NumIRCS_NAME ); } |
96 | #define STATS_DECLTRACK_FNRET_ATTR(NAME){ static llvm::Statistic NumIRFunctionReturn_NAME = {"attributor" , "NumIRFunctionReturn_NAME", ("Number of " "function returns" " marked '" "NAME" "'")};; ++(NumIRFunctionReturn_NAME); } \ |
97 | STATS_DECLTRACK(NAME, FunctionReturn, \{ static llvm::Statistic NumIRFunctionReturn_NAME = {"attributor" , "NumIRFunctionReturn_NAME", ("Number of " "function returns" " marked '" "NAME" "'")};; ++(NumIRFunctionReturn_NAME); } |
98 | BUILD_STAT_MSG_IR_ATTR(function returns, NAME)){ static llvm::Statistic NumIRFunctionReturn_NAME = {"attributor" , "NumIRFunctionReturn_NAME", ("Number of " "function returns" " marked '" "NAME" "'")};; ++(NumIRFunctionReturn_NAME); } |
99 | #define STATS_DECLTRACK_CSRET_ATTR(NAME){ static llvm::Statistic NumIRCSReturn_NAME = {"attributor", "NumIRCSReturn_NAME" , ("Number of " "call site returns" " marked '" "NAME" "'")}; ; ++(NumIRCSReturn_NAME); } \ |
100 | STATS_DECLTRACK(NAME, CSReturn, \{ static llvm::Statistic NumIRCSReturn_NAME = {"attributor", "NumIRCSReturn_NAME" , ("Number of " "call site returns" " marked '" "NAME" "'")}; ; ++(NumIRCSReturn_NAME); } |
101 | BUILD_STAT_MSG_IR_ATTR(call site returns, NAME)){ static llvm::Statistic NumIRCSReturn_NAME = {"attributor", "NumIRCSReturn_NAME" , ("Number of " "call site returns" " marked '" "NAME" "'")}; ; ++(NumIRCSReturn_NAME); } |
102 | #define STATS_DECLTRACK_FLOATING_ATTR(NAME){ static llvm::Statistic NumIRFloating_NAME = {"attributor", "NumIRFloating_NAME" , ("Number of floating values known to be '" "NAME" "'")};; ++ (NumIRFloating_NAME); } \ |
103 | STATS_DECLTRACK(NAME, Floating, \{ static llvm::Statistic NumIRFloating_NAME = {"attributor", "NumIRFloating_NAME" , ("Number of floating values known to be '" #NAME "'")};; ++ (NumIRFloating_NAME); } |
104 | ("Number of floating values known to be '" #NAME "'")){ static llvm::Statistic NumIRFloating_NAME = {"attributor", "NumIRFloating_NAME" , ("Number of floating values known to be '" #NAME "'")};; ++ (NumIRFloating_NAME); } |
105 | |
106 | // Specialization of the operator<< for abstract attributes subclasses. This |
107 | // disambiguates situations where multiple operators are applicable. |
108 | namespace llvm { |
109 | #define PIPE_OPERATOR(CLASS) \ |
110 | raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \ |
111 | return OS << static_cast<const AbstractAttribute &>(AA); \ |
112 | } |
113 | |
114 | PIPE_OPERATOR(AAIsDead) |
115 | PIPE_OPERATOR(AANoUnwind) |
116 | PIPE_OPERATOR(AANoSync) |
117 | PIPE_OPERATOR(AANoRecurse) |
118 | PIPE_OPERATOR(AAWillReturn) |
119 | PIPE_OPERATOR(AANoReturn) |
120 | PIPE_OPERATOR(AAReturnedValues) |
121 | PIPE_OPERATOR(AANonNull) |
122 | PIPE_OPERATOR(AANoAlias) |
123 | PIPE_OPERATOR(AADereferenceable) |
124 | PIPE_OPERATOR(AAAlign) |
125 | PIPE_OPERATOR(AANoCapture) |
126 | PIPE_OPERATOR(AAValueSimplify) |
127 | PIPE_OPERATOR(AANoFree) |
128 | PIPE_OPERATOR(AAHeapToStack) |
129 | PIPE_OPERATOR(AAReachability) |
130 | PIPE_OPERATOR(AAMemoryBehavior) |
131 | PIPE_OPERATOR(AAMemoryLocation) |
132 | PIPE_OPERATOR(AAValueConstantRange) |
133 | PIPE_OPERATOR(AAPrivatizablePtr) |
134 | PIPE_OPERATOR(AAUndefinedBehavior) |
135 | PIPE_OPERATOR(AAPotentialValues) |
136 | PIPE_OPERATOR(AANoUndef) |
137 | |
138 | #undef PIPE_OPERATOR |
139 | } // namespace llvm |
140 | |
141 | namespace { |
142 | |
143 | static Optional<ConstantInt *> |
144 | getAssumedConstantInt(Attributor &A, const Value &V, |
145 | const AbstractAttribute &AA, |
146 | bool &UsedAssumedInformation) { |
147 | Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation); |
148 | if (C.hasValue()) |
149 | return dyn_cast_or_null<ConstantInt>(C.getValue()); |
150 | return llvm::None; |
151 | } |
152 | |
153 | /// Get pointer operand of memory accessing instruction. If \p I is |
154 | /// not a memory accessing instruction, return nullptr. If \p AllowVolatile, |
155 | /// is set to false and the instruction is volatile, return nullptr. |
156 | static const Value *getPointerOperand(const Instruction *I, |
157 | bool AllowVolatile) { |
158 | if (auto *LI = dyn_cast<LoadInst>(I)) { |
159 | if (!AllowVolatile && LI->isVolatile()) |
160 | return nullptr; |
161 | return LI->getPointerOperand(); |
162 | } |
163 | |
164 | if (auto *SI = dyn_cast<StoreInst>(I)) { |
165 | if (!AllowVolatile && SI->isVolatile()) |
166 | return nullptr; |
167 | return SI->getPointerOperand(); |
168 | } |
169 | |
170 | if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) { |
171 | if (!AllowVolatile && CXI->isVolatile()) |
172 | return nullptr; |
173 | return CXI->getPointerOperand(); |
174 | } |
175 | |
176 | if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) { |
177 | if (!AllowVolatile && RMWI->isVolatile()) |
178 | return nullptr; |
179 | return RMWI->getPointerOperand(); |
180 | } |
181 | |
182 | return nullptr; |
183 | } |
184 | |
185 | /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and |
186 | /// advanced by \p Offset bytes. To aid later analysis the method tries to build |
187 | /// getelement pointer instructions that traverse the natural type of \p Ptr if |
188 | /// possible. If that fails, the remaining offset is adjusted byte-wise, hence |
189 | /// through a cast to i8*. |
190 | /// |
191 | /// TODO: This could probably live somewhere more prominantly if it doesn't |
192 | /// already exist. |
193 | static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset, |
194 | IRBuilder<NoFolder> &IRB, const DataLayout &DL) { |
195 | assert(Offset >= 0 && "Negative offset not supported yet!")((Offset >= 0 && "Negative offset not supported yet!" ) ? static_cast<void> (0) : __assert_fail ("Offset >= 0 && \"Negative offset not supported yet!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 195, __PRETTY_FUNCTION__)); |
196 | LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offsetdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Construct pointer: " << *Ptr << " + " << Offset << "-bytes as " << *ResTy << "\n"; } } while (false) |
197 | << "-bytes as " << *ResTy << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Construct pointer: " << *Ptr << " + " << Offset << "-bytes as " << *ResTy << "\n"; } } while (false); |
198 | |
199 | // The initial type we are trying to traverse to get nice GEPs. |
200 | Type *Ty = Ptr->getType(); |
201 | |
202 | SmallVector<Value *, 4> Indices; |
203 | std::string GEPName = Ptr->getName().str(); |
204 | while (Offset) { |
205 | uint64_t Idx, Rem; |
206 | |
207 | if (auto *STy = dyn_cast<StructType>(Ty)) { |
208 | const StructLayout *SL = DL.getStructLayout(STy); |
209 | if (int64_t(SL->getSizeInBytes()) < Offset) |
210 | break; |
211 | Idx = SL->getElementContainingOffset(Offset); |
212 | assert(Idx < STy->getNumElements() && "Offset calculation error!")((Idx < STy->getNumElements() && "Offset calculation error!" ) ? static_cast<void> (0) : __assert_fail ("Idx < STy->getNumElements() && \"Offset calculation error!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 212, __PRETTY_FUNCTION__)); |
213 | Rem = Offset - SL->getElementOffset(Idx); |
214 | Ty = STy->getElementType(Idx); |
215 | } else if (auto *PTy = dyn_cast<PointerType>(Ty)) { |
216 | Ty = PTy->getElementType(); |
217 | if (!Ty->isSized()) |
218 | break; |
219 | uint64_t ElementSize = DL.getTypeAllocSize(Ty); |
220 | assert(ElementSize && "Expected type with size!")((ElementSize && "Expected type with size!") ? static_cast <void> (0) : __assert_fail ("ElementSize && \"Expected type with size!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 220, __PRETTY_FUNCTION__)); |
221 | Idx = Offset / ElementSize; |
222 | Rem = Offset % ElementSize; |
223 | } else { |
224 | // Non-aggregate type, we cast and make byte-wise progress now. |
225 | break; |
226 | } |
227 | |
228 | LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offsetdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { errs() << "Ty: " << *Ty << " Offset: " << Offset << " Idx: " << Idx << " Rem: " << Rem << "\n"; } } while (false) |
229 | << " Idx: " << Idx << " Rem: " << Rem << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { errs() << "Ty: " << *Ty << " Offset: " << Offset << " Idx: " << Idx << " Rem: " << Rem << "\n"; } } while (false); |
230 | |
231 | GEPName += "." + std::to_string(Idx); |
232 | Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx)); |
233 | Offset = Rem; |
234 | } |
235 | |
236 | // Create a GEP if we collected indices above. |
237 | if (Indices.size()) |
238 | Ptr = IRB.CreateGEP(Ptr, Indices, GEPName); |
239 | |
240 | // If an offset is left we use byte-wise adjustment. |
241 | if (Offset) { |
242 | Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()); |
243 | Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset), |
244 | GEPName + ".b" + Twine(Offset)); |
245 | } |
246 | |
247 | // Ensure the result has the requested type. |
248 | Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast"); |
249 | |
250 | LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Constructed pointer: " << *Ptr << "\n"; } } while (false); |
251 | return Ptr; |
252 | } |
253 | |
254 | /// Recursively visit all values that might become \p IRP at some point. This |
255 | /// will be done by looking through cast instructions, selects, phis, and calls |
256 | /// with the "returned" attribute. Once we cannot look through the value any |
257 | /// further, the callback \p VisitValueCB is invoked and passed the current |
258 | /// value, the \p State, and a flag to indicate if we stripped anything. |
259 | /// Stripped means that we unpacked the value associated with \p IRP at least |
260 | /// once. Note that the value used for the callback may still be the value |
261 | /// associated with \p IRP (due to PHIs). To limit how much effort is invested, |
262 | /// we will never visit more values than specified by \p MaxValues. |
263 | template <typename AAType, typename StateTy> |
264 | static bool genericValueTraversal( |
265 | Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State, |
266 | function_ref<bool(Value &, const Instruction *, StateTy &, bool)> |
267 | VisitValueCB, |
268 | const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16, |
269 | function_ref<Value *(Value *)> StripCB = nullptr) { |
270 | |
271 | const AAIsDead *LivenessAA = nullptr; |
272 | if (IRP.getAnchorScope()) |
273 | LivenessAA = &A.getAAFor<AAIsDead>( |
274 | QueryingAA, IRPosition::function(*IRP.getAnchorScope()), |
275 | /* TrackDependence */ false); |
276 | bool AnyDead = false; |
277 | |
278 | using Item = std::pair<Value *, const Instruction *>; |
279 | SmallSet<Item, 16> Visited; |
280 | SmallVector<Item, 16> Worklist; |
281 | Worklist.push_back({&IRP.getAssociatedValue(), CtxI}); |
282 | |
283 | int Iteration = 0; |
284 | do { |
285 | Item I = Worklist.pop_back_val(); |
286 | Value *V = I.first; |
287 | CtxI = I.second; |
288 | if (StripCB) |
289 | V = StripCB(V); |
290 | |
291 | // Check if we should process the current value. To prevent endless |
292 | // recursion keep a record of the values we followed! |
293 | if (!Visited.insert(I).second) |
294 | continue; |
295 | |
296 | // Make sure we limit the compile time for complex expressions. |
297 | if (Iteration++ >= MaxValues) |
298 | return false; |
299 | |
300 | // Explicitly look through calls with a "returned" attribute if we do |
301 | // not have a pointer as stripPointerCasts only works on them. |
302 | Value *NewV = nullptr; |
303 | if (V->getType()->isPointerTy()) { |
304 | NewV = V->stripPointerCasts(); |
305 | } else { |
306 | auto *CB = dyn_cast<CallBase>(V); |
307 | if (CB && CB->getCalledFunction()) { |
308 | for (Argument &Arg : CB->getCalledFunction()->args()) |
309 | if (Arg.hasReturnedAttr()) { |
310 | NewV = CB->getArgOperand(Arg.getArgNo()); |
311 | break; |
312 | } |
313 | } |
314 | } |
315 | if (NewV && NewV != V) { |
316 | Worklist.push_back({NewV, CtxI}); |
317 | continue; |
318 | } |
319 | |
320 | // Look through select instructions, visit both potential values. |
321 | if (auto *SI = dyn_cast<SelectInst>(V)) { |
322 | Worklist.push_back({SI->getTrueValue(), CtxI}); |
323 | Worklist.push_back({SI->getFalseValue(), CtxI}); |
324 | continue; |
325 | } |
326 | |
327 | // Look through phi nodes, visit all live operands. |
328 | if (auto *PHI = dyn_cast<PHINode>(V)) { |
329 | assert(LivenessAA &&((LivenessAA && "Expected liveness in the presence of instructions!" ) ? static_cast<void> (0) : __assert_fail ("LivenessAA && \"Expected liveness in the presence of instructions!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 330, __PRETTY_FUNCTION__)) |
330 | "Expected liveness in the presence of instructions!")((LivenessAA && "Expected liveness in the presence of instructions!" ) ? static_cast<void> (0) : __assert_fail ("LivenessAA && \"Expected liveness in the presence of instructions!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 330, __PRETTY_FUNCTION__)); |
331 | for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { |
332 | BasicBlock *IncomingBB = PHI->getIncomingBlock(u); |
333 | if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA, |
334 | LivenessAA, |
335 | /* CheckBBLivenessOnly */ true)) { |
336 | AnyDead = true; |
337 | continue; |
338 | } |
339 | Worklist.push_back( |
340 | {PHI->getIncomingValue(u), IncomingBB->getTerminator()}); |
341 | } |
342 | continue; |
343 | } |
344 | |
345 | if (UseValueSimplify && !isa<Constant>(V)) { |
346 | bool UsedAssumedInformation = false; |
347 | Optional<Constant *> C = |
348 | A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation); |
349 | if (!C.hasValue()) |
350 | continue; |
351 | if (Value *NewV = C.getValue()) { |
352 | Worklist.push_back({NewV, CtxI}); |
353 | continue; |
354 | } |
355 | } |
356 | |
357 | // Once a leaf is reached we inform the user through the callback. |
358 | if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) |
359 | return false; |
360 | } while (!Worklist.empty()); |
361 | |
362 | // If we actually used liveness information so we have to record a dependence. |
363 | if (AnyDead) |
364 | A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL); |
365 | |
366 | // All values have been visited. |
367 | return true; |
368 | } |
369 | |
370 | const Value *stripAndAccumulateMinimalOffsets( |
371 | Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val, |
372 | const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, |
373 | bool UseAssumed = false) { |
374 | |
375 | auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool { |
376 | const IRPosition &Pos = IRPosition::value(V); |
377 | // Only track dependence if we are going to use the assumed info. |
378 | const AAValueConstantRange &ValueConstantRangeAA = |
379 | A.getAAFor<AAValueConstantRange>(QueryingAA, Pos, |
380 | /* TrackDependence */ UseAssumed); |
381 | ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed() |
382 | : ValueConstantRangeAA.getKnown(); |
383 | // We can only use the lower part of the range because the upper part can |
384 | // be higher than what the value can really be. |
385 | ROffset = Range.getSignedMin(); |
386 | return true; |
387 | }; |
388 | |
389 | return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds, |
390 | AttributorAnalysis); |
391 | } |
392 | |
393 | static const Value *getMinimalBaseOfAccsesPointerOperand( |
394 | Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I, |
395 | int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) { |
396 | const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false); |
397 | if (!Ptr) |
398 | return nullptr; |
399 | APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); |
400 | const Value *Base = stripAndAccumulateMinimalOffsets( |
401 | A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds); |
402 | |
403 | BytesOffset = OffsetAPInt.getSExtValue(); |
404 | return Base; |
405 | } |
406 | |
407 | static const Value * |
408 | getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset, |
409 | const DataLayout &DL, |
410 | bool AllowNonInbounds = false) { |
411 | const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false); |
412 | if (!Ptr) |
413 | return nullptr; |
414 | |
415 | return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL, |
416 | AllowNonInbounds); |
417 | } |
418 | |
419 | /// Helper function to clamp a state \p S of type \p StateType with the |
420 | /// information in \p R and indicate/return if \p S did change (as-in update is |
421 | /// required to be run again). |
422 | template <typename StateType> |
423 | ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) { |
424 | auto Assumed = S.getAssumed(); |
425 | S ^= R; |
426 | return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED |
427 | : ChangeStatus::CHANGED; |
428 | } |
429 | |
430 | /// Clamp the information known for all returned values of a function |
431 | /// (identified by \p QueryingAA) into \p S. |
432 | template <typename AAType, typename StateType = typename AAType::StateType> |
433 | static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA, |
434 | StateType &S) { |
435 | LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp return value states for " << QueryingAA << " into " << S << "\n" ; } } while (false) |
436 | << QueryingAA << " into " << S << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp return value states for " << QueryingAA << " into " << S << "\n" ; } } while (false); |
437 | |
438 | assert((QueryingAA.getIRPosition().getPositionKind() ==(((QueryingAA.getIRPosition().getPositionKind() == IRPosition ::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind( ) == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? static_cast<void> (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 443, __PRETTY_FUNCTION__)) |
439 | IRPosition::IRP_RETURNED ||(((QueryingAA.getIRPosition().getPositionKind() == IRPosition ::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind( ) == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? static_cast<void> (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 443, __PRETTY_FUNCTION__)) |
440 | QueryingAA.getIRPosition().getPositionKind() ==(((QueryingAA.getIRPosition().getPositionKind() == IRPosition ::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind( ) == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? static_cast<void> (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 443, __PRETTY_FUNCTION__)) |
441 | IRPosition::IRP_CALL_SITE_RETURNED) &&(((QueryingAA.getIRPosition().getPositionKind() == IRPosition ::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind( ) == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? static_cast<void> (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 443, __PRETTY_FUNCTION__)) |
442 | "Can only clamp returned value states for a function returned or call "(((QueryingAA.getIRPosition().getPositionKind() == IRPosition ::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind( ) == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? static_cast<void> (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 443, __PRETTY_FUNCTION__)) |
443 | "site returned position!")(((QueryingAA.getIRPosition().getPositionKind() == IRPosition ::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind( ) == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? static_cast<void> (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 443, __PRETTY_FUNCTION__)); |
444 | |
445 | // Use an optional state as there might not be any return values and we want |
446 | // to join (IntegerState::operator&) the state of all there are. |
447 | Optional<StateType> T; |
448 | |
449 | // Callback for each possibly returned value. |
450 | auto CheckReturnValue = [&](Value &RV) -> bool { |
451 | const IRPosition &RVPos = IRPosition::value(RV); |
452 | const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos); |
453 | LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr() << " @ " << RVPos << "\n"; } } while (false) |
454 | << " @ " << RVPos << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr() << " @ " << RVPos << "\n"; } } while (false); |
455 | const StateType &AAS = AA.getState(); |
456 | if (T.hasValue()) |
457 | *T &= AAS; |
458 | else |
459 | T = AAS; |
460 | LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << Tdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T << "\n"; } } while (false) |
461 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T << "\n"; } } while (false); |
462 | return T->isValidState(); |
463 | }; |
464 | |
465 | if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA)) |
466 | S.indicatePessimisticFixpoint(); |
467 | else if (T.hasValue()) |
468 | S ^= *T; |
469 | } |
470 | |
471 | /// Helper class for generic deduction: return value -> returned position. |
472 | template <typename AAType, typename BaseType, |
473 | typename StateType = typename BaseType::StateType> |
474 | struct AAReturnedFromReturnedValues : public BaseType { |
475 | AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A) |
476 | : BaseType(IRP, A) {} |
477 | |
478 | /// See AbstractAttribute::updateImpl(...). |
479 | ChangeStatus updateImpl(Attributor &A) override { |
480 | StateType S(StateType::getBestState(this->getState())); |
481 | clampReturnedValueStates<AAType, StateType>(A, *this, S); |
482 | // TODO: If we know we visited all returned values, thus no are assumed |
483 | // dead, we can take the known information from the state T. |
484 | return clampStateAndIndicateChange<StateType>(this->getState(), S); |
485 | } |
486 | }; |
487 | |
488 | /// Clamp the information known at all call sites for a given argument |
489 | /// (identified by \p QueryingAA) into \p S. |
490 | template <typename AAType, typename StateType = typename AAType::StateType> |
491 | static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA, |
492 | StateType &S) { |
493 | LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp call site argument states for " << QueryingAA << " into " << S << "\n" ; } } while (false) |
494 | << QueryingAA << " into " << S << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp call site argument states for " << QueryingAA << " into " << S << "\n" ; } } while (false); |
495 | |
496 | assert(QueryingAA.getIRPosition().getPositionKind() ==((QueryingAA.getIRPosition().getPositionKind() == IRPosition:: IRP_ARGUMENT && "Can only clamp call site argument states for an argument position!" ) ? static_cast<void> (0) : __assert_fail ("QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_ARGUMENT && \"Can only clamp call site argument states for an argument position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 498, __PRETTY_FUNCTION__)) |
497 | IRPosition::IRP_ARGUMENT &&((QueryingAA.getIRPosition().getPositionKind() == IRPosition:: IRP_ARGUMENT && "Can only clamp call site argument states for an argument position!" ) ? static_cast<void> (0) : __assert_fail ("QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_ARGUMENT && \"Can only clamp call site argument states for an argument position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 498, __PRETTY_FUNCTION__)) |
498 | "Can only clamp call site argument states for an argument position!")((QueryingAA.getIRPosition().getPositionKind() == IRPosition:: IRP_ARGUMENT && "Can only clamp call site argument states for an argument position!" ) ? static_cast<void> (0) : __assert_fail ("QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_ARGUMENT && \"Can only clamp call site argument states for an argument position!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 498, __PRETTY_FUNCTION__)); |
499 | |
500 | // Use an optional state as there might not be any return values and we want |
501 | // to join (IntegerState::operator&) the state of all there are. |
502 | Optional<StateType> T; |
503 | |
504 | // The argument number which is also the call site argument number. |
505 | unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo(); |
506 | |
507 | auto CallSiteCheck = [&](AbstractCallSite ACS) { |
508 | const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); |
509 | // Check if a coresponding argument was found or if it is on not associated |
510 | // (which can happen for callback calls). |
511 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) |
512 | return false; |
513 | |
514 | const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos); |
515 | LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] ACS: " << *ACS.getInstruction() << " AA: " << AA.getAsStr( ) << " @" << ACSArgPos << "\n"; } } while ( false) |
516 | << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] ACS: " << *ACS.getInstruction() << " AA: " << AA.getAsStr( ) << " @" << ACSArgPos << "\n"; } } while ( false); |
517 | const StateType &AAS = AA.getState(); |
518 | if (T.hasValue()) |
519 | *T &= AAS; |
520 | else |
521 | T = AAS; |
522 | LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << Tdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T << "\n"; } } while (false) |
523 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T << "\n"; } } while (false); |
524 | return T->isValidState(); |
525 | }; |
526 | |
527 | bool AllCallSitesKnown; |
528 | if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true, |
529 | AllCallSitesKnown)) |
530 | S.indicatePessimisticFixpoint(); |
531 | else if (T.hasValue()) |
532 | S ^= *T; |
533 | } |
534 | |
535 | /// Helper class for generic deduction: call site argument -> argument position. |
536 | template <typename AAType, typename BaseType, |
537 | typename StateType = typename AAType::StateType> |
538 | struct AAArgumentFromCallSiteArguments : public BaseType { |
539 | AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A) |
540 | : BaseType(IRP, A) {} |
541 | |
542 | /// See AbstractAttribute::updateImpl(...). |
543 | ChangeStatus updateImpl(Attributor &A) override { |
544 | StateType S(StateType::getBestState(this->getState())); |
545 | clampCallSiteArgumentStates<AAType, StateType>(A, *this, S); |
546 | // TODO: If we know we visited all incoming values, thus no are assumed |
547 | // dead, we can take the known information from the state T. |
548 | return clampStateAndIndicateChange<StateType>(this->getState(), S); |
549 | } |
550 | }; |
551 | |
552 | /// Helper class for generic replication: function returned -> cs returned. |
553 | template <typename AAType, typename BaseType, |
554 | typename StateType = typename BaseType::StateType> |
555 | struct AACallSiteReturnedFromReturned : public BaseType { |
556 | AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A) |
557 | : BaseType(IRP, A) {} |
558 | |
559 | /// See AbstractAttribute::updateImpl(...). |
560 | ChangeStatus updateImpl(Attributor &A) override { |
561 | assert(this->getIRPosition().getPositionKind() ==((this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && "Can only wrap function returned positions for call site returned " "positions!") ? static_cast<void> (0) : __assert_fail ( "this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && \"Can only wrap function returned positions for call site returned \" \"positions!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 564, __PRETTY_FUNCTION__)) |
562 | IRPosition::IRP_CALL_SITE_RETURNED &&((this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && "Can only wrap function returned positions for call site returned " "positions!") ? static_cast<void> (0) : __assert_fail ( "this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && \"Can only wrap function returned positions for call site returned \" \"positions!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 564, __PRETTY_FUNCTION__)) |
563 | "Can only wrap function returned positions for call site returned "((this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && "Can only wrap function returned positions for call site returned " "positions!") ? static_cast<void> (0) : __assert_fail ( "this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && \"Can only wrap function returned positions for call site returned \" \"positions!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 564, __PRETTY_FUNCTION__)) |
564 | "positions!")((this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && "Can only wrap function returned positions for call site returned " "positions!") ? static_cast<void> (0) : __assert_fail ( "this->getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED && \"Can only wrap function returned positions for call site returned \" \"positions!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 564, __PRETTY_FUNCTION__)); |
565 | auto &S = this->getState(); |
566 | |
567 | const Function *AssociatedFunction = |
568 | this->getIRPosition().getAssociatedFunction(); |
569 | if (!AssociatedFunction) |
570 | return S.indicatePessimisticFixpoint(); |
571 | |
572 | IRPosition FnPos = IRPosition::returned(*AssociatedFunction); |
573 | const AAType &AA = A.getAAFor<AAType>(*this, FnPos); |
574 | return clampStateAndIndicateChange(S, AA.getState()); |
575 | } |
576 | }; |
577 | |
578 | /// Helper function to accumulate uses. |
579 | template <class AAType, typename StateType = typename AAType::StateType> |
580 | static void followUsesInContext(AAType &AA, Attributor &A, |
581 | MustBeExecutedContextExplorer &Explorer, |
582 | const Instruction *CtxI, |
583 | SetVector<const Use *> &Uses, |
584 | StateType &State) { |
585 | auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI); |
586 | for (unsigned u = 0; u < Uses.size(); ++u) { |
587 | const Use *U = Uses[u]; |
588 | if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) { |
589 | bool Found = Explorer.findInContextOf(UserI, EIt, EEnd); |
590 | if (Found && AA.followUseInMBEC(A, U, UserI, State)) |
591 | for (const Use &Us : UserI->uses()) |
592 | Uses.insert(&Us); |
593 | } |
594 | } |
595 | } |
596 | |
597 | /// Use the must-be-executed-context around \p I to add information into \p S. |
598 | /// The AAType class is required to have `followUseInMBEC` method with the |
599 | /// following signature and behaviour: |
600 | /// |
601 | /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I) |
602 | /// U - Underlying use. |
603 | /// I - The user of the \p U. |
604 | /// Returns true if the value should be tracked transitively. |
605 | /// |
606 | template <class AAType, typename StateType = typename AAType::StateType> |
607 | static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S, |
608 | Instruction &CtxI) { |
609 | |
610 | // Container for (transitive) uses of the associated value. |
611 | SetVector<const Use *> Uses; |
612 | for (const Use &U : AA.getIRPosition().getAssociatedValue().uses()) |
613 | Uses.insert(&U); |
614 | |
615 | MustBeExecutedContextExplorer &Explorer = |
616 | A.getInfoCache().getMustBeExecutedContextExplorer(); |
617 | |
618 | followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S); |
619 | |
620 | if (S.isAtFixpoint()) |
621 | return; |
622 | |
623 | SmallVector<const BranchInst *, 4> BrInsts; |
624 | auto Pred = [&](const Instruction *I) { |
625 | if (const BranchInst *Br = dyn_cast<BranchInst>(I)) |
626 | if (Br->isConditional()) |
627 | BrInsts.push_back(Br); |
628 | return true; |
629 | }; |
630 | |
631 | // Here, accumulate conditional branch instructions in the context. We |
632 | // explore the child paths and collect the known states. The disjunction of |
633 | // those states can be merged to its own state. Let ParentState_i be a state |
634 | // to indicate the known information for an i-th branch instruction in the |
635 | // context. ChildStates are created for its successors respectively. |
636 | // |
637 | // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1} |
638 | // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2} |
639 | // ... |
640 | // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m} |
641 | // |
642 | // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m |
643 | // |
644 | // FIXME: Currently, recursive branches are not handled. For example, we |
645 | // can't deduce that ptr must be dereferenced in below function. |
646 | // |
647 | // void f(int a, int c, int *ptr) { |
648 | // if(a) |
649 | // if (b) { |
650 | // *ptr = 0; |
651 | // } else { |
652 | // *ptr = 1; |
653 | // } |
654 | // else { |
655 | // if (b) { |
656 | // *ptr = 0; |
657 | // } else { |
658 | // *ptr = 1; |
659 | // } |
660 | // } |
661 | // } |
662 | |
663 | Explorer.checkForAllContext(&CtxI, Pred); |
664 | for (const BranchInst *Br : BrInsts) { |
665 | StateType ParentState; |
666 | |
667 | // The known state of the parent state is a conjunction of children's |
668 | // known states so it is initialized with a best state. |
669 | ParentState.indicateOptimisticFixpoint(); |
670 | |
671 | for (const BasicBlock *BB : Br->successors()) { |
672 | StateType ChildState; |
673 | |
674 | size_t BeforeSize = Uses.size(); |
675 | followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState); |
676 | |
677 | // Erase uses which only appear in the child. |
678 | for (auto It = Uses.begin() + BeforeSize; It != Uses.end();) |
679 | It = Uses.erase(It); |
680 | |
681 | ParentState &= ChildState; |
682 | } |
683 | |
684 | // Use only known state. |
685 | S += ParentState; |
686 | } |
687 | } |
688 | |
689 | /// -----------------------NoUnwind Function Attribute-------------------------- |
690 | |
691 | struct AANoUnwindImpl : AANoUnwind { |
692 | AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {} |
693 | |
694 | const std::string getAsStr() const override { |
695 | return getAssumed() ? "nounwind" : "may-unwind"; |
696 | } |
697 | |
698 | /// See AbstractAttribute::updateImpl(...). |
699 | ChangeStatus updateImpl(Attributor &A) override { |
700 | auto Opcodes = { |
701 | (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, |
702 | (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, |
703 | (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; |
704 | |
705 | auto CheckForNoUnwind = [&](Instruction &I) { |
706 | if (!I.mayThrow()) |
707 | return true; |
708 | |
709 | if (const auto *CB = dyn_cast<CallBase>(&I)) { |
710 | const auto &NoUnwindAA = |
711 | A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB)); |
712 | return NoUnwindAA.isAssumedNoUnwind(); |
713 | } |
714 | return false; |
715 | }; |
716 | |
717 | if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes)) |
718 | return indicatePessimisticFixpoint(); |
719 | |
720 | return ChangeStatus::UNCHANGED; |
721 | } |
722 | }; |
723 | |
724 | struct AANoUnwindFunction final : public AANoUnwindImpl { |
725 | AANoUnwindFunction(const IRPosition &IRP, Attributor &A) |
726 | : AANoUnwindImpl(IRP, A) {} |
727 | |
728 | /// See AbstractAttribute::trackStatistics() |
729 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind){ static llvm::Statistic NumIRFunction_nounwind = {"attributor" , "NumIRFunction_nounwind", ("Number of " "functions" " marked '" "nounwind" "'")};; ++(NumIRFunction_nounwind); } } |
730 | }; |
731 | |
732 | /// NoUnwind attribute deduction for a call sites. |
733 | struct AANoUnwindCallSite final : AANoUnwindImpl { |
734 | AANoUnwindCallSite(const IRPosition &IRP, Attributor &A) |
735 | : AANoUnwindImpl(IRP, A) {} |
736 | |
737 | /// See AbstractAttribute::initialize(...). |
738 | void initialize(Attributor &A) override { |
739 | AANoUnwindImpl::initialize(A); |
740 | Function *F = getAssociatedFunction(); |
741 | if (!F || F->isDeclaration()) |
742 | indicatePessimisticFixpoint(); |
743 | } |
744 | |
745 | /// See AbstractAttribute::updateImpl(...). |
746 | ChangeStatus updateImpl(Attributor &A) override { |
747 | // TODO: Once we have call site specific value information we can provide |
748 | // call site specific liveness information and then it makes |
749 | // sense to specialize attributes for call sites arguments instead of |
750 | // redirecting requests to the callee argument. |
751 | Function *F = getAssociatedFunction(); |
752 | const IRPosition &FnPos = IRPosition::function(*F); |
753 | auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos); |
754 | return clampStateAndIndicateChange(getState(), FnAA.getState()); |
755 | } |
756 | |
757 | /// See AbstractAttribute::trackStatistics() |
758 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind){ static llvm::Statistic NumIRCS_nounwind = {"attributor", "NumIRCS_nounwind" , ("Number of " "call site" " marked '" "nounwind" "'")};; ++ (NumIRCS_nounwind); }; } |
759 | }; |
760 | |
761 | /// --------------------- Function Return Values ------------------------------- |
762 | |
763 | /// "Attribute" that collects all potential returned values and the return |
764 | /// instructions that they arise from. |
765 | /// |
766 | /// If there is a unique returned value R, the manifest method will: |
767 | /// - mark R with the "returned" attribute, if R is an argument. |
768 | class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState { |
769 | |
770 | /// Mapping of values potentially returned by the associated function to the |
771 | /// return instructions that might return them. |
772 | MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues; |
773 | |
774 | /// Mapping to remember the number of returned values for a call site such |
775 | /// that we can avoid updates if nothing changed. |
776 | DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA; |
777 | |
778 | /// Set of unresolved calls returned by the associated function. |
779 | SmallSetVector<CallBase *, 4> UnresolvedCalls; |
780 | |
781 | /// State flags |
782 | /// |
783 | ///{ |
784 | bool IsFixed = false; |
785 | bool IsValidState = true; |
786 | ///} |
787 | |
788 | public: |
789 | AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A) |
790 | : AAReturnedValues(IRP, A) {} |
791 | |
792 | /// See AbstractAttribute::initialize(...). |
793 | void initialize(Attributor &A) override { |
794 | // Reset the state. |
795 | IsFixed = false; |
796 | IsValidState = true; |
797 | ReturnedValues.clear(); |
798 | |
799 | Function *F = getAssociatedFunction(); |
800 | if (!F || F->isDeclaration()) { |
801 | indicatePessimisticFixpoint(); |
802 | return; |
803 | } |
804 | assert(!F->getReturnType()->isVoidTy() &&((!F->getReturnType()->isVoidTy() && "Did not expect a void return type!" ) ? static_cast<void> (0) : __assert_fail ("!F->getReturnType()->isVoidTy() && \"Did not expect a void return type!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 805, __PRETTY_FUNCTION__)) |
805 | "Did not expect a void return type!")((!F->getReturnType()->isVoidTy() && "Did not expect a void return type!" ) ? static_cast<void> (0) : __assert_fail ("!F->getReturnType()->isVoidTy() && \"Did not expect a void return type!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 805, __PRETTY_FUNCTION__)); |
806 | |
807 | // The map from instruction opcodes to those instructions in the function. |
808 | auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F); |
809 | |
810 | // Look through all arguments, if one is marked as returned we are done. |
811 | for (Argument &Arg : F->args()) { |
812 | if (Arg.hasReturnedAttr()) { |
813 | auto &ReturnInstSet = ReturnedValues[&Arg]; |
814 | if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret)) |
815 | for (Instruction *RI : *Insts) |
816 | ReturnInstSet.insert(cast<ReturnInst>(RI)); |
817 | |
818 | indicateOptimisticFixpoint(); |
819 | return; |
820 | } |
821 | } |
822 | |
823 | if (!A.isFunctionIPOAmendable(*F)) |
824 | indicatePessimisticFixpoint(); |
825 | } |
826 | |
827 | /// See AbstractAttribute::manifest(...). |
828 | ChangeStatus manifest(Attributor &A) override; |
829 | |
830 | /// See AbstractAttribute::getState(...). |
831 | AbstractState &getState() override { return *this; } |
832 | |
833 | /// See AbstractAttribute::getState(...). |
834 | const AbstractState &getState() const override { return *this; } |
835 | |
836 | /// See AbstractAttribute::updateImpl(Attributor &A). |
837 | ChangeStatus updateImpl(Attributor &A) override; |
838 | |
839 | llvm::iterator_range<iterator> returned_values() override { |
840 | return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); |
841 | } |
842 | |
843 | llvm::iterator_range<const_iterator> returned_values() const override { |
844 | return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); |
845 | } |
846 | |
847 | const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override { |
848 | return UnresolvedCalls; |
849 | } |
850 | |
851 | /// Return the number of potential return values, -1 if unknown. |
852 | size_t getNumReturnValues() const override { |
853 | return isValidState() ? ReturnedValues.size() : -1; |
854 | } |
855 | |
856 | /// Return an assumed unique return value if a single candidate is found. If |
857 | /// there cannot be one, return a nullptr. If it is not clear yet, return the |
858 | /// Optional::NoneType. |
859 | Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const; |
860 | |
861 | /// See AbstractState::checkForAllReturnedValues(...). |
862 | bool checkForAllReturnedValuesAndReturnInsts( |
863 | function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) |
864 | const override; |
865 | |
866 | /// Pretty print the attribute similar to the IR representation. |
867 | const std::string getAsStr() const override; |
868 | |
869 | /// See AbstractState::isAtFixpoint(). |
870 | bool isAtFixpoint() const override { return IsFixed; } |
871 | |
872 | /// See AbstractState::isValidState(). |
873 | bool isValidState() const override { return IsValidState; } |
874 | |
875 | /// See AbstractState::indicateOptimisticFixpoint(...). |
876 | ChangeStatus indicateOptimisticFixpoint() override { |
877 | IsFixed = true; |
878 | return ChangeStatus::UNCHANGED; |
879 | } |
880 | |
881 | ChangeStatus indicatePessimisticFixpoint() override { |
882 | IsFixed = true; |
883 | IsValidState = false; |
884 | return ChangeStatus::CHANGED; |
885 | } |
886 | }; |
887 | |
888 | ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { |
889 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
890 | |
891 | // Bookkeeping. |
892 | assert(isValidState())((isValidState()) ? static_cast<void> (0) : __assert_fail ("isValidState()", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 892, __PRETTY_FUNCTION__)); |
893 | STATS_DECLTRACK(KnownReturnValues, FunctionReturn,{ static llvm::Statistic NumIRFunctionReturn_KnownReturnValues = {"attributor", "NumIRFunctionReturn_KnownReturnValues", "Number of function with known return values" };; ++(NumIRFunctionReturn_KnownReturnValues); } |
894 | "Number of function with known return values"){ static llvm::Statistic NumIRFunctionReturn_KnownReturnValues = {"attributor", "NumIRFunctionReturn_KnownReturnValues", "Number of function with known return values" };; ++(NumIRFunctionReturn_KnownReturnValues); }; |
895 | |
896 | // Check if we have an assumed unique return value that we could manifest. |
897 | Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); |
898 | |
899 | if (!UniqueRV.hasValue() || !UniqueRV.getValue()) |
900 | return Changed; |
901 | |
902 | // Bookkeeping. |
903 | STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,{ static llvm::Statistic NumIRFunctionReturn_UniqueReturnValue = {"attributor", "NumIRFunctionReturn_UniqueReturnValue", "Number of function with unique return" };; ++(NumIRFunctionReturn_UniqueReturnValue); } |
904 | "Number of function with unique return"){ static llvm::Statistic NumIRFunctionReturn_UniqueReturnValue = {"attributor", "NumIRFunctionReturn_UniqueReturnValue", "Number of function with unique return" };; ++(NumIRFunctionReturn_UniqueReturnValue); }; |
905 | |
906 | // Callback to replace the uses of CB with the constant C. |
907 | auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) { |
908 | if (CB.use_empty()) |
909 | return ChangeStatus::UNCHANGED; |
910 | if (A.changeValueAfterManifest(CB, C)) |
911 | return ChangeStatus::CHANGED; |
912 | return ChangeStatus::UNCHANGED; |
913 | }; |
914 | |
915 | // If the assumed unique return value is an argument, annotate it. |
916 | if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) { |
917 | if (UniqueRVArg->getType()->canLosslesslyBitCastTo( |
918 | getAssociatedFunction()->getReturnType())) { |
919 | getIRPosition() = IRPosition::argument(*UniqueRVArg); |
920 | Changed = IRAttribute::manifest(A); |
921 | } |
922 | } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) { |
923 | // We can replace the returned value with the unique returned constant. |
924 | Value &AnchorValue = getAnchorValue(); |
925 | if (Function *F = dyn_cast<Function>(&AnchorValue)) { |
926 | for (const Use &U : F->uses()) |
927 | if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) |
928 | if (CB->isCallee(&U)) { |
929 | Constant *RVCCast = |
930 | CB->getType() == RVC->getType() |
931 | ? RVC |
932 | : ConstantExpr::getTruncOrBitCast(RVC, CB->getType()); |
933 | Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed; |
934 | } |
935 | } else { |
936 | assert(isa<CallBase>(AnchorValue) &&((isa<CallBase>(AnchorValue) && "Expcected a function or call base anchor!" ) ? static_cast<void> (0) : __assert_fail ("isa<CallBase>(AnchorValue) && \"Expcected a function or call base anchor!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 937, __PRETTY_FUNCTION__)) |
937 | "Expcected a function or call base anchor!")((isa<CallBase>(AnchorValue) && "Expcected a function or call base anchor!" ) ? static_cast<void> (0) : __assert_fail ("isa<CallBase>(AnchorValue) && \"Expcected a function or call base anchor!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 937, __PRETTY_FUNCTION__)); |
938 | Constant *RVCCast = |
939 | AnchorValue.getType() == RVC->getType() |
940 | ? RVC |
941 | : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType()); |
942 | Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast); |
943 | } |
944 | if (Changed == ChangeStatus::CHANGED) |
945 | STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,{ static llvm::Statistic NumIRFunctionReturn_UniqueConstantReturnValue = {"attributor", "NumIRFunctionReturn_UniqueConstantReturnValue" , "Number of function returns replaced by constant return"};; ++(NumIRFunctionReturn_UniqueConstantReturnValue); } |
946 | "Number of function returns replaced by constant return"){ static llvm::Statistic NumIRFunctionReturn_UniqueConstantReturnValue = {"attributor", "NumIRFunctionReturn_UniqueConstantReturnValue" , "Number of function returns replaced by constant return"};; ++(NumIRFunctionReturn_UniqueConstantReturnValue); }; |
947 | } |
948 | |
949 | return Changed; |
950 | } |
951 | |
952 | const std::string AAReturnedValuesImpl::getAsStr() const { |
953 | return (isAtFixpoint() ? "returns(#" : "may-return(#") + |
954 | (isValidState() ? std::to_string(getNumReturnValues()) : "?") + |
955 | ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]"; |
956 | } |
957 | |
958 | Optional<Value *> |
959 | AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const { |
960 | // If checkForAllReturnedValues provides a unique value, ignoring potential |
961 | // undef values that can also be present, it is assumed to be the actual |
962 | // return value and forwarded to the caller of this method. If there are |
963 | // multiple, a nullptr is returned indicating there cannot be a unique |
964 | // returned value. |
965 | Optional<Value *> UniqueRV; |
966 | |
967 | auto Pred = [&](Value &RV) -> bool { |
968 | // If we found a second returned value and neither the current nor the saved |
969 | // one is an undef, there is no unique returned value. Undefs are special |
970 | // since we can pretend they have any value. |
971 | if (UniqueRV.hasValue() && UniqueRV != &RV && |
972 | !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) { |
973 | UniqueRV = nullptr; |
974 | return false; |
975 | } |
976 | |
977 | // Do not overwrite a value with an undef. |
978 | if (!UniqueRV.hasValue() || !isa<UndefValue>(RV)) |
979 | UniqueRV = &RV; |
980 | |
981 | return true; |
982 | }; |
983 | |
984 | if (!A.checkForAllReturnedValues(Pred, *this)) |
985 | UniqueRV = nullptr; |
986 | |
987 | return UniqueRV; |
988 | } |
989 | |
990 | bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts( |
991 | function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) |
992 | const { |
993 | if (!isValidState()) |
994 | return false; |
995 | |
996 | // Check all returned values but ignore call sites as long as we have not |
997 | // encountered an overdefined one during an update. |
998 | for (auto &It : ReturnedValues) { |
999 | Value *RV = It.first; |
1000 | |
1001 | CallBase *CB = dyn_cast<CallBase>(RV); |
1002 | if (CB && !UnresolvedCalls.count(CB)) |
1003 | continue; |
1004 | |
1005 | if (!Pred(*RV, It.second)) |
1006 | return false; |
1007 | } |
1008 | |
1009 | return true; |
1010 | } |
1011 | |
1012 | ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) { |
1013 | size_t NumUnresolvedCalls = UnresolvedCalls.size(); |
1014 | bool Changed = false; |
1015 | |
1016 | // State used in the value traversals starting in returned values. |
1017 | struct RVState { |
1018 | // The map in which we collect return values -> return instrs. |
1019 | decltype(ReturnedValues) &RetValsMap; |
1020 | // The flag to indicate a change. |
1021 | bool &Changed; |
1022 | // The return instrs we come from. |
1023 | SmallSetVector<ReturnInst *, 4> RetInsts; |
1024 | }; |
1025 | |
1026 | // Callback for a leaf value returned by the associated function. |
1027 | auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS, |
1028 | bool) -> bool { |
1029 | auto Size = RVS.RetValsMap[&Val].size(); |
1030 | RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end()); |
1031 | bool Inserted = RVS.RetValsMap[&Val].size() != Size; |
1032 | RVS.Changed |= Inserted; |
1033 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (Inserted) dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val << " => " << RVS.RetInsts.size() << "\n"; }; } } while (false) |
1034 | if (Inserted)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (Inserted) dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val << " => " << RVS.RetInsts.size() << "\n"; }; } } while (false) |
1035 | dbgs() << "[AAReturnedValues] 1 Add new returned value " << Valdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (Inserted) dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val << " => " << RVS.RetInsts.size() << "\n"; }; } } while (false) |
1036 | << " => " << RVS.RetInsts.size() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (Inserted) dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val << " => " << RVS.RetInsts.size() << "\n"; }; } } while (false) |
1037 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (Inserted) dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val << " => " << RVS.RetInsts.size() << "\n"; }; } } while (false); |
1038 | return true; |
1039 | }; |
1040 | |
1041 | // Helper method to invoke the generic value traversal. |
1042 | auto VisitReturnedValue = [&](Value &RV, RVState &RVS, |
1043 | const Instruction *CtxI) { |
1044 | IRPosition RetValPos = IRPosition::value(RV); |
1045 | return genericValueTraversal<AAReturnedValues, RVState>( |
1046 | A, RetValPos, *this, RVS, VisitValueCB, CtxI, |
1047 | /* UseValueSimplify */ false); |
1048 | }; |
1049 | |
1050 | // Callback for all "return intructions" live in the associated function. |
1051 | auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) { |
1052 | ReturnInst &Ret = cast<ReturnInst>(I); |
1053 | RVState RVS({ReturnedValues, Changed, {}}); |
1054 | RVS.RetInsts.insert(&Ret); |
1055 | return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I); |
1056 | }; |
1057 | |
1058 | // Start by discovering returned values from all live returned instructions in |
1059 | // the associated function. |
1060 | if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret})) |
1061 | return indicatePessimisticFixpoint(); |
1062 | |
1063 | // Once returned values "directly" present in the code are handled we try to |
1064 | // resolve returned calls. To avoid modifications to the ReturnedValues map |
1065 | // while we iterate over it we kept record of potential new entries in a copy |
1066 | // map, NewRVsMap. |
1067 | decltype(ReturnedValues) NewRVsMap; |
1068 | |
1069 | auto HandleReturnValue = [&](Value *RV, |
1070 | SmallSetVector<ReturnInst *, 4> &RIs) { |
1071 | LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #" << RIs.size() << " RIs\n" ; } } while (false) |
1072 | << RIs.size() << " RIs\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #" << RIs.size() << " RIs\n" ; } } while (false); |
1073 | CallBase *CB = dyn_cast<CallBase>(RV); |
1074 | if (!CB || UnresolvedCalls.count(CB)) |
1075 | return; |
1076 | |
1077 | if (!CB->getCalledFunction()) { |
1078 | LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CBdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Unresolved call: " << *CB << "\n"; } } while (false) |
1079 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Unresolved call: " << *CB << "\n"; } } while (false); |
1080 | UnresolvedCalls.insert(CB); |
1081 | return; |
1082 | } |
1083 | |
1084 | // TODO: use the function scope once we have call site AAReturnedValues. |
1085 | const auto &RetValAA = A.getAAFor<AAReturnedValues>( |
1086 | *this, IRPosition::function(*CB->getCalledFunction())); |
1087 | LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Found another AAReturnedValues: " << RetValAA << "\n"; } } while (false) |
1088 | << RetValAA << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Found another AAReturnedValues: " << RetValAA << "\n"; } } while (false); |
1089 | |
1090 | // Skip dead ends, thus if we do not know anything about the returned |
1091 | // call we mark it as unresolved and it will stay that way. |
1092 | if (!RetValAA.getState().isValidState()) { |
1093 | LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CBdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Unresolved call: " << *CB << "\n"; } } while (false) |
1094 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Unresolved call: " << *CB << "\n"; } } while (false); |
1095 | UnresolvedCalls.insert(CB); |
1096 | return; |
1097 | } |
1098 | |
1099 | // Do not try to learn partial information. If the callee has unresolved |
1100 | // return values we will treat the call as unresolved/opaque. |
1101 | auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls(); |
1102 | if (!RetValAAUnresolvedCalls.empty()) { |
1103 | UnresolvedCalls.insert(CB); |
1104 | return; |
1105 | } |
1106 | |
1107 | // Now check if we can track transitively returned values. If possible, thus |
1108 | // if all return value can be represented in the current scope, do so. |
1109 | bool Unresolved = false; |
1110 | for (auto &RetValAAIt : RetValAA.returned_values()) { |
1111 | Value *RetVal = RetValAAIt.first; |
1112 | if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) || |
1113 | isa<Constant>(RetVal)) |
1114 | continue; |
1115 | // Anything that did not fit in the above categories cannot be resolved, |
1116 | // mark the call as unresolved. |
1117 | LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] transitively returned value " "cannot be translated: " << *RetVal << "\n"; } } while (false) |
1118 | "cannot be translated: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] transitively returned value " "cannot be translated: " << *RetVal << "\n"; } } while (false) |
1119 | << *RetVal << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] transitively returned value " "cannot be translated: " << *RetVal << "\n"; } } while (false); |
1120 | UnresolvedCalls.insert(CB); |
1121 | Unresolved = true; |
1122 | break; |
1123 | } |
1124 | |
1125 | if (Unresolved) |
1126 | return; |
1127 | |
1128 | // Now track transitively returned values. |
1129 | unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB]; |
1130 | if (NumRetAA == RetValAA.getNumReturnValues()) { |
1131 | LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Skip call as it has not " "changed since it was seen last\n"; } } while (false) |
1132 | "changed since it was seen last\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Skip call as it has not " "changed since it was seen last\n"; } } while (false); |
1133 | return; |
1134 | } |
1135 | NumRetAA = RetValAA.getNumReturnValues(); |
1136 | |
1137 | for (auto &RetValAAIt : RetValAA.returned_values()) { |
1138 | Value *RetVal = RetValAAIt.first; |
1139 | if (Argument *Arg = dyn_cast<Argument>(RetVal)) { |
1140 | // Arguments are mapped to call site operands and we begin the traversal |
1141 | // again. |
1142 | bool Unused = false; |
1143 | RVState RVS({NewRVsMap, Unused, RetValAAIt.second}); |
1144 | VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB); |
1145 | continue; |
1146 | } |
1147 | if (isa<CallBase>(RetVal)) { |
1148 | // Call sites are resolved by the callee attribute over time, no need to |
1149 | // do anything for us. |
1150 | continue; |
1151 | } |
1152 | if (isa<Constant>(RetVal)) { |
1153 | // Constants are valid everywhere, we can simply take them. |
1154 | NewRVsMap[RetVal].insert(RIs.begin(), RIs.end()); |
1155 | continue; |
1156 | } |
1157 | } |
1158 | }; |
1159 | |
1160 | for (auto &It : ReturnedValues) |
1161 | HandleReturnValue(It.first, It.second); |
1162 | |
1163 | // Because processing the new information can again lead to new return values |
1164 | // we have to be careful and iterate until this iteration is complete. The |
1165 | // idea is that we are in a stable state at the end of an update. All return |
1166 | // values have been handled and properly categorized. We might not update |
1167 | // again if we have not requested a non-fix attribute so we cannot "wait" for |
1168 | // the next update to analyze a new return value. |
1169 | while (!NewRVsMap.empty()) { |
1170 | auto It = std::move(NewRVsMap.back()); |
1171 | NewRVsMap.pop_back(); |
1172 | |
1173 | assert(!It.second.empty() && "Entry does not add anything.")((!It.second.empty() && "Entry does not add anything." ) ? static_cast<void> (0) : __assert_fail ("!It.second.empty() && \"Entry does not add anything.\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1173, __PRETTY_FUNCTION__)); |
1174 | auto &ReturnInsts = ReturnedValues[It.first]; |
1175 | for (ReturnInst *RI : It.second) |
1176 | if (ReturnInsts.insert(RI)) { |
1177 | LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Add new returned value " << *It.first << " => " << *RI << "\n" ; } } while (false) |
1178 | << *It.first << " => " << *RI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAReturnedValues] Add new returned value " << *It.first << " => " << *RI << "\n" ; } } while (false); |
1179 | HandleReturnValue(It.first, ReturnInsts); |
1180 | Changed = true; |
1181 | } |
1182 | } |
1183 | |
1184 | Changed |= (NumUnresolvedCalls != UnresolvedCalls.size()); |
1185 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
1186 | } |
1187 | |
1188 | struct AAReturnedValuesFunction final : public AAReturnedValuesImpl { |
1189 | AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A) |
1190 | : AAReturnedValuesImpl(IRP, A) {} |
1191 | |
1192 | /// See AbstractAttribute::trackStatistics() |
1193 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned){ static llvm::Statistic NumIRArguments_returned = {"attributor" , "NumIRArguments_returned", ("Number of " "arguments" " marked '" "returned" "'")};; ++(NumIRArguments_returned); } } |
1194 | }; |
1195 | |
1196 | /// Returned values information for a call sites. |
1197 | struct AAReturnedValuesCallSite final : AAReturnedValuesImpl { |
1198 | AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A) |
1199 | : AAReturnedValuesImpl(IRP, A) {} |
1200 | |
1201 | /// See AbstractAttribute::initialize(...). |
1202 | void initialize(Attributor &A) override { |
1203 | // TODO: Once we have call site specific value information we can provide |
1204 | // call site specific liveness information and then it makes |
1205 | // sense to specialize attributes for call sites instead of |
1206 | // redirecting requests to the callee. |
1207 | llvm_unreachable("Abstract attributes for returned values are not "::llvm::llvm_unreachable_internal("Abstract attributes for returned values are not " "supported for call sites yet!", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1208) |
1208 | "supported for call sites yet!")::llvm::llvm_unreachable_internal("Abstract attributes for returned values are not " "supported for call sites yet!", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1208); |
1209 | } |
1210 | |
1211 | /// See AbstractAttribute::updateImpl(...). |
1212 | ChangeStatus updateImpl(Attributor &A) override { |
1213 | return indicatePessimisticFixpoint(); |
1214 | } |
1215 | |
1216 | /// See AbstractAttribute::trackStatistics() |
1217 | void trackStatistics() const override {} |
1218 | }; |
1219 | |
1220 | /// ------------------------ NoSync Function Attribute ------------------------- |
1221 | |
1222 | struct AANoSyncImpl : AANoSync { |
1223 | AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {} |
1224 | |
1225 | const std::string getAsStr() const override { |
1226 | return getAssumed() ? "nosync" : "may-sync"; |
1227 | } |
1228 | |
1229 | /// See AbstractAttribute::updateImpl(...). |
1230 | ChangeStatus updateImpl(Attributor &A) override; |
1231 | |
1232 | /// Helper function used to determine whether an instruction is non-relaxed |
1233 | /// atomic. In other words, if an atomic instruction does not have unordered |
1234 | /// or monotonic ordering |
1235 | static bool isNonRelaxedAtomic(Instruction *I); |
1236 | |
1237 | /// Helper function used to determine whether an instruction is volatile. |
1238 | static bool isVolatile(Instruction *I); |
1239 | |
1240 | /// Helper function uset to check if intrinsic is volatile (memcpy, memmove, |
1241 | /// memset). |
1242 | static bool isNoSyncIntrinsic(Instruction *I); |
1243 | }; |
1244 | |
1245 | bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) { |
1246 | if (!I->isAtomic()) |
1247 | return false; |
1248 | |
1249 | AtomicOrdering Ordering; |
1250 | switch (I->getOpcode()) { |
1251 | case Instruction::AtomicRMW: |
1252 | Ordering = cast<AtomicRMWInst>(I)->getOrdering(); |
1253 | break; |
1254 | case Instruction::Store: |
1255 | Ordering = cast<StoreInst>(I)->getOrdering(); |
1256 | break; |
1257 | case Instruction::Load: |
1258 | Ordering = cast<LoadInst>(I)->getOrdering(); |
1259 | break; |
1260 | case Instruction::Fence: { |
1261 | auto *FI = cast<FenceInst>(I); |
1262 | if (FI->getSyncScopeID() == SyncScope::SingleThread) |
1263 | return false; |
1264 | Ordering = FI->getOrdering(); |
1265 | break; |
1266 | } |
1267 | case Instruction::AtomicCmpXchg: { |
1268 | AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering(); |
1269 | AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering(); |
1270 | // Only if both are relaxed, than it can be treated as relaxed. |
1271 | // Otherwise it is non-relaxed. |
1272 | if (Success != AtomicOrdering::Unordered && |
1273 | Success != AtomicOrdering::Monotonic) |
1274 | return true; |
1275 | if (Failure != AtomicOrdering::Unordered && |
1276 | Failure != AtomicOrdering::Monotonic) |
1277 | return true; |
1278 | return false; |
1279 | } |
1280 | default: |
1281 | llvm_unreachable(::llvm::llvm_unreachable_internal("New atomic operations need to be known in the attributor." , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1282) |
1282 | "New atomic operations need to be known in the attributor.")::llvm::llvm_unreachable_internal("New atomic operations need to be known in the attributor." , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1282); |
1283 | } |
1284 | |
1285 | // Relaxed. |
1286 | if (Ordering == AtomicOrdering::Unordered || |
1287 | Ordering == AtomicOrdering::Monotonic) |
1288 | return false; |
1289 | return true; |
1290 | } |
1291 | |
1292 | /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics. |
1293 | /// FIXME: We should ipmrove the handling of intrinsics. |
1294 | bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) { |
1295 | if (auto *II = dyn_cast<IntrinsicInst>(I)) { |
1296 | switch (II->getIntrinsicID()) { |
1297 | /// Element wise atomic memory intrinsics are can only be unordered, |
1298 | /// therefore nosync. |
1299 | case Intrinsic::memset_element_unordered_atomic: |
1300 | case Intrinsic::memmove_element_unordered_atomic: |
1301 | case Intrinsic::memcpy_element_unordered_atomic: |
1302 | return true; |
1303 | case Intrinsic::memset: |
1304 | case Intrinsic::memmove: |
1305 | case Intrinsic::memcpy: |
1306 | if (!cast<MemIntrinsic>(II)->isVolatile()) |
1307 | return true; |
1308 | return false; |
1309 | default: |
1310 | return false; |
1311 | } |
1312 | } |
1313 | return false; |
1314 | } |
1315 | |
1316 | bool AANoSyncImpl::isVolatile(Instruction *I) { |
1317 | assert(!isa<CallBase>(I) && "Calls should not be checked here")((!isa<CallBase>(I) && "Calls should not be checked here" ) ? static_cast<void> (0) : __assert_fail ("!isa<CallBase>(I) && \"Calls should not be checked here\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1317, __PRETTY_FUNCTION__)); |
1318 | |
1319 | switch (I->getOpcode()) { |
1320 | case Instruction::AtomicRMW: |
1321 | return cast<AtomicRMWInst>(I)->isVolatile(); |
1322 | case Instruction::Store: |
1323 | return cast<StoreInst>(I)->isVolatile(); |
1324 | case Instruction::Load: |
1325 | return cast<LoadInst>(I)->isVolatile(); |
1326 | case Instruction::AtomicCmpXchg: |
1327 | return cast<AtomicCmpXchgInst>(I)->isVolatile(); |
1328 | default: |
1329 | return false; |
1330 | } |
1331 | } |
1332 | |
1333 | ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) { |
1334 | |
1335 | auto CheckRWInstForNoSync = [&](Instruction &I) { |
1336 | /// We are looking for volatile instructions or Non-Relaxed atomics. |
1337 | /// FIXME: We should improve the handling of intrinsics. |
1338 | |
1339 | if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I)) |
1340 | return true; |
1341 | |
1342 | if (const auto *CB = dyn_cast<CallBase>(&I)) { |
1343 | if (CB->hasFnAttr(Attribute::NoSync)) |
1344 | return true; |
1345 | |
1346 | const auto &NoSyncAA = |
1347 | A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB)); |
1348 | if (NoSyncAA.isAssumedNoSync()) |
1349 | return true; |
1350 | return false; |
1351 | } |
1352 | |
1353 | if (!isVolatile(&I) && !isNonRelaxedAtomic(&I)) |
1354 | return true; |
1355 | |
1356 | return false; |
1357 | }; |
1358 | |
1359 | auto CheckForNoSync = [&](Instruction &I) { |
1360 | // At this point we handled all read/write effects and they are all |
1361 | // nosync, so they can be skipped. |
1362 | if (I.mayReadOrWriteMemory()) |
1363 | return true; |
1364 | |
1365 | // non-convergent and readnone imply nosync. |
1366 | return !cast<CallBase>(I).isConvergent(); |
1367 | }; |
1368 | |
1369 | if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) || |
1370 | !A.checkForAllCallLikeInstructions(CheckForNoSync, *this)) |
1371 | return indicatePessimisticFixpoint(); |
1372 | |
1373 | return ChangeStatus::UNCHANGED; |
1374 | } |
1375 | |
1376 | struct AANoSyncFunction final : public AANoSyncImpl { |
1377 | AANoSyncFunction(const IRPosition &IRP, Attributor &A) |
1378 | : AANoSyncImpl(IRP, A) {} |
1379 | |
1380 | /// See AbstractAttribute::trackStatistics() |
1381 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync){ static llvm::Statistic NumIRFunction_nosync = {"attributor" , "NumIRFunction_nosync", ("Number of " "functions" " marked '" "nosync" "'")};; ++(NumIRFunction_nosync); } } |
1382 | }; |
1383 | |
1384 | /// NoSync attribute deduction for a call sites. |
1385 | struct AANoSyncCallSite final : AANoSyncImpl { |
1386 | AANoSyncCallSite(const IRPosition &IRP, Attributor &A) |
1387 | : AANoSyncImpl(IRP, A) {} |
1388 | |
1389 | /// See AbstractAttribute::initialize(...). |
1390 | void initialize(Attributor &A) override { |
1391 | AANoSyncImpl::initialize(A); |
1392 | Function *F = getAssociatedFunction(); |
1393 | if (!F || F->isDeclaration()) |
1394 | indicatePessimisticFixpoint(); |
1395 | } |
1396 | |
1397 | /// See AbstractAttribute::updateImpl(...). |
1398 | ChangeStatus updateImpl(Attributor &A) override { |
1399 | // TODO: Once we have call site specific value information we can provide |
1400 | // call site specific liveness information and then it makes |
1401 | // sense to specialize attributes for call sites arguments instead of |
1402 | // redirecting requests to the callee argument. |
1403 | Function *F = getAssociatedFunction(); |
1404 | const IRPosition &FnPos = IRPosition::function(*F); |
1405 | auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos); |
1406 | return clampStateAndIndicateChange(getState(), FnAA.getState()); |
1407 | } |
1408 | |
1409 | /// See AbstractAttribute::trackStatistics() |
1410 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync){ static llvm::Statistic NumIRCS_nosync = {"attributor", "NumIRCS_nosync" , ("Number of " "call site" " marked '" "nosync" "'")};; ++(NumIRCS_nosync ); }; } |
1411 | }; |
1412 | |
1413 | /// ------------------------ No-Free Attributes ---------------------------- |
1414 | |
1415 | struct AANoFreeImpl : public AANoFree { |
1416 | AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {} |
1417 | |
1418 | /// See AbstractAttribute::updateImpl(...). |
1419 | ChangeStatus updateImpl(Attributor &A) override { |
1420 | auto CheckForNoFree = [&](Instruction &I) { |
1421 | const auto &CB = cast<CallBase>(I); |
1422 | if (CB.hasFnAttr(Attribute::NoFree)) |
1423 | return true; |
1424 | |
1425 | const auto &NoFreeAA = |
1426 | A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB)); |
1427 | return NoFreeAA.isAssumedNoFree(); |
1428 | }; |
1429 | |
1430 | if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this)) |
1431 | return indicatePessimisticFixpoint(); |
1432 | return ChangeStatus::UNCHANGED; |
1433 | } |
1434 | |
1435 | /// See AbstractAttribute::getAsStr(). |
1436 | const std::string getAsStr() const override { |
1437 | return getAssumed() ? "nofree" : "may-free"; |
1438 | } |
1439 | }; |
1440 | |
1441 | struct AANoFreeFunction final : public AANoFreeImpl { |
1442 | AANoFreeFunction(const IRPosition &IRP, Attributor &A) |
1443 | : AANoFreeImpl(IRP, A) {} |
1444 | |
1445 | /// See AbstractAttribute::trackStatistics() |
1446 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree){ static llvm::Statistic NumIRFunction_nofree = {"attributor" , "NumIRFunction_nofree", ("Number of " "functions" " marked '" "nofree" "'")};; ++(NumIRFunction_nofree); } } |
1447 | }; |
1448 | |
1449 | /// NoFree attribute deduction for a call sites. |
1450 | struct AANoFreeCallSite final : AANoFreeImpl { |
1451 | AANoFreeCallSite(const IRPosition &IRP, Attributor &A) |
1452 | : AANoFreeImpl(IRP, A) {} |
1453 | |
1454 | /// See AbstractAttribute::initialize(...). |
1455 | void initialize(Attributor &A) override { |
1456 | AANoFreeImpl::initialize(A); |
1457 | Function *F = getAssociatedFunction(); |
1458 | if (!F || F->isDeclaration()) |
1459 | indicatePessimisticFixpoint(); |
1460 | } |
1461 | |
1462 | /// See AbstractAttribute::updateImpl(...). |
1463 | ChangeStatus updateImpl(Attributor &A) override { |
1464 | // TODO: Once we have call site specific value information we can provide |
1465 | // call site specific liveness information and then it makes |
1466 | // sense to specialize attributes for call sites arguments instead of |
1467 | // redirecting requests to the callee argument. |
1468 | Function *F = getAssociatedFunction(); |
1469 | const IRPosition &FnPos = IRPosition::function(*F); |
1470 | auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos); |
1471 | return clampStateAndIndicateChange(getState(), FnAA.getState()); |
1472 | } |
1473 | |
1474 | /// See AbstractAttribute::trackStatistics() |
1475 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree){ static llvm::Statistic NumIRCS_nofree = {"attributor", "NumIRCS_nofree" , ("Number of " "call site" " marked '" "nofree" "'")};; ++(NumIRCS_nofree ); }; } |
1476 | }; |
1477 | |
1478 | /// NoFree attribute for floating values. |
1479 | struct AANoFreeFloating : AANoFreeImpl { |
1480 | AANoFreeFloating(const IRPosition &IRP, Attributor &A) |
1481 | : AANoFreeImpl(IRP, A) {} |
1482 | |
1483 | /// See AbstractAttribute::trackStatistics() |
1484 | void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree){ static llvm::Statistic NumIRFloating_nofree = {"attributor" , "NumIRFloating_nofree", ("Number of floating values known to be '" "nofree" "'")};; ++(NumIRFloating_nofree); }} |
1485 | |
1486 | /// See Abstract Attribute::updateImpl(...). |
1487 | ChangeStatus updateImpl(Attributor &A) override { |
1488 | const IRPosition &IRP = getIRPosition(); |
1489 | |
1490 | const auto &NoFreeAA = |
1491 | A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP)); |
1492 | if (NoFreeAA.isAssumedNoFree()) |
1493 | return ChangeStatus::UNCHANGED; |
1494 | |
1495 | Value &AssociatedValue = getIRPosition().getAssociatedValue(); |
1496 | auto Pred = [&](const Use &U, bool &Follow) -> bool { |
1497 | Instruction *UserI = cast<Instruction>(U.getUser()); |
1498 | if (auto *CB = dyn_cast<CallBase>(UserI)) { |
1499 | if (CB->isBundleOperand(&U)) |
1500 | return false; |
1501 | if (!CB->isArgOperand(&U)) |
1502 | return true; |
1503 | unsigned ArgNo = CB->getArgOperandNo(&U); |
1504 | |
1505 | const auto &NoFreeArg = A.getAAFor<AANoFree>( |
1506 | *this, IRPosition::callsite_argument(*CB, ArgNo)); |
1507 | return NoFreeArg.isAssumedNoFree(); |
1508 | } |
1509 | |
1510 | if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || |
1511 | isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { |
1512 | Follow = true; |
1513 | return true; |
1514 | } |
1515 | if (isa<ReturnInst>(UserI)) |
1516 | return true; |
1517 | |
1518 | // Unknown user. |
1519 | return false; |
1520 | }; |
1521 | if (!A.checkForAllUses(Pred, *this, AssociatedValue)) |
1522 | return indicatePessimisticFixpoint(); |
1523 | |
1524 | return ChangeStatus::UNCHANGED; |
1525 | } |
1526 | }; |
1527 | |
1528 | /// NoFree attribute for a call site argument. |
1529 | struct AANoFreeArgument final : AANoFreeFloating { |
1530 | AANoFreeArgument(const IRPosition &IRP, Attributor &A) |
1531 | : AANoFreeFloating(IRP, A) {} |
1532 | |
1533 | /// See AbstractAttribute::trackStatistics() |
1534 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree){ static llvm::Statistic NumIRArguments_nofree = {"attributor" , "NumIRArguments_nofree", ("Number of " "arguments" " marked '" "nofree" "'")};; ++(NumIRArguments_nofree); } } |
1535 | }; |
1536 | |
1537 | /// NoFree attribute for call site arguments. |
1538 | struct AANoFreeCallSiteArgument final : AANoFreeFloating { |
1539 | AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A) |
1540 | : AANoFreeFloating(IRP, A) {} |
1541 | |
1542 | /// See AbstractAttribute::updateImpl(...). |
1543 | ChangeStatus updateImpl(Attributor &A) override { |
1544 | // TODO: Once we have call site specific value information we can provide |
1545 | // call site specific liveness information and then it makes |
1546 | // sense to specialize attributes for call sites arguments instead of |
1547 | // redirecting requests to the callee argument. |
1548 | Argument *Arg = getAssociatedArgument(); |
1549 | if (!Arg) |
1550 | return indicatePessimisticFixpoint(); |
1551 | const IRPosition &ArgPos = IRPosition::argument(*Arg); |
1552 | auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos); |
1553 | return clampStateAndIndicateChange(getState(), ArgAA.getState()); |
1554 | } |
1555 | |
1556 | /// See AbstractAttribute::trackStatistics() |
1557 | void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree){ static llvm::Statistic NumIRCSArguments_nofree = {"attributor" , "NumIRCSArguments_nofree", ("Number of " "call site arguments" " marked '" "nofree" "'")};; ++(NumIRCSArguments_nofree); }}; |
1558 | }; |
1559 | |
1560 | /// NoFree attribute for function return value. |
1561 | struct AANoFreeReturned final : AANoFreeFloating { |
1562 | AANoFreeReturned(const IRPosition &IRP, Attributor &A) |
1563 | : AANoFreeFloating(IRP, A) { |
1564 | llvm_unreachable("NoFree is not applicable to function returns!")::llvm::llvm_unreachable_internal("NoFree is not applicable to function returns!" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1564); |
1565 | } |
1566 | |
1567 | /// See AbstractAttribute::initialize(...). |
1568 | void initialize(Attributor &A) override { |
1569 | llvm_unreachable("NoFree is not applicable to function returns!")::llvm::llvm_unreachable_internal("NoFree is not applicable to function returns!" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1569); |
1570 | } |
1571 | |
1572 | /// See AbstractAttribute::updateImpl(...). |
1573 | ChangeStatus updateImpl(Attributor &A) override { |
1574 | llvm_unreachable("NoFree is not applicable to function returns!")::llvm::llvm_unreachable_internal("NoFree is not applicable to function returns!" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1574); |
1575 | } |
1576 | |
1577 | /// See AbstractAttribute::trackStatistics() |
1578 | void trackStatistics() const override {} |
1579 | }; |
1580 | |
1581 | /// NoFree attribute deduction for a call site return value. |
1582 | struct AANoFreeCallSiteReturned final : AANoFreeFloating { |
1583 | AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) |
1584 | : AANoFreeFloating(IRP, A) {} |
1585 | |
1586 | ChangeStatus manifest(Attributor &A) override { |
1587 | return ChangeStatus::UNCHANGED; |
1588 | } |
1589 | /// See AbstractAttribute::trackStatistics() |
1590 | void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree){ static llvm::Statistic NumIRCSReturn_nofree = {"attributor" , "NumIRCSReturn_nofree", ("Number of " "call site returns" " marked '" "nofree" "'")};; ++(NumIRCSReturn_nofree); } } |
1591 | }; |
1592 | |
1593 | /// ------------------------ NonNull Argument Attribute ------------------------ |
1594 | static int64_t getKnownNonNullAndDerefBytesForUse( |
1595 | Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, |
1596 | const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { |
1597 | TrackUse = false; |
1598 | |
1599 | const Value *UseV = U->get(); |
1600 | if (!UseV->getType()->isPointerTy()) |
1601 | return 0; |
1602 | |
1603 | Type *PtrTy = UseV->getType(); |
1604 | const Function *F = I->getFunction(); |
1605 | bool NullPointerIsDefined = |
1606 | F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; |
1607 | const DataLayout &DL = A.getInfoCache().getDL(); |
1608 | if (const auto *CB = dyn_cast<CallBase>(I)) { |
1609 | if (CB->isBundleOperand(U)) { |
1610 | if (RetainedKnowledge RK = getKnowledgeFromUse( |
1611 | U, {Attribute::NonNull, Attribute::Dereferenceable})) { |
1612 | IsNonNull |= |
1613 | (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); |
1614 | return RK.ArgValue; |
1615 | } |
1616 | return 0; |
1617 | } |
1618 | |
1619 | if (CB->isCallee(U)) { |
1620 | IsNonNull |= !NullPointerIsDefined; |
1621 | return 0; |
1622 | } |
1623 | |
1624 | unsigned ArgNo = CB->getArgOperandNo(U); |
1625 | IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); |
1626 | // As long as we only use known information there is no need to track |
1627 | // dependences here. |
1628 | auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP, |
1629 | /* TrackDependence */ false); |
1630 | IsNonNull |= DerefAA.isKnownNonNull(); |
1631 | return DerefAA.getKnownDereferenceableBytes(); |
1632 | } |
1633 | |
1634 | // We need to follow common pointer manipulation uses to the accesses they |
1635 | // feed into. We can try to be smart to avoid looking through things we do not |
1636 | // like for now, e.g., non-inbounds GEPs. |
1637 | if (isa<CastInst>(I)) { |
1638 | TrackUse = true; |
1639 | return 0; |
1640 | } |
1641 | |
1642 | if (isa<GetElementPtrInst>(I)) { |
1643 | TrackUse = true; |
1644 | return 0; |
1645 | } |
1646 | |
1647 | int64_t Offset; |
1648 | const Value *Base = |
1649 | getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL); |
1650 | if (Base) { |
1651 | if (Base == &AssociatedValue && |
1652 | getPointerOperand(I, /* AllowVolatile */ false) == UseV) { |
1653 | int64_t DerefBytes = |
1654 | (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset; |
1655 | |
1656 | IsNonNull |= !NullPointerIsDefined; |
1657 | return std::max(int64_t(0), DerefBytes); |
1658 | } |
1659 | } |
1660 | |
1661 | /// Corner case when an offset is 0. |
1662 | Base = getBasePointerOfAccessPointerOperand(I, Offset, DL, |
1663 | /*AllowNonInbounds*/ true); |
1664 | if (Base) { |
1665 | if (Offset == 0 && Base == &AssociatedValue && |
1666 | getPointerOperand(I, /* AllowVolatile */ false) == UseV) { |
1667 | int64_t DerefBytes = |
1668 | (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()); |
1669 | IsNonNull |= !NullPointerIsDefined; |
1670 | return std::max(int64_t(0), DerefBytes); |
1671 | } |
1672 | } |
1673 | |
1674 | return 0; |
1675 | } |
1676 | |
1677 | struct AANonNullImpl : AANonNull { |
1678 | AANonNullImpl(const IRPosition &IRP, Attributor &A) |
1679 | : AANonNull(IRP, A), |
1680 | NullIsDefined(NullPointerIsDefined( |
1681 | getAnchorScope(), |
1682 | getAssociatedValue().getType()->getPointerAddressSpace())) {} |
1683 | |
1684 | /// See AbstractAttribute::initialize(...). |
1685 | void initialize(Attributor &A) override { |
1686 | Value &V = getAssociatedValue(); |
1687 | if (!NullIsDefined && |
1688 | hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, |
1689 | /* IgnoreSubsumingPositions */ false, &A)) { |
1690 | indicateOptimisticFixpoint(); |
1691 | return; |
1692 | } |
1693 | |
1694 | if (isa<ConstantPointerNull>(V)) { |
1695 | indicatePessimisticFixpoint(); |
1696 | return; |
1697 | } |
1698 | |
1699 | AANonNull::initialize(A); |
1700 | |
1701 | bool CanBeNull = true; |
1702 | if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) { |
1703 | if (!CanBeNull) { |
1704 | indicateOptimisticFixpoint(); |
1705 | return; |
1706 | } |
1707 | } |
1708 | |
1709 | if (isa<GlobalValue>(&getAssociatedValue())) { |
1710 | indicatePessimisticFixpoint(); |
1711 | return; |
1712 | } |
1713 | |
1714 | if (Instruction *CtxI = getCtxI()) |
1715 | followUsesInMBEC(*this, A, getState(), *CtxI); |
1716 | } |
1717 | |
1718 | /// See followUsesInMBEC |
1719 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
1720 | AANonNull::StateType &State) { |
1721 | bool IsNonNull = false; |
1722 | bool TrackUse = false; |
1723 | getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, |
1724 | IsNonNull, TrackUse); |
1725 | State.setKnown(IsNonNull); |
1726 | return TrackUse; |
1727 | } |
1728 | |
1729 | /// See AbstractAttribute::getAsStr(). |
1730 | const std::string getAsStr() const override { |
1731 | return getAssumed() ? "nonnull" : "may-null"; |
1732 | } |
1733 | |
1734 | /// Flag to determine if the underlying value can be null and still allow |
1735 | /// valid accesses. |
1736 | const bool NullIsDefined; |
1737 | }; |
1738 | |
1739 | /// NonNull attribute for a floating value. |
1740 | struct AANonNullFloating : public AANonNullImpl { |
1741 | AANonNullFloating(const IRPosition &IRP, Attributor &A) |
1742 | : AANonNullImpl(IRP, A) {} |
1743 | |
1744 | /// See AbstractAttribute::updateImpl(...). |
1745 | ChangeStatus updateImpl(Attributor &A) override { |
1746 | const DataLayout &DL = A.getDataLayout(); |
1747 | |
1748 | DominatorTree *DT = nullptr; |
1749 | AssumptionCache *AC = nullptr; |
1750 | InformationCache &InfoCache = A.getInfoCache(); |
1751 | if (const Function *Fn = getAnchorScope()) { |
1752 | DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); |
1753 | AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); |
1754 | } |
1755 | |
1756 | auto VisitValueCB = [&](Value &V, const Instruction *CtxI, |
1757 | AANonNull::StateType &T, bool Stripped) -> bool { |
1758 | const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V)); |
1759 | if (!Stripped && this == &AA) { |
1760 | if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) |
1761 | T.indicatePessimisticFixpoint(); |
1762 | } else { |
1763 | // Use abstract attribute information. |
1764 | const AANonNull::StateType &NS = AA.getState(); |
1765 | T ^= NS; |
1766 | } |
1767 | return T.isValidState(); |
1768 | }; |
1769 | |
1770 | StateType T; |
1771 | if (!genericValueTraversal<AANonNull, StateType>( |
1772 | A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) |
1773 | return indicatePessimisticFixpoint(); |
1774 | |
1775 | return clampStateAndIndicateChange(getState(), T); |
1776 | } |
1777 | |
1778 | /// See AbstractAttribute::trackStatistics() |
1779 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull){ static llvm::Statistic NumIRFunctionReturn_nonnull = {"attributor" , "NumIRFunctionReturn_nonnull", ("Number of " "function returns" " marked '" "nonnull" "'")};; ++(NumIRFunctionReturn_nonnull ); } } |
1780 | }; |
1781 | |
1782 | /// NonNull attribute for function return value. |
1783 | struct AANonNullReturned final |
1784 | : AAReturnedFromReturnedValues<AANonNull, AANonNull> { |
1785 | AANonNullReturned(const IRPosition &IRP, Attributor &A) |
1786 | : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {} |
1787 | |
1788 | /// See AbstractAttribute::getAsStr(). |
1789 | const std::string getAsStr() const override { |
1790 | return getAssumed() ? "nonnull" : "may-null"; |
1791 | } |
1792 | |
1793 | /// See AbstractAttribute::trackStatistics() |
1794 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull){ static llvm::Statistic NumIRFunctionReturn_nonnull = {"attributor" , "NumIRFunctionReturn_nonnull", ("Number of " "function returns" " marked '" "nonnull" "'")};; ++(NumIRFunctionReturn_nonnull ); } } |
1795 | }; |
1796 | |
1797 | /// NonNull attribute for function argument. |
1798 | struct AANonNullArgument final |
1799 | : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { |
1800 | AANonNullArgument(const IRPosition &IRP, Attributor &A) |
1801 | : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} |
1802 | |
1803 | /// See AbstractAttribute::trackStatistics() |
1804 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull){ static llvm::Statistic NumIRArguments_nonnull = {"attributor" , "NumIRArguments_nonnull", ("Number of " "arguments" " marked '" "nonnull" "'")};; ++(NumIRArguments_nonnull); } } |
1805 | }; |
1806 | |
1807 | struct AANonNullCallSiteArgument final : AANonNullFloating { |
1808 | AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) |
1809 | : AANonNullFloating(IRP, A) {} |
1810 | |
1811 | /// See AbstractAttribute::trackStatistics() |
1812 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull){ static llvm::Statistic NumIRCSArguments_nonnull = {"attributor" , "NumIRCSArguments_nonnull", ("Number of " "call site arguments" " marked '" "nonnull" "'")};; ++(NumIRCSArguments_nonnull); } } |
1813 | }; |
1814 | |
1815 | /// NonNull attribute for a call site return position. |
1816 | struct AANonNullCallSiteReturned final |
1817 | : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { |
1818 | AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) |
1819 | : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} |
1820 | |
1821 | /// See AbstractAttribute::trackStatistics() |
1822 | void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull){ static llvm::Statistic NumIRCSReturn_nonnull = {"attributor" , "NumIRCSReturn_nonnull", ("Number of " "call site returns" " marked '" "nonnull" "'")};; ++(NumIRCSReturn_nonnull); } } |
1823 | }; |
1824 | |
1825 | /// ------------------------ No-Recurse Attributes ---------------------------- |
1826 | |
1827 | struct AANoRecurseImpl : public AANoRecurse { |
1828 | AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} |
1829 | |
1830 | /// See AbstractAttribute::getAsStr() |
1831 | const std::string getAsStr() const override { |
1832 | return getAssumed() ? "norecurse" : "may-recurse"; |
1833 | } |
1834 | }; |
1835 | |
1836 | struct AANoRecurseFunction final : AANoRecurseImpl { |
1837 | AANoRecurseFunction(const IRPosition &IRP, Attributor &A) |
1838 | : AANoRecurseImpl(IRP, A) {} |
1839 | |
1840 | /// See AbstractAttribute::initialize(...). |
1841 | void initialize(Attributor &A) override { |
1842 | AANoRecurseImpl::initialize(A); |
1843 | if (const Function *F = getAnchorScope()) |
1844 | if (A.getInfoCache().getSccSize(*F) != 1) |
1845 | indicatePessimisticFixpoint(); |
1846 | } |
1847 | |
1848 | /// See AbstractAttribute::updateImpl(...). |
1849 | ChangeStatus updateImpl(Attributor &A) override { |
1850 | |
1851 | // If all live call sites are known to be no-recurse, we are as well. |
1852 | auto CallSitePred = [&](AbstractCallSite ACS) { |
1853 | const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( |
1854 | *this, IRPosition::function(*ACS.getInstruction()->getFunction()), |
1855 | /* TrackDependence */ false, DepClassTy::OPTIONAL); |
1856 | return NoRecurseAA.isKnownNoRecurse(); |
1857 | }; |
1858 | bool AllCallSitesKnown; |
1859 | if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) { |
1860 | // If we know all call sites and all are known no-recurse, we are done. |
1861 | // If all known call sites, which might not be all that exist, are known |
1862 | // to be no-recurse, we are not done but we can continue to assume |
1863 | // no-recurse. If one of the call sites we have not visited will become |
1864 | // live, another update is triggered. |
1865 | if (AllCallSitesKnown) |
1866 | indicateOptimisticFixpoint(); |
1867 | return ChangeStatus::UNCHANGED; |
1868 | } |
1869 | |
1870 | // If the above check does not hold anymore we look at the calls. |
1871 | auto CheckForNoRecurse = [&](Instruction &I) { |
1872 | const auto &CB = cast<CallBase>(I); |
1873 | if (CB.hasFnAttr(Attribute::NoRecurse)) |
1874 | return true; |
1875 | |
1876 | const auto &NoRecurseAA = |
1877 | A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB)); |
1878 | if (!NoRecurseAA.isAssumedNoRecurse()) |
1879 | return false; |
1880 | |
1881 | // Recursion to the same function |
1882 | if (CB.getCalledFunction() == getAnchorScope()) |
1883 | return false; |
1884 | |
1885 | return true; |
1886 | }; |
1887 | |
1888 | if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this)) |
1889 | return indicatePessimisticFixpoint(); |
1890 | return ChangeStatus::UNCHANGED; |
1891 | } |
1892 | |
1893 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse){ static llvm::Statistic NumIRFunction_norecurse = {"attributor" , "NumIRFunction_norecurse", ("Number of " "functions" " marked '" "norecurse" "'")};; ++(NumIRFunction_norecurse); } } |
1894 | }; |
1895 | |
1896 | /// NoRecurse attribute deduction for a call sites. |
1897 | struct AANoRecurseCallSite final : AANoRecurseImpl { |
1898 | AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) |
1899 | : AANoRecurseImpl(IRP, A) {} |
1900 | |
1901 | /// See AbstractAttribute::initialize(...). |
1902 | void initialize(Attributor &A) override { |
1903 | AANoRecurseImpl::initialize(A); |
1904 | Function *F = getAssociatedFunction(); |
1905 | if (!F || F->isDeclaration()) |
1906 | indicatePessimisticFixpoint(); |
1907 | } |
1908 | |
1909 | /// See AbstractAttribute::updateImpl(...). |
1910 | ChangeStatus updateImpl(Attributor &A) override { |
1911 | // TODO: Once we have call site specific value information we can provide |
1912 | // call site specific liveness information and then it makes |
1913 | // sense to specialize attributes for call sites arguments instead of |
1914 | // redirecting requests to the callee argument. |
1915 | Function *F = getAssociatedFunction(); |
1916 | const IRPosition &FnPos = IRPosition::function(*F); |
1917 | auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos); |
1918 | return clampStateAndIndicateChange(getState(), FnAA.getState()); |
1919 | } |
1920 | |
1921 | /// See AbstractAttribute::trackStatistics() |
1922 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse){ static llvm::Statistic NumIRCS_norecurse = {"attributor", "NumIRCS_norecurse" , ("Number of " "call site" " marked '" "norecurse" "'")};; ++ (NumIRCS_norecurse); }; } |
1923 | }; |
1924 | |
1925 | /// -------------------- Undefined-Behavior Attributes ------------------------ |
1926 | |
1927 | struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior { |
1928 | AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A) |
1929 | : AAUndefinedBehavior(IRP, A) {} |
1930 | |
1931 | /// See AbstractAttribute::updateImpl(...). |
1932 | // through a pointer (i.e. also branches etc.) |
1933 | ChangeStatus updateImpl(Attributor &A) override { |
1934 | const size_t UBPrevSize = KnownUBInsts.size(); |
1935 | const size_t NoUBPrevSize = AssumedNoUBInsts.size(); |
1936 | |
1937 | auto InspectMemAccessInstForUB = [&](Instruction &I) { |
1938 | // Skip instructions that are already saved. |
1939 | if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) |
1940 | return true; |
1941 | |
1942 | // If we reach here, we know we have an instruction |
1943 | // that accesses memory through a pointer operand, |
1944 | // for which getPointerOperand() should give it to us. |
1945 | const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true); |
1946 | assert(PtrOp &&((PtrOp && "Expected pointer operand of memory accessing instruction" ) ? static_cast<void> (0) : __assert_fail ("PtrOp && \"Expected pointer operand of memory accessing instruction\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1947, __PRETTY_FUNCTION__)) |
1947 | "Expected pointer operand of memory accessing instruction")((PtrOp && "Expected pointer operand of memory accessing instruction" ) ? static_cast<void> (0) : __assert_fail ("PtrOp && \"Expected pointer operand of memory accessing instruction\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1947, __PRETTY_FUNCTION__)); |
1948 | |
1949 | // Either we stopped and the appropriate action was taken, |
1950 | // or we got back a simplified value to continue. |
1951 | Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); |
1952 | if (!SimplifiedPtrOp.hasValue()) |
1953 | return true; |
1954 | const Value *PtrOpVal = SimplifiedPtrOp.getValue(); |
1955 | |
1956 | // A memory access through a pointer is considered UB |
1957 | // only if the pointer has constant null value. |
1958 | // TODO: Expand it to not only check constant values. |
1959 | if (!isa<ConstantPointerNull>(PtrOpVal)) { |
1960 | AssumedNoUBInsts.insert(&I); |
1961 | return true; |
1962 | } |
1963 | const Type *PtrTy = PtrOpVal->getType(); |
1964 | |
1965 | // Because we only consider instructions inside functions, |
1966 | // assume that a parent function exists. |
1967 | const Function *F = I.getFunction(); |
1968 | |
1969 | // A memory access using constant null pointer is only considered UB |
1970 | // if null pointer is _not_ defined for the target platform. |
1971 | if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace())) |
1972 | AssumedNoUBInsts.insert(&I); |
1973 | else |
1974 | KnownUBInsts.insert(&I); |
1975 | return true; |
1976 | }; |
1977 | |
1978 | auto InspectBrInstForUB = [&](Instruction &I) { |
1979 | // A conditional branch instruction is considered UB if it has `undef` |
1980 | // condition. |
1981 | |
1982 | // Skip instructions that are already saved. |
1983 | if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) |
1984 | return true; |
1985 | |
1986 | // We know we have a branch instruction. |
1987 | auto BrInst = cast<BranchInst>(&I); |
1988 | |
1989 | // Unconditional branches are never considered UB. |
1990 | if (BrInst->isUnconditional()) |
1991 | return true; |
1992 | |
1993 | // Either we stopped and the appropriate action was taken, |
1994 | // or we got back a simplified value to continue. |
1995 | Optional<Value *> SimplifiedCond = |
1996 | stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst); |
1997 | if (!SimplifiedCond.hasValue()) |
1998 | return true; |
1999 | AssumedNoUBInsts.insert(&I); |
2000 | return true; |
2001 | }; |
2002 | |
2003 | auto InspectCallSiteForUB = [&](Instruction &I) { |
2004 | // Check whether a callsite always cause UB or not |
2005 | |
2006 | // Skip instructions that are already saved. |
2007 | if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) |
2008 | return true; |
2009 | |
2010 | // Check nonnull and noundef argument attribute violation for each |
2011 | // callsite. |
2012 | CallBase &CB = cast<CallBase>(I); |
2013 | Function *Callee = CB.getCalledFunction(); |
2014 | if (!Callee) |
2015 | return true; |
2016 | for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) { |
2017 | // If current argument is known to be simplified to null pointer and the |
2018 | // corresponding argument position is known to have nonnull attribute, |
2019 | // the argument is poison. Furthermore, if the argument is poison and |
2020 | // the position is known to have noundef attriubte, this callsite is |
2021 | // considered UB. |
2022 | if (idx >= Callee->arg_size()) |
2023 | break; |
2024 | Value *ArgVal = CB.getArgOperand(idx); |
2025 | if (!ArgVal) |
2026 | continue; |
2027 | // Here, we handle three cases. |
2028 | // (1) Not having a value means it is dead. (we can replace the value |
2029 | // with undef) |
2030 | // (2) Simplified to undef. The argument violate noundef attriubte. |
2031 | // (3) Simplified to null pointer where known to be nonnull. |
2032 | // The argument is a poison value and violate noundef attribute. |
2033 | IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx); |
2034 | auto &NoUndefAA = A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, |
2035 | /* TrackDependence */ false); |
2036 | if (!NoUndefAA.isKnownNoUndef()) |
2037 | continue; |
2038 | auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( |
2039 | *this, IRPosition::value(*ArgVal), /* TrackDependence */ false); |
2040 | if (!ValueSimplifyAA.isKnown()) |
2041 | continue; |
2042 | Optional<Value *> SimplifiedVal = |
2043 | ValueSimplifyAA.getAssumedSimplifiedValue(A); |
2044 | if (!SimplifiedVal.hasValue() || |
2045 | isa<UndefValue>(*SimplifiedVal.getValue())) { |
2046 | KnownUBInsts.insert(&I); |
2047 | continue; |
2048 | } |
2049 | if (!ArgVal->getType()->isPointerTy() || |
2050 | !isa<ConstantPointerNull>(*SimplifiedVal.getValue())) |
2051 | continue; |
2052 | auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, |
2053 | /* TrackDependence */ false); |
2054 | if (NonNullAA.isKnownNonNull()) |
2055 | KnownUBInsts.insert(&I); |
2056 | } |
2057 | return true; |
2058 | }; |
2059 | |
2060 | auto InspectReturnInstForUB = |
2061 | [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) { |
2062 | // Check if a return instruction always cause UB or not |
2063 | // Note: It is guaranteed that the returned position of the anchor |
2064 | // scope has noundef attribute when this is called. |
2065 | // We also ensure the return position is not "assumed dead" |
2066 | // because the returned value was then potentially simplified to |
2067 | // `undef` in AAReturnedValues without removing the `noundef` |
2068 | // attribute yet. |
2069 | |
2070 | // When the returned position has noundef attriubte, UB occur in the |
2071 | // following cases. |
2072 | // (1) Returned value is known to be undef. |
2073 | // (2) The value is known to be a null pointer and the returned |
2074 | // position has nonnull attribute (because the returned value is |
2075 | // poison). |
2076 | bool FoundUB = false; |
2077 | if (isa<UndefValue>(V)) { |
2078 | FoundUB = true; |
2079 | } else { |
2080 | if (isa<ConstantPointerNull>(V)) { |
2081 | auto &NonNullAA = A.getAAFor<AANonNull>( |
2082 | *this, IRPosition::returned(*getAnchorScope()), |
2083 | /* TrackDependence */ false); |
2084 | if (NonNullAA.isKnownNonNull()) |
2085 | FoundUB = true; |
2086 | } |
2087 | } |
2088 | |
2089 | if (FoundUB) |
2090 | for (ReturnInst *RI : RetInsts) |
2091 | KnownUBInsts.insert(RI); |
2092 | return true; |
2093 | }; |
2094 | |
2095 | A.checkForAllInstructions(InspectMemAccessInstForUB, *this, |
2096 | {Instruction::Load, Instruction::Store, |
2097 | Instruction::AtomicCmpXchg, |
2098 | Instruction::AtomicRMW}, |
2099 | /* CheckBBLivenessOnly */ true); |
2100 | A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br}, |
2101 | /* CheckBBLivenessOnly */ true); |
2102 | A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this); |
2103 | |
2104 | // If the returned position of the anchor scope has noundef attriubte, check |
2105 | // all returned instructions. |
2106 | if (!getAnchorScope()->getReturnType()->isVoidTy()) { |
2107 | const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope()); |
2108 | if (!A.isAssumedDead(ReturnIRP, this, nullptr)) { |
2109 | auto &RetPosNoUndefAA = |
2110 | A.getAAFor<AANoUndef>(*this, ReturnIRP, |
2111 | /* TrackDependence */ false); |
2112 | if (RetPosNoUndefAA.isKnownNoUndef()) |
2113 | A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB, |
2114 | *this); |
2115 | } |
2116 | } |
2117 | |
2118 | if (NoUBPrevSize != AssumedNoUBInsts.size() || |
2119 | UBPrevSize != KnownUBInsts.size()) |
2120 | return ChangeStatus::CHANGED; |
2121 | return ChangeStatus::UNCHANGED; |
2122 | } |
2123 | |
2124 | bool isKnownToCauseUB(Instruction *I) const override { |
2125 | return KnownUBInsts.count(I); |
2126 | } |
2127 | |
2128 | bool isAssumedToCauseUB(Instruction *I) const override { |
2129 | // In simple words, if an instruction is not in the assumed to _not_ |
2130 | // cause UB, then it is assumed UB (that includes those |
2131 | // in the KnownUBInsts set). The rest is boilerplate |
2132 | // is to ensure that it is one of the instructions we test |
2133 | // for UB. |
2134 | |
2135 | switch (I->getOpcode()) { |
2136 | case Instruction::Load: |
2137 | case Instruction::Store: |
2138 | case Instruction::AtomicCmpXchg: |
2139 | case Instruction::AtomicRMW: |
2140 | return !AssumedNoUBInsts.count(I); |
2141 | case Instruction::Br: { |
2142 | auto BrInst = cast<BranchInst>(I); |
2143 | if (BrInst->isUnconditional()) |
2144 | return false; |
2145 | return !AssumedNoUBInsts.count(I); |
2146 | } break; |
2147 | default: |
2148 | return false; |
2149 | } |
2150 | return false; |
2151 | } |
2152 | |
2153 | ChangeStatus manifest(Attributor &A) override { |
2154 | if (KnownUBInsts.empty()) |
2155 | return ChangeStatus::UNCHANGED; |
2156 | for (Instruction *I : KnownUBInsts) |
2157 | A.changeToUnreachableAfterManifest(I); |
2158 | return ChangeStatus::CHANGED; |
2159 | } |
2160 | |
2161 | /// See AbstractAttribute::getAsStr() |
2162 | const std::string getAsStr() const override { |
2163 | return getAssumed() ? "undefined-behavior" : "no-ub"; |
2164 | } |
2165 | |
2166 | /// Note: The correctness of this analysis depends on the fact that the |
2167 | /// following 2 sets will stop changing after some point. |
2168 | /// "Change" here means that their size changes. |
2169 | /// The size of each set is monotonically increasing |
2170 | /// (we only add items to them) and it is upper bounded by the number of |
2171 | /// instructions in the processed function (we can never save more |
2172 | /// elements in either set than this number). Hence, at some point, |
2173 | /// they will stop increasing. |
2174 | /// Consequently, at some point, both sets will have stopped |
2175 | /// changing, effectively making the analysis reach a fixpoint. |
2176 | |
2177 | /// Note: These 2 sets are disjoint and an instruction can be considered |
2178 | /// one of 3 things: |
2179 | /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in |
2180 | /// the KnownUBInsts set. |
2181 | /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior |
2182 | /// has a reason to assume it). |
2183 | /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior |
2184 | /// could not find a reason to assume or prove that it can cause UB, |
2185 | /// hence it assumes it doesn't. We have a set for these instructions |
2186 | /// so that we don't reprocess them in every update. |
2187 | /// Note however that instructions in this set may cause UB. |
2188 | |
2189 | protected: |
2190 | /// A set of all live instructions _known_ to cause UB. |
2191 | SmallPtrSet<Instruction *, 8> KnownUBInsts; |
2192 | |
2193 | private: |
2194 | /// A set of all the (live) instructions that are assumed to _not_ cause UB. |
2195 | SmallPtrSet<Instruction *, 8> AssumedNoUBInsts; |
2196 | |
2197 | // Should be called on updates in which if we're processing an instruction |
2198 | // \p I that depends on a value \p V, one of the following has to happen: |
2199 | // - If the value is assumed, then stop. |
2200 | // - If the value is known but undef, then consider it UB. |
2201 | // - Otherwise, do specific processing with the simplified value. |
2202 | // We return None in the first 2 cases to signify that an appropriate |
2203 | // action was taken and the caller should stop. |
2204 | // Otherwise, we return the simplified value that the caller should |
2205 | // use for specific processing. |
2206 | Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V, |
2207 | Instruction *I) { |
2208 | const auto &ValueSimplifyAA = |
2209 | A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V)); |
2210 | Optional<Value *> SimplifiedV = |
2211 | ValueSimplifyAA.getAssumedSimplifiedValue(A); |
2212 | if (!ValueSimplifyAA.isKnown()) { |
2213 | // Don't depend on assumed values. |
2214 | return llvm::None; |
2215 | } |
2216 | if (!SimplifiedV.hasValue()) { |
2217 | // If it is known (which we tested above) but it doesn't have a value, |
2218 | // then we can assume `undef` and hence the instruction is UB. |
2219 | KnownUBInsts.insert(I); |
2220 | return llvm::None; |
2221 | } |
2222 | Value *Val = SimplifiedV.getValue(); |
2223 | if (isa<UndefValue>(Val)) { |
2224 | KnownUBInsts.insert(I); |
2225 | return llvm::None; |
2226 | } |
2227 | return Val; |
2228 | } |
2229 | }; |
2230 | |
2231 | struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl { |
2232 | AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A) |
2233 | : AAUndefinedBehaviorImpl(IRP, A) {} |
2234 | |
2235 | /// See AbstractAttribute::trackStatistics() |
2236 | void trackStatistics() const override { |
2237 | STATS_DECL(UndefinedBehaviorInstruction, Instruction,static llvm::Statistic NumIRInstruction_UndefinedBehaviorInstruction = {"attributor", "NumIRInstruction_UndefinedBehaviorInstruction" , "Number of instructions known to have UB"};; |
2238 | "Number of instructions known to have UB")static llvm::Statistic NumIRInstruction_UndefinedBehaviorInstruction = {"attributor", "NumIRInstruction_UndefinedBehaviorInstruction" , "Number of instructions known to have UB"};;; |
2239 | BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction)NumIRInstruction_UndefinedBehaviorInstruction += |
2240 | KnownUBInsts.size(); |
2241 | } |
2242 | }; |
2243 | |
2244 | /// ------------------------ Will-Return Attributes ---------------------------- |
2245 | |
2246 | // Helper function that checks whether a function has any cycle which we don't |
2247 | // know if it is bounded or not. |
2248 | // Loops with maximum trip count are considered bounded, any other cycle not. |
2249 | static bool mayContainUnboundedCycle(Function &F, Attributor &A) { |
2250 | ScalarEvolution *SE = |
2251 | A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); |
2252 | LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); |
2253 | // If either SCEV or LoopInfo is not available for the function then we assume |
2254 | // any cycle to be unbounded cycle. |
2255 | // We use scc_iterator which uses Tarjan algorithm to find all the maximal |
2256 | // SCCs.To detect if there's a cycle, we only need to find the maximal ones. |
2257 | if (!SE || !LI) { |
2258 | for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) |
2259 | if (SCCI.hasCycle()) |
2260 | return true; |
2261 | return false; |
2262 | } |
2263 | |
2264 | // If there's irreducible control, the function may contain non-loop cycles. |
2265 | if (mayContainIrreducibleControl(F, LI)) |
2266 | return true; |
2267 | |
2268 | // Any loop that does not have a max trip count is considered unbounded cycle. |
2269 | for (auto *L : LI->getLoopsInPreorder()) { |
2270 | if (!SE->getSmallConstantMaxTripCount(L)) |
2271 | return true; |
2272 | } |
2273 | return false; |
2274 | } |
2275 | |
2276 | struct AAWillReturnImpl : public AAWillReturn { |
2277 | AAWillReturnImpl(const IRPosition &IRP, Attributor &A) |
2278 | : AAWillReturn(IRP, A) {} |
2279 | |
2280 | /// See AbstractAttribute::initialize(...). |
2281 | void initialize(Attributor &A) override { |
2282 | AAWillReturn::initialize(A); |
2283 | |
2284 | Function *F = getAnchorScope(); |
2285 | if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) |
2286 | indicatePessimisticFixpoint(); |
2287 | } |
2288 | |
2289 | /// See AbstractAttribute::updateImpl(...). |
2290 | ChangeStatus updateImpl(Attributor &A) override { |
2291 | auto CheckForWillReturn = [&](Instruction &I) { |
2292 | IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); |
2293 | const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos); |
2294 | if (WillReturnAA.isKnownWillReturn()) |
2295 | return true; |
2296 | if (!WillReturnAA.isAssumedWillReturn()) |
2297 | return false; |
2298 | const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos); |
2299 | return NoRecurseAA.isAssumedNoRecurse(); |
2300 | }; |
2301 | |
2302 | if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this)) |
2303 | return indicatePessimisticFixpoint(); |
2304 | |
2305 | return ChangeStatus::UNCHANGED; |
2306 | } |
2307 | |
2308 | /// See AbstractAttribute::getAsStr() |
2309 | const std::string getAsStr() const override { |
2310 | return getAssumed() ? "willreturn" : "may-noreturn"; |
2311 | } |
2312 | }; |
2313 | |
2314 | struct AAWillReturnFunction final : AAWillReturnImpl { |
2315 | AAWillReturnFunction(const IRPosition &IRP, Attributor &A) |
2316 | : AAWillReturnImpl(IRP, A) {} |
2317 | |
2318 | /// See AbstractAttribute::trackStatistics() |
2319 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn){ static llvm::Statistic NumIRFunction_willreturn = {"attributor" , "NumIRFunction_willreturn", ("Number of " "functions" " marked '" "willreturn" "'")};; ++(NumIRFunction_willreturn); } } |
2320 | }; |
2321 | |
2322 | /// WillReturn attribute deduction for a call sites. |
2323 | struct AAWillReturnCallSite final : AAWillReturnImpl { |
2324 | AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) |
2325 | : AAWillReturnImpl(IRP, A) {} |
2326 | |
2327 | /// See AbstractAttribute::initialize(...). |
2328 | void initialize(Attributor &A) override { |
2329 | AAWillReturn::initialize(A); |
2330 | Function *F = getAssociatedFunction(); |
2331 | if (!F || !A.isFunctionIPOAmendable(*F)) |
2332 | indicatePessimisticFixpoint(); |
2333 | } |
2334 | |
2335 | /// See AbstractAttribute::updateImpl(...). |
2336 | ChangeStatus updateImpl(Attributor &A) override { |
2337 | // TODO: Once we have call site specific value information we can provide |
2338 | // call site specific liveness information and then it makes |
2339 | // sense to specialize attributes for call sites arguments instead of |
2340 | // redirecting requests to the callee argument. |
2341 | Function *F = getAssociatedFunction(); |
2342 | const IRPosition &FnPos = IRPosition::function(*F); |
2343 | auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos); |
2344 | return clampStateAndIndicateChange(getState(), FnAA.getState()); |
2345 | } |
2346 | |
2347 | /// See AbstractAttribute::trackStatistics() |
2348 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn){ static llvm::Statistic NumIRCS_willreturn = {"attributor", "NumIRCS_willreturn" , ("Number of " "call site" " marked '" "willreturn" "'")};; ++ (NumIRCS_willreturn); }; } |
2349 | }; |
2350 | |
2351 | /// -------------------AAReachability Attribute-------------------------- |
2352 | |
2353 | struct AAReachabilityImpl : AAReachability { |
2354 | AAReachabilityImpl(const IRPosition &IRP, Attributor &A) |
2355 | : AAReachability(IRP, A) {} |
2356 | |
2357 | const std::string getAsStr() const override { |
2358 | // TODO: Return the number of reachable queries. |
2359 | return "reachable"; |
2360 | } |
2361 | |
2362 | /// See AbstractAttribute::initialize(...). |
2363 | void initialize(Attributor &A) override { indicatePessimisticFixpoint(); } |
2364 | |
2365 | /// See AbstractAttribute::updateImpl(...). |
2366 | ChangeStatus updateImpl(Attributor &A) override { |
2367 | return indicatePessimisticFixpoint(); |
2368 | } |
2369 | }; |
2370 | |
2371 | struct AAReachabilityFunction final : public AAReachabilityImpl { |
2372 | AAReachabilityFunction(const IRPosition &IRP, Attributor &A) |
2373 | : AAReachabilityImpl(IRP, A) {} |
2374 | |
2375 | /// See AbstractAttribute::trackStatistics() |
2376 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable){ static llvm::Statistic NumIRFunction_reachable = {"attributor" , "NumIRFunction_reachable", ("Number of " "functions" " marked '" "reachable" "'")};; ++(NumIRFunction_reachable); }; } |
2377 | }; |
2378 | |
2379 | /// ------------------------ NoAlias Argument Attribute ------------------------ |
2380 | |
2381 | struct AANoAliasImpl : AANoAlias { |
2382 | AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { |
2383 | assert(getAssociatedType()->isPointerTy() &&((getAssociatedType()->isPointerTy() && "Noalias is a pointer attribute" ) ? static_cast<void> (0) : __assert_fail ("getAssociatedType()->isPointerTy() && \"Noalias is a pointer attribute\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 2384, __PRETTY_FUNCTION__)) |
2384 | "Noalias is a pointer attribute")((getAssociatedType()->isPointerTy() && "Noalias is a pointer attribute" ) ? static_cast<void> (0) : __assert_fail ("getAssociatedType()->isPointerTy() && \"Noalias is a pointer attribute\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 2384, __PRETTY_FUNCTION__)); |
2385 | } |
2386 | |
2387 | const std::string getAsStr() const override { |
2388 | return getAssumed() ? "noalias" : "may-alias"; |
2389 | } |
2390 | }; |
2391 | |
2392 | /// NoAlias attribute for a floating value. |
2393 | struct AANoAliasFloating final : AANoAliasImpl { |
2394 | AANoAliasFloating(const IRPosition &IRP, Attributor &A) |
2395 | : AANoAliasImpl(IRP, A) {} |
2396 | |
2397 | /// See AbstractAttribute::initialize(...). |
2398 | void initialize(Attributor &A) override { |
2399 | AANoAliasImpl::initialize(A); |
2400 | Value *Val = &getAssociatedValue(); |
2401 | do { |
2402 | CastInst *CI = dyn_cast<CastInst>(Val); |
2403 | if (!CI) |
2404 | break; |
2405 | Value *Base = CI->getOperand(0); |
2406 | if (!Base->hasOneUse()) |
2407 | break; |
2408 | Val = Base; |
2409 | } while (true); |
2410 | |
2411 | if (!Val->getType()->isPointerTy()) { |
2412 | indicatePessimisticFixpoint(); |
2413 | return; |
2414 | } |
2415 | |
2416 | if (isa<AllocaInst>(Val)) |
2417 | indicateOptimisticFixpoint(); |
2418 | else if (isa<ConstantPointerNull>(Val) && |
2419 | !NullPointerIsDefined(getAnchorScope(), |
2420 | Val->getType()->getPointerAddressSpace())) |
2421 | indicateOptimisticFixpoint(); |
2422 | else if (Val != &getAssociatedValue()) { |
2423 | const auto &ValNoAliasAA = |
2424 | A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val)); |
2425 | if (ValNoAliasAA.isKnownNoAlias()) |
2426 | indicateOptimisticFixpoint(); |
2427 | } |
2428 | } |
2429 | |
2430 | /// See AbstractAttribute::updateImpl(...). |
2431 | ChangeStatus updateImpl(Attributor &A) override { |
2432 | // TODO: Implement this. |
2433 | return indicatePessimisticFixpoint(); |
2434 | } |
2435 | |
2436 | /// See AbstractAttribute::trackStatistics() |
2437 | void trackStatistics() const override { |
2438 | STATS_DECLTRACK_FLOATING_ATTR(noalias){ static llvm::Statistic NumIRFloating_noalias = {"attributor" , "NumIRFloating_noalias", ("Number of floating values known to be '" "noalias" "'")};; ++(NumIRFloating_noalias); } |
2439 | } |
2440 | }; |
2441 | |
2442 | /// NoAlias attribute for an argument. |
2443 | struct AANoAliasArgument final |
2444 | : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { |
2445 | using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; |
2446 | AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} |
2447 | |
2448 | /// See AbstractAttribute::initialize(...). |
2449 | void initialize(Attributor &A) override { |
2450 | Base::initialize(A); |
2451 | // See callsite argument attribute and callee argument attribute. |
2452 | if (hasAttr({Attribute::ByVal})) |
2453 | indicateOptimisticFixpoint(); |
2454 | } |
2455 | |
2456 | /// See AbstractAttribute::update(...). |
2457 | ChangeStatus updateImpl(Attributor &A) override { |
2458 | // We have to make sure no-alias on the argument does not break |
2459 | // synchronization when this is a callback argument, see also [1] below. |
2460 | // If synchronization cannot be affected, we delegate to the base updateImpl |
2461 | // function, otherwise we give up for now. |
2462 | |
2463 | // If the function is no-sync, no-alias cannot break synchronization. |
2464 | const auto &NoSyncAA = A.getAAFor<AANoSync>( |
2465 | *this, IRPosition::function_scope(getIRPosition())); |
2466 | if (NoSyncAA.isAssumedNoSync()) |
2467 | return Base::updateImpl(A); |
2468 | |
2469 | // If the argument is read-only, no-alias cannot break synchronization. |
2470 | const auto &MemBehaviorAA = |
2471 | A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); |
2472 | if (MemBehaviorAA.isAssumedReadOnly()) |
2473 | return Base::updateImpl(A); |
2474 | |
2475 | // If the argument is never passed through callbacks, no-alias cannot break |
2476 | // synchronization. |
2477 | bool AllCallSitesKnown; |
2478 | if (A.checkForAllCallSites( |
2479 | [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, |
2480 | true, AllCallSitesKnown)) |
2481 | return Base::updateImpl(A); |
2482 | |
2483 | // TODO: add no-alias but make sure it doesn't break synchronization by |
2484 | // introducing fake uses. See: |
2485 | // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, |
2486 | // International Workshop on OpenMP 2018, |
2487 | // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf |
2488 | |
2489 | return indicatePessimisticFixpoint(); |
2490 | } |
2491 | |
2492 | /// See AbstractAttribute::trackStatistics() |
2493 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias){ static llvm::Statistic NumIRArguments_noalias = {"attributor" , "NumIRArguments_noalias", ("Number of " "arguments" " marked '" "noalias" "'")};; ++(NumIRArguments_noalias); } } |
2494 | }; |
2495 | |
2496 | struct AANoAliasCallSiteArgument final : AANoAliasImpl { |
2497 | AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) |
2498 | : AANoAliasImpl(IRP, A) {} |
2499 | |
2500 | /// See AbstractAttribute::initialize(...). |
2501 | void initialize(Attributor &A) override { |
2502 | // See callsite argument attribute and callee argument attribute. |
2503 | const auto &CB = cast<CallBase>(getAnchorValue()); |
2504 | if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias)) |
2505 | indicateOptimisticFixpoint(); |
2506 | Value &Val = getAssociatedValue(); |
2507 | if (isa<ConstantPointerNull>(Val) && |
2508 | !NullPointerIsDefined(getAnchorScope(), |
2509 | Val.getType()->getPointerAddressSpace())) |
2510 | indicateOptimisticFixpoint(); |
2511 | } |
2512 | |
2513 | /// Determine if the underlying value may alias with the call site argument |
2514 | /// \p OtherArgNo of \p ICS (= the underlying call site). |
2515 | bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, |
2516 | const AAMemoryBehavior &MemBehaviorAA, |
2517 | const CallBase &CB, unsigned OtherArgNo) { |
2518 | // We do not need to worry about aliasing with the underlying IRP. |
2519 | if (this->getCalleeArgNo() == (int)OtherArgNo) |
2520 | return false; |
2521 | |
2522 | // If it is not a pointer or pointer vector we do not alias. |
2523 | const Value *ArgOp = CB.getArgOperand(OtherArgNo); |
2524 | if (!ArgOp->getType()->isPtrOrPtrVectorTy()) |
2525 | return false; |
2526 | |
2527 | auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( |
2528 | *this, IRPosition::callsite_argument(CB, OtherArgNo), |
2529 | /* TrackDependence */ false); |
2530 | |
2531 | // If the argument is readnone, there is no read-write aliasing. |
2532 | if (CBArgMemBehaviorAA.isAssumedReadNone()) { |
2533 | A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); |
2534 | return false; |
2535 | } |
2536 | |
2537 | // If the argument is readonly and the underlying value is readonly, there |
2538 | // is no read-write aliasing. |
2539 | bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); |
2540 | if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { |
2541 | A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); |
2542 | A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); |
2543 | return false; |
2544 | } |
2545 | |
2546 | // We have to utilize actual alias analysis queries so we need the object. |
2547 | if (!AAR) |
2548 | AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); |
2549 | |
2550 | // Try to rule it out at the call site. |
2551 | bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); |
2552 | LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[NoAliasCSArg] Check alias between " "callsite arguments: " << getAssociatedValue() << " " << *ArgOp << " => " << (IsAliasing ? "" : "no-") << "alias \n"; } } while (false) |
2553 | "callsite arguments: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[NoAliasCSArg] Check alias between " "callsite arguments: " << getAssociatedValue() << " " << *ArgOp << " => " << (IsAliasing ? "" : "no-") << "alias \n"; } } while (false) |
2554 | << getAssociatedValue() << " " << *ArgOp << " => "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[NoAliasCSArg] Check alias between " "callsite arguments: " << getAssociatedValue() << " " << *ArgOp << " => " << (IsAliasing ? "" : "no-") << "alias \n"; } } while (false) |
2555 | << (IsAliasing ? "" : "no-") << "alias \n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[NoAliasCSArg] Check alias between " "callsite arguments: " << getAssociatedValue() << " " << *ArgOp << " => " << (IsAliasing ? "" : "no-") << "alias \n"; } } while (false); |
2556 | |
2557 | return IsAliasing; |
2558 | } |
2559 | |
2560 | bool |
2561 | isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, |
2562 | const AAMemoryBehavior &MemBehaviorAA, |
2563 | const AANoAlias &NoAliasAA) { |
2564 | // We can deduce "noalias" if the following conditions hold. |
2565 | // (i) Associated value is assumed to be noalias in the definition. |
2566 | // (ii) Associated value is assumed to be no-capture in all the uses |
2567 | // possibly executed before this callsite. |
2568 | // (iii) There is no other pointer argument which could alias with the |
2569 | // value. |
2570 | |
2571 | bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); |
2572 | if (!AssociatedValueIsNoAliasAtDef) { |
2573 | LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAlias] " << getAssociatedValue () << " is not no-alias at the definition\n"; } } while (false) |
2574 | << " is not no-alias at the definition\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAlias] " << getAssociatedValue () << " is not no-alias at the definition\n"; } } while (false); |
2575 | return false; |
2576 | } |
2577 | |
2578 | A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); |
2579 | |
2580 | const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); |
2581 | const Function *ScopeFn = VIRP.getAnchorScope(); |
2582 | auto &NoCaptureAA = |
2583 | A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false); |
2584 | // Check whether the value is captured in the scope using AANoCapture. |
2585 | // Look at CFG and check only uses possibly executed before this |
2586 | // callsite. |
2587 | auto UsePred = [&](const Use &U, bool &Follow) -> bool { |
2588 | Instruction *UserI = cast<Instruction>(U.getUser()); |
2589 | |
2590 | // If UserI is the curr instruction and there is a single potential use of |
2591 | // the value in UserI we allow the use. |
2592 | // TODO: We should inspect the operands and allow those that cannot alias |
2593 | // with the value. |
2594 | if (UserI == getCtxI() && UserI->getNumOperands() == 1) |
2595 | return true; |
2596 | |
2597 | if (ScopeFn) { |
2598 | const auto &ReachabilityAA = |
2599 | A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn)); |
2600 | |
2601 | if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) |
2602 | return true; |
2603 | |
2604 | if (auto *CB = dyn_cast<CallBase>(UserI)) { |
2605 | if (CB->isArgOperand(&U)) { |
2606 | |
2607 | unsigned ArgNo = CB->getArgOperandNo(&U); |
2608 | |
2609 | const auto &NoCaptureAA = A.getAAFor<AANoCapture>( |
2610 | *this, IRPosition::callsite_argument(*CB, ArgNo)); |
2611 | |
2612 | if (NoCaptureAA.isAssumedNoCapture()) |
2613 | return true; |
2614 | } |
2615 | } |
2616 | } |
2617 | |
2618 | // For cases which can potentially have more users |
2619 | if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || |
2620 | isa<SelectInst>(U)) { |
2621 | Follow = true; |
2622 | return true; |
2623 | } |
2624 | |
2625 | LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"; } } while (false); |
2626 | return false; |
2627 | }; |
2628 | |
2629 | if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { |
2630 | if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { |
2631 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() << " cannot be noalias as it is potentially captured\n" ; } } while (false) |
2632 | dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() << " cannot be noalias as it is potentially captured\n" ; } } while (false) |
2633 | << " cannot be noalias as it is potentially captured\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() << " cannot be noalias as it is potentially captured\n" ; } } while (false); |
2634 | return false; |
2635 | } |
2636 | } |
2637 | A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); |
2638 | |
2639 | // Check there is no other pointer argument which could alias with the |
2640 | // value passed at this call site. |
2641 | // TODO: AbstractCallSite |
2642 | const auto &CB = cast<CallBase>(getAnchorValue()); |
2643 | for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands(); |
2644 | OtherArgNo++) |
2645 | if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) |
2646 | return false; |
2647 | |
2648 | return true; |
2649 | } |
2650 | |
2651 | /// See AbstractAttribute::updateImpl(...). |
2652 | ChangeStatus updateImpl(Attributor &A) override { |
2653 | // If the argument is readnone we are done as there are no accesses via the |
2654 | // argument. |
2655 | auto &MemBehaviorAA = |
2656 | A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), |
2657 | /* TrackDependence */ false); |
2658 | if (MemBehaviorAA.isAssumedReadNone()) { |
2659 | A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); |
2660 | return ChangeStatus::UNCHANGED; |
2661 | } |
2662 | |
2663 | const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); |
2664 | const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP, |
2665 | /* TrackDependence */ false); |
2666 | |
2667 | AAResults *AAR = nullptr; |
2668 | if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, |
2669 | NoAliasAA)) { |
2670 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n" ; } } while (false) |
2671 | dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n" ; } } while (false); |
2672 | return ChangeStatus::UNCHANGED; |
2673 | } |
2674 | |
2675 | return indicatePessimisticFixpoint(); |
2676 | } |
2677 | |
2678 | /// See AbstractAttribute::trackStatistics() |
2679 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias){ static llvm::Statistic NumIRCSArguments_noalias = {"attributor" , "NumIRCSArguments_noalias", ("Number of " "call site arguments" " marked '" "noalias" "'")};; ++(NumIRCSArguments_noalias); } } |
2680 | }; |
2681 | |
2682 | /// NoAlias attribute for function return value. |
2683 | struct AANoAliasReturned final : AANoAliasImpl { |
2684 | AANoAliasReturned(const IRPosition &IRP, Attributor &A) |
2685 | : AANoAliasImpl(IRP, A) {} |
2686 | |
2687 | /// See AbstractAttribute::initialize(...). |
2688 | void initialize(Attributor &A) override { |
2689 | AANoAliasImpl::initialize(A); |
2690 | Function *F = getAssociatedFunction(); |
2691 | if (!F || F->isDeclaration()) |
2692 | indicatePessimisticFixpoint(); |
2693 | } |
2694 | |
2695 | /// See AbstractAttribute::updateImpl(...). |
2696 | virtual ChangeStatus updateImpl(Attributor &A) override { |
2697 | |
2698 | auto CheckReturnValue = [&](Value &RV) -> bool { |
2699 | if (Constant *C = dyn_cast<Constant>(&RV)) |
2700 | if (C->isNullValue() || isa<UndefValue>(C)) |
2701 | return true; |
2702 | |
2703 | /// For now, we can only deduce noalias if we have call sites. |
2704 | /// FIXME: add more support. |
2705 | if (!isa<CallBase>(&RV)) |
2706 | return false; |
2707 | |
2708 | const IRPosition &RVPos = IRPosition::value(RV); |
2709 | const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos); |
2710 | if (!NoAliasAA.isAssumedNoAlias()) |
2711 | return false; |
2712 | |
2713 | const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos); |
2714 | return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); |
2715 | }; |
2716 | |
2717 | if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) |
2718 | return indicatePessimisticFixpoint(); |
2719 | |
2720 | return ChangeStatus::UNCHANGED; |
2721 | } |
2722 | |
2723 | /// See AbstractAttribute::trackStatistics() |
2724 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias){ static llvm::Statistic NumIRFunctionReturn_noalias = {"attributor" , "NumIRFunctionReturn_noalias", ("Number of " "function returns" " marked '" "noalias" "'")};; ++(NumIRFunctionReturn_noalias ); } } |
2725 | }; |
2726 | |
2727 | /// NoAlias attribute deduction for a call site return value. |
2728 | struct AANoAliasCallSiteReturned final : AANoAliasImpl { |
2729 | AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) |
2730 | : AANoAliasImpl(IRP, A) {} |
2731 | |
2732 | /// See AbstractAttribute::initialize(...). |
2733 | void initialize(Attributor &A) override { |
2734 | AANoAliasImpl::initialize(A); |
2735 | Function *F = getAssociatedFunction(); |
2736 | if (!F || F->isDeclaration()) |
2737 | indicatePessimisticFixpoint(); |
2738 | } |
2739 | |
2740 | /// See AbstractAttribute::updateImpl(...). |
2741 | ChangeStatus updateImpl(Attributor &A) override { |
2742 | // TODO: Once we have call site specific value information we can provide |
2743 | // call site specific liveness information and then it makes |
2744 | // sense to specialize attributes for call sites arguments instead of |
2745 | // redirecting requests to the callee argument. |
2746 | Function *F = getAssociatedFunction(); |
2747 | const IRPosition &FnPos = IRPosition::returned(*F); |
2748 | auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos); |
2749 | return clampStateAndIndicateChange(getState(), FnAA.getState()); |
2750 | } |
2751 | |
2752 | /// See AbstractAttribute::trackStatistics() |
2753 | void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias){ static llvm::Statistic NumIRCSReturn_noalias = {"attributor" , "NumIRCSReturn_noalias", ("Number of " "call site returns" " marked '" "noalias" "'")};; ++(NumIRCSReturn_noalias); }; } |
2754 | }; |
2755 | |
2756 | /// -------------------AAIsDead Function Attribute----------------------- |
2757 | |
2758 | struct AAIsDeadValueImpl : public AAIsDead { |
2759 | AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} |
2760 | |
2761 | /// See AAIsDead::isAssumedDead(). |
2762 | bool isAssumedDead() const override { return getAssumed(); } |
2763 | |
2764 | /// See AAIsDead::isKnownDead(). |
2765 | bool isKnownDead() const override { return getKnown(); } |
2766 | |
2767 | /// See AAIsDead::isAssumedDead(BasicBlock *). |
2768 | bool isAssumedDead(const BasicBlock *BB) const override { return false; } |
2769 | |
2770 | /// See AAIsDead::isKnownDead(BasicBlock *). |
2771 | bool isKnownDead(const BasicBlock *BB) const override { return false; } |
2772 | |
2773 | /// See AAIsDead::isAssumedDead(Instruction *I). |
2774 | bool isAssumedDead(const Instruction *I) const override { |
2775 | return I == getCtxI() && isAssumedDead(); |
2776 | } |
2777 | |
2778 | /// See AAIsDead::isKnownDead(Instruction *I). |
2779 | bool isKnownDead(const Instruction *I) const override { |
2780 | return isAssumedDead(I) && getKnown(); |
2781 | } |
2782 | |
2783 | /// See AbstractAttribute::getAsStr(). |
2784 | const std::string getAsStr() const override { |
2785 | return isAssumedDead() ? "assumed-dead" : "assumed-live"; |
2786 | } |
2787 | |
2788 | /// Check if all uses are assumed dead. |
2789 | bool areAllUsesAssumedDead(Attributor &A, Value &V) { |
2790 | auto UsePred = [&](const Use &U, bool &Follow) { return false; }; |
2791 | // Explicitly set the dependence class to required because we want a long |
2792 | // chain of N dependent instructions to be considered live as soon as one is |
2793 | // without going through N update cycles. This is not required for |
2794 | // correctness. |
2795 | return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED); |
2796 | } |
2797 | |
2798 | /// Determine if \p I is assumed to be side-effect free. |
2799 | bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { |
2800 | if (!I || wouldInstructionBeTriviallyDead(I)) |
2801 | return true; |
2802 | |
2803 | auto *CB = dyn_cast<CallBase>(I); |
2804 | if (!CB || isa<IntrinsicInst>(CB)) |
2805 | return false; |
2806 | |
2807 | const IRPosition &CallIRP = IRPosition::callsite_function(*CB); |
2808 | const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>( |
2809 | *this, CallIRP, /* TrackDependence */ false); |
2810 | if (!NoUnwindAA.isAssumedNoUnwind()) |
2811 | return false; |
2812 | if (!NoUnwindAA.isKnownNoUnwind()) |
2813 | A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); |
2814 | |
2815 | const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>( |
2816 | *this, CallIRP, /* TrackDependence */ false); |
2817 | if (MemBehaviorAA.isAssumedReadOnly()) { |
2818 | if (!MemBehaviorAA.isKnownReadOnly()) |
2819 | A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); |
2820 | return true; |
2821 | } |
2822 | return false; |
2823 | } |
2824 | }; |
2825 | |
2826 | struct AAIsDeadFloating : public AAIsDeadValueImpl { |
2827 | AAIsDeadFloating(const IRPosition &IRP, Attributor &A) |
2828 | : AAIsDeadValueImpl(IRP, A) {} |
2829 | |
2830 | /// See AbstractAttribute::initialize(...). |
2831 | void initialize(Attributor &A) override { |
2832 | if (isa<UndefValue>(getAssociatedValue())) { |
2833 | indicatePessimisticFixpoint(); |
2834 | return; |
2835 | } |
2836 | |
2837 | Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); |
2838 | if (!isAssumedSideEffectFree(A, I)) |
2839 | indicatePessimisticFixpoint(); |
2840 | } |
2841 | |
2842 | /// See AbstractAttribute::updateImpl(...). |
2843 | ChangeStatus updateImpl(Attributor &A) override { |
2844 | Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); |
2845 | if (!isAssumedSideEffectFree(A, I)) |
2846 | return indicatePessimisticFixpoint(); |
2847 | |
2848 | if (!areAllUsesAssumedDead(A, getAssociatedValue())) |
2849 | return indicatePessimisticFixpoint(); |
2850 | return ChangeStatus::UNCHANGED; |
2851 | } |
2852 | |
2853 | /// See AbstractAttribute::manifest(...). |
2854 | ChangeStatus manifest(Attributor &A) override { |
2855 | Value &V = getAssociatedValue(); |
2856 | if (auto *I = dyn_cast<Instruction>(&V)) { |
2857 | // If we get here we basically know the users are all dead. We check if |
2858 | // isAssumedSideEffectFree returns true here again because it might not be |
2859 | // the case and only the users are dead but the instruction (=call) is |
2860 | // still needed. |
2861 | if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) { |
2862 | A.deleteAfterManifest(*I); |
2863 | return ChangeStatus::CHANGED; |
2864 | } |
2865 | } |
2866 | if (V.use_empty()) |
2867 | return ChangeStatus::UNCHANGED; |
2868 | |
2869 | bool UsedAssumedInformation = false; |
2870 | Optional<Constant *> C = |
2871 | A.getAssumedConstant(V, *this, UsedAssumedInformation); |
2872 | if (C.hasValue() && C.getValue()) |
2873 | return ChangeStatus::UNCHANGED; |
2874 | |
2875 | // Replace the value with undef as it is dead but keep droppable uses around |
2876 | // as they provide information we don't want to give up on just yet. |
2877 | UndefValue &UV = *UndefValue::get(V.getType()); |
2878 | bool AnyChange = |
2879 | A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false); |
2880 | return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
2881 | } |
2882 | |
2883 | /// See AbstractAttribute::trackStatistics() |
2884 | void trackStatistics() const override { |
2885 | STATS_DECLTRACK_FLOATING_ATTR(IsDead){ static llvm::Statistic NumIRFloating_IsDead = {"attributor" , "NumIRFloating_IsDead", ("Number of floating values known to be '" "IsDead" "'")};; ++(NumIRFloating_IsDead); } |
2886 | } |
2887 | }; |
2888 | |
2889 | struct AAIsDeadArgument : public AAIsDeadFloating { |
2890 | AAIsDeadArgument(const IRPosition &IRP, Attributor &A) |
2891 | : AAIsDeadFloating(IRP, A) {} |
2892 | |
2893 | /// See AbstractAttribute::initialize(...). |
2894 | void initialize(Attributor &A) override { |
2895 | if (!A.isFunctionIPOAmendable(*getAnchorScope())) |
2896 | indicatePessimisticFixpoint(); |
2897 | } |
2898 | |
2899 | /// See AbstractAttribute::manifest(...). |
2900 | ChangeStatus manifest(Attributor &A) override { |
2901 | ChangeStatus Changed = AAIsDeadFloating::manifest(A); |
2902 | Argument &Arg = *getAssociatedArgument(); |
2903 | if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) |
2904 | if (A.registerFunctionSignatureRewrite( |
2905 | Arg, /* ReplacementTypes */ {}, |
2906 | Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, |
2907 | Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { |
2908 | Arg.dropDroppableUses(); |
2909 | return ChangeStatus::CHANGED; |
2910 | } |
2911 | return Changed; |
2912 | } |
2913 | |
2914 | /// See AbstractAttribute::trackStatistics() |
2915 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead){ static llvm::Statistic NumIRArguments_IsDead = {"attributor" , "NumIRArguments_IsDead", ("Number of " "arguments" " marked '" "IsDead" "'")};; ++(NumIRArguments_IsDead); } } |
2916 | }; |
2917 | |
2918 | struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { |
2919 | AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) |
2920 | : AAIsDeadValueImpl(IRP, A) {} |
2921 | |
2922 | /// See AbstractAttribute::initialize(...). |
2923 | void initialize(Attributor &A) override { |
2924 | if (isa<UndefValue>(getAssociatedValue())) |
2925 | indicatePessimisticFixpoint(); |
2926 | } |
2927 | |
2928 | /// See AbstractAttribute::updateImpl(...). |
2929 | ChangeStatus updateImpl(Attributor &A) override { |
2930 | // TODO: Once we have call site specific value information we can provide |
2931 | // call site specific liveness information and then it makes |
2932 | // sense to specialize attributes for call sites arguments instead of |
2933 | // redirecting requests to the callee argument. |
2934 | Argument *Arg = getAssociatedArgument(); |
2935 | if (!Arg) |
2936 | return indicatePessimisticFixpoint(); |
2937 | const IRPosition &ArgPos = IRPosition::argument(*Arg); |
2938 | auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos); |
2939 | return clampStateAndIndicateChange(getState(), ArgAA.getState()); |
2940 | } |
2941 | |
2942 | /// See AbstractAttribute::manifest(...). |
2943 | ChangeStatus manifest(Attributor &A) override { |
2944 | CallBase &CB = cast<CallBase>(getAnchorValue()); |
2945 | Use &U = CB.getArgOperandUse(getCallSiteArgNo()); |
2946 | assert(!isa<UndefValue>(U.get()) &&((!isa<UndefValue>(U.get()) && "Expected undef values to be filtered out!" ) ? static_cast<void> (0) : __assert_fail ("!isa<UndefValue>(U.get()) && \"Expected undef values to be filtered out!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 2947, __PRETTY_FUNCTION__)) |
2947 | "Expected undef values to be filtered out!")((!isa<UndefValue>(U.get()) && "Expected undef values to be filtered out!" ) ? static_cast<void> (0) : __assert_fail ("!isa<UndefValue>(U.get()) && \"Expected undef values to be filtered out!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 2947, __PRETTY_FUNCTION__)); |
2948 | UndefValue &UV = *UndefValue::get(U->getType()); |
2949 | if (A.changeUseAfterManifest(U, UV)) |
2950 | return ChangeStatus::CHANGED; |
2951 | return ChangeStatus::UNCHANGED; |
2952 | } |
2953 | |
2954 | /// See AbstractAttribute::trackStatistics() |
2955 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead){ static llvm::Statistic NumIRCSArguments_IsDead = {"attributor" , "NumIRCSArguments_IsDead", ("Number of " "call site arguments" " marked '" "IsDead" "'")};; ++(NumIRCSArguments_IsDead); } } |
2956 | }; |
2957 | |
2958 | struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { |
2959 | AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) |
2960 | : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} |
2961 | |
2962 | /// See AAIsDead::isAssumedDead(). |
2963 | bool isAssumedDead() const override { |
2964 | return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; |
2965 | } |
2966 | |
2967 | /// See AbstractAttribute::initialize(...). |
2968 | void initialize(Attributor &A) override { |
2969 | if (isa<UndefValue>(getAssociatedValue())) { |
2970 | indicatePessimisticFixpoint(); |
2971 | return; |
2972 | } |
2973 | |
2974 | // We track this separately as a secondary state. |
2975 | IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); |
2976 | } |
2977 | |
2978 | /// See AbstractAttribute::updateImpl(...). |
2979 | ChangeStatus updateImpl(Attributor &A) override { |
2980 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
2981 | if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { |
2982 | IsAssumedSideEffectFree = false; |
2983 | Changed = ChangeStatus::CHANGED; |
2984 | } |
2985 | |
2986 | if (!areAllUsesAssumedDead(A, getAssociatedValue())) |
2987 | return indicatePessimisticFixpoint(); |
2988 | return Changed; |
2989 | } |
2990 | |
2991 | /// See AbstractAttribute::trackStatistics() |
2992 | void trackStatistics() const override { |
2993 | if (IsAssumedSideEffectFree) |
2994 | STATS_DECLTRACK_CSRET_ATTR(IsDead){ static llvm::Statistic NumIRCSReturn_IsDead = {"attributor" , "NumIRCSReturn_IsDead", ("Number of " "call site returns" " marked '" "IsDead" "'")};; ++(NumIRCSReturn_IsDead); } |
2995 | else |
2996 | STATS_DECLTRACK_CSRET_ATTR(UnusedResult){ static llvm::Statistic NumIRCSReturn_UnusedResult = {"attributor" , "NumIRCSReturn_UnusedResult", ("Number of " "call site returns" " marked '" "UnusedResult" "'")};; ++(NumIRCSReturn_UnusedResult ); } |
2997 | } |
2998 | |
2999 | /// See AbstractAttribute::getAsStr(). |
3000 | const std::string getAsStr() const override { |
3001 | return isAssumedDead() |
3002 | ? "assumed-dead" |
3003 | : (getAssumed() ? "assumed-dead-users" : "assumed-live"); |
3004 | } |
3005 | |
3006 | private: |
3007 | bool IsAssumedSideEffectFree; |
3008 | }; |
3009 | |
3010 | struct AAIsDeadReturned : public AAIsDeadValueImpl { |
3011 | AAIsDeadReturned(const IRPosition &IRP, Attributor &A) |
3012 | : AAIsDeadValueImpl(IRP, A) {} |
3013 | |
3014 | /// See AbstractAttribute::updateImpl(...). |
3015 | ChangeStatus updateImpl(Attributor &A) override { |
3016 | |
3017 | A.checkForAllInstructions([](Instruction &) { return true; }, *this, |
3018 | {Instruction::Ret}); |
3019 | |
3020 | auto PredForCallSite = [&](AbstractCallSite ACS) { |
3021 | if (ACS.isCallbackCall() || !ACS.getInstruction()) |
3022 | return false; |
3023 | return areAllUsesAssumedDead(A, *ACS.getInstruction()); |
3024 | }; |
3025 | |
3026 | bool AllCallSitesKnown; |
3027 | if (!A.checkForAllCallSites(PredForCallSite, *this, true, |
3028 | AllCallSitesKnown)) |
3029 | return indicatePessimisticFixpoint(); |
3030 | |
3031 | return ChangeStatus::UNCHANGED; |
3032 | } |
3033 | |
3034 | /// See AbstractAttribute::manifest(...). |
3035 | ChangeStatus manifest(Attributor &A) override { |
3036 | // TODO: Rewrite the signature to return void? |
3037 | bool AnyChange = false; |
3038 | UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); |
3039 | auto RetInstPred = [&](Instruction &I) { |
3040 | ReturnInst &RI = cast<ReturnInst>(I); |
3041 | if (!isa<UndefValue>(RI.getReturnValue())) |
3042 | AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); |
3043 | return true; |
3044 | }; |
3045 | A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}); |
3046 | return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; |
3047 | } |
3048 | |
3049 | /// See AbstractAttribute::trackStatistics() |
3050 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead){ static llvm::Statistic NumIRFunctionReturn_IsDead = {"attributor" , "NumIRFunctionReturn_IsDead", ("Number of " "function returns" " marked '" "IsDead" "'")};; ++(NumIRFunctionReturn_IsDead); } } |
3051 | }; |
3052 | |
3053 | struct AAIsDeadFunction : public AAIsDead { |
3054 | AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} |
3055 | |
3056 | /// See AbstractAttribute::initialize(...). |
3057 | void initialize(Attributor &A) override { |
3058 | const Function *F = getAnchorScope(); |
3059 | if (F && !F->isDeclaration()) { |
3060 | // We only want to compute liveness once. If the function is not part of |
3061 | // the SCC, skip it. |
3062 | if (A.isRunOn(*const_cast<Function *>(F))) { |
3063 | ToBeExploredFrom.insert(&F->getEntryBlock().front()); |
3064 | assumeLive(A, F->getEntryBlock()); |
3065 | } else { |
3066 | indicatePessimisticFixpoint(); |
3067 | } |
3068 | } |
3069 | } |
3070 | |
3071 | /// See AbstractAttribute::getAsStr(). |
3072 | const std::string getAsStr() const override { |
3073 | return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + |
3074 | std::to_string(getAnchorScope()->size()) + "][#TBEP " + |
3075 | std::to_string(ToBeExploredFrom.size()) + "][#KDE " + |
3076 | std::to_string(KnownDeadEnds.size()) + "]"; |
3077 | } |
3078 | |
3079 | /// See AbstractAttribute::manifest(...). |
3080 | ChangeStatus manifest(Attributor &A) override { |
3081 | assert(getState().isValidState() &&((getState().isValidState() && "Attempted to manifest an invalid state!" ) ? static_cast<void> (0) : __assert_fail ("getState().isValidState() && \"Attempted to manifest an invalid state!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3082, __PRETTY_FUNCTION__)) |
3082 | "Attempted to manifest an invalid state!")((getState().isValidState() && "Attempted to manifest an invalid state!" ) ? static_cast<void> (0) : __assert_fail ("getState().isValidState() && \"Attempted to manifest an invalid state!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3082, __PRETTY_FUNCTION__)); |
3083 | |
3084 | ChangeStatus HasChanged = ChangeStatus::UNCHANGED; |
3085 | Function &F = *getAnchorScope(); |
3086 | |
3087 | if (AssumedLiveBlocks.empty()) { |
3088 | A.deleteAfterManifest(F); |
3089 | return ChangeStatus::CHANGED; |
3090 | } |
3091 | |
3092 | // Flag to determine if we can change an invoke to a call assuming the |
3093 | // callee is nounwind. This is not possible if the personality of the |
3094 | // function allows to catch asynchronous exceptions. |
3095 | bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); |
3096 | |
3097 | KnownDeadEnds.set_union(ToBeExploredFrom); |
3098 | for (const Instruction *DeadEndI : KnownDeadEnds) { |
3099 | auto *CB = dyn_cast<CallBase>(DeadEndI); |
3100 | if (!CB) |
3101 | continue; |
3102 | const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( |
3103 | *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true, |
3104 | DepClassTy::OPTIONAL); |
3105 | bool MayReturn = !NoReturnAA.isAssumedNoReturn(); |
3106 | if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) |
3107 | continue; |
3108 | |
3109 | if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) |
3110 | A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); |
3111 | else |
3112 | A.changeToUnreachableAfterManifest( |
3113 | const_cast<Instruction *>(DeadEndI->getNextNode())); |
3114 | HasChanged = ChangeStatus::CHANGED; |
3115 | } |
3116 | |
3117 | STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.")static llvm::Statistic NumIRBasicBlock_AAIsDead = {"attributor" , "NumIRBasicBlock_AAIsDead", "Number of dead basic blocks deleted." };;; |
3118 | for (BasicBlock &BB : F) |
3119 | if (!AssumedLiveBlocks.count(&BB)) { |
3120 | A.deleteAfterManifest(BB); |
3121 | ++BUILD_STAT_NAME(AAIsDead, BasicBlock)NumIRBasicBlock_AAIsDead; |
3122 | } |
3123 | |
3124 | return HasChanged; |
3125 | } |
3126 | |
3127 | /// See AbstractAttribute::updateImpl(...). |
3128 | ChangeStatus updateImpl(Attributor &A) override; |
3129 | |
3130 | bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { |
3131 | return !AssumedLiveEdges.count(std::make_pair(From, To)); |
3132 | } |
3133 | |
3134 | /// See AbstractAttribute::trackStatistics() |
3135 | void trackStatistics() const override {} |
3136 | |
3137 | /// Returns true if the function is assumed dead. |
3138 | bool isAssumedDead() const override { return false; } |
3139 | |
3140 | /// See AAIsDead::isKnownDead(). |
3141 | bool isKnownDead() const override { return false; } |
3142 | |
3143 | /// See AAIsDead::isAssumedDead(BasicBlock *). |
3144 | bool isAssumedDead(const BasicBlock *BB) const override { |
3145 | assert(BB->getParent() == getAnchorScope() &&((BB->getParent() == getAnchorScope() && "BB must be in the same anchor scope function." ) ? static_cast<void> (0) : __assert_fail ("BB->getParent() == getAnchorScope() && \"BB must be in the same anchor scope function.\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3146, __PRETTY_FUNCTION__)) |
3146 | "BB must be in the same anchor scope function.")((BB->getParent() == getAnchorScope() && "BB must be in the same anchor scope function." ) ? static_cast<void> (0) : __assert_fail ("BB->getParent() == getAnchorScope() && \"BB must be in the same anchor scope function.\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3146, __PRETTY_FUNCTION__)); |
3147 | |
3148 | if (!getAssumed()) |
3149 | return false; |
3150 | return !AssumedLiveBlocks.count(BB); |
3151 | } |
3152 | |
3153 | /// See AAIsDead::isKnownDead(BasicBlock *). |
3154 | bool isKnownDead(const BasicBlock *BB) const override { |
3155 | return getKnown() && isAssumedDead(BB); |
3156 | } |
3157 | |
3158 | /// See AAIsDead::isAssumed(Instruction *I). |
3159 | bool isAssumedDead(const Instruction *I) const override { |
3160 | assert(I->getParent()->getParent() == getAnchorScope() &&((I->getParent()->getParent() == getAnchorScope() && "Instruction must be in the same anchor scope function.") ? static_cast <void> (0) : __assert_fail ("I->getParent()->getParent() == getAnchorScope() && \"Instruction must be in the same anchor scope function.\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3161, __PRETTY_FUNCTION__)) |
3161 | "Instruction must be in the same anchor scope function.")((I->getParent()->getParent() == getAnchorScope() && "Instruction must be in the same anchor scope function.") ? static_cast <void> (0) : __assert_fail ("I->getParent()->getParent() == getAnchorScope() && \"Instruction must be in the same anchor scope function.\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3161, __PRETTY_FUNCTION__)); |
3162 | |
3163 | if (!getAssumed()) |
3164 | return false; |
3165 | |
3166 | // If it is not in AssumedLiveBlocks then it for sure dead. |
3167 | // Otherwise, it can still be after noreturn call in a live block. |
3168 | if (!AssumedLiveBlocks.count(I->getParent())) |
3169 | return true; |
3170 | |
3171 | // If it is not after a liveness barrier it is live. |
3172 | const Instruction *PrevI = I->getPrevNode(); |
3173 | while (PrevI) { |
3174 | if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI)) |
3175 | return true; |
3176 | PrevI = PrevI->getPrevNode(); |
3177 | } |
3178 | return false; |
3179 | } |
3180 | |
3181 | /// See AAIsDead::isKnownDead(Instruction *I). |
3182 | bool isKnownDead(const Instruction *I) const override { |
3183 | return getKnown() && isAssumedDead(I); |
3184 | } |
3185 | |
3186 | /// Assume \p BB is (partially) live now and indicate to the Attributor \p A |
3187 | /// that internal function called from \p BB should now be looked at. |
3188 | bool assumeLive(Attributor &A, const BasicBlock &BB) { |
3189 | if (!AssumedLiveBlocks.insert(&BB).second) |
3190 | return false; |
3191 | |
3192 | // We assume that all of BB is (probably) live now and if there are calls to |
3193 | // internal functions we will assume that those are now live as well. This |
3194 | // is a performance optimization for blocks with calls to a lot of internal |
3195 | // functions. It can however cause dead functions to be treated as live. |
3196 | for (const Instruction &I : BB) |
3197 | if (const auto *CB = dyn_cast<CallBase>(&I)) |
3198 | if (const Function *F = CB->getCalledFunction()) |
3199 | if (F->hasLocalLinkage()) |
3200 | A.markLiveInternalFunction(*F); |
3201 | return true; |
3202 | } |
3203 | |
3204 | /// Collection of instructions that need to be explored again, e.g., we |
3205 | /// did assume they do not transfer control to (one of their) successors. |
3206 | SmallSetVector<const Instruction *, 8> ToBeExploredFrom; |
3207 | |
3208 | /// Collection of instructions that are known to not transfer control. |
3209 | SmallSetVector<const Instruction *, 8> KnownDeadEnds; |
3210 | |
3211 | /// Collection of all assumed live edges |
3212 | DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges; |
3213 | |
3214 | /// Collection of all assumed live BasicBlocks. |
3215 | DenseSet<const BasicBlock *> AssumedLiveBlocks; |
3216 | }; |
3217 | |
3218 | static bool |
3219 | identifyAliveSuccessors(Attributor &A, const CallBase &CB, |
3220 | AbstractAttribute &AA, |
3221 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
3222 | const IRPosition &IPos = IRPosition::callsite_function(CB); |
3223 | |
3224 | const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( |
3225 | AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); |
3226 | if (NoReturnAA.isAssumedNoReturn()) |
3227 | return !NoReturnAA.isKnownNoReturn(); |
3228 | if (CB.isTerminator()) |
3229 | AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); |
3230 | else |
3231 | AliveSuccessors.push_back(CB.getNextNode()); |
3232 | return false; |
3233 | } |
3234 | |
3235 | static bool |
3236 | identifyAliveSuccessors(Attributor &A, const InvokeInst &II, |
3237 | AbstractAttribute &AA, |
3238 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
3239 | bool UsedAssumedInformation = |
3240 | identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); |
3241 | |
3242 | // First, determine if we can change an invoke to a call assuming the |
3243 | // callee is nounwind. This is not possible if the personality of the |
3244 | // function allows to catch asynchronous exceptions. |
3245 | if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { |
3246 | AliveSuccessors.push_back(&II.getUnwindDest()->front()); |
3247 | } else { |
3248 | const IRPosition &IPos = IRPosition::callsite_function(II); |
3249 | const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>( |
3250 | AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); |
3251 | if (AANoUnw.isAssumedNoUnwind()) { |
3252 | UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); |
3253 | } else { |
3254 | AliveSuccessors.push_back(&II.getUnwindDest()->front()); |
3255 | } |
3256 | } |
3257 | return UsedAssumedInformation; |
3258 | } |
3259 | |
3260 | static bool |
3261 | identifyAliveSuccessors(Attributor &A, const BranchInst &BI, |
3262 | AbstractAttribute &AA, |
3263 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
3264 | bool UsedAssumedInformation = false; |
3265 | if (BI.getNumSuccessors() == 1) { |
3266 | AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); |
3267 | } else { |
3268 | Optional<ConstantInt *> CI = getAssumedConstantInt( |
3269 | A, *BI.getCondition(), AA, UsedAssumedInformation); |
3270 | if (!CI.hasValue()) { |
3271 | // No value yet, assume both edges are dead. |
3272 | } else if (CI.getValue()) { |
3273 | const BasicBlock *SuccBB = |
3274 | BI.getSuccessor(1 - CI.getValue()->getZExtValue()); |
3275 | AliveSuccessors.push_back(&SuccBB->front()); |
3276 | } else { |
3277 | AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); |
3278 | AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); |
3279 | UsedAssumedInformation = false; |
3280 | } |
3281 | } |
3282 | return UsedAssumedInformation; |
3283 | } |
3284 | |
3285 | static bool |
3286 | identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, |
3287 | AbstractAttribute &AA, |
3288 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { |
3289 | bool UsedAssumedInformation = false; |
3290 | Optional<ConstantInt *> CI = |
3291 | getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation); |
3292 | if (!CI.hasValue()) { |
3293 | // No value yet, assume all edges are dead. |
3294 | } else if (CI.getValue()) { |
3295 | for (auto &CaseIt : SI.cases()) { |
3296 | if (CaseIt.getCaseValue() == CI.getValue()) { |
3297 | AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); |
3298 | return UsedAssumedInformation; |
3299 | } |
3300 | } |
3301 | AliveSuccessors.push_back(&SI.getDefaultDest()->front()); |
3302 | return UsedAssumedInformation; |
3303 | } else { |
3304 | for (const BasicBlock *SuccBB : successors(SI.getParent())) |
3305 | AliveSuccessors.push_back(&SuccBB->front()); |
3306 | } |
3307 | return UsedAssumedInformation; |
3308 | } |
3309 | |
3310 | ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { |
3311 | ChangeStatus Change = ChangeStatus::UNCHANGED; |
3312 | |
3313 | LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" << getAnchorScope ()->size() << "] BBs and " << ToBeExploredFrom .size() << " exploration points and " << KnownDeadEnds .size() << " known dead ends\n"; } } while (false) |
3314 | << getAnchorScope()->size() << "] BBs and "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" << getAnchorScope ()->size() << "] BBs and " << ToBeExploredFrom .size() << " exploration points and " << KnownDeadEnds .size() << " known dead ends\n"; } } while (false) |
3315 | << ToBeExploredFrom.size() << " exploration points and "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" << getAnchorScope ()->size() << "] BBs and " << ToBeExploredFrom .size() << " exploration points and " << KnownDeadEnds .size() << " known dead ends\n"; } } while (false) |
3316 | << KnownDeadEnds.size() << " known dead ends\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" << getAnchorScope ()->size() << "] BBs and " << ToBeExploredFrom .size() << " exploration points and " << KnownDeadEnds .size() << " known dead ends\n"; } } while (false); |
3317 | |
3318 | // Copy and clear the list of instructions we need to explore from. It is |
3319 | // refilled with instructions the next update has to look at. |
3320 | SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), |
3321 | ToBeExploredFrom.end()); |
3322 | decltype(ToBeExploredFrom) NewToBeExploredFrom; |
3323 | |
3324 | SmallVector<const Instruction *, 8> AliveSuccessors; |
3325 | while (!Worklist.empty()) { |
3326 | const Instruction *I = Worklist.pop_back_val(); |
3327 | LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"; } } while (false); |
3328 | |
3329 | // Fast forward for uninteresting instructions. We could look for UB here |
3330 | // though. |
3331 | while (!I->isTerminator() && !isa<CallBase>(I)) { |
3332 | Change = ChangeStatus::CHANGED; |
3333 | I = I->getNextNode(); |
3334 | } |
3335 | |
3336 | AliveSuccessors.clear(); |
3337 | |
3338 | bool UsedAssumedInformation = false; |
3339 | switch (I->getOpcode()) { |
3340 | // TODO: look for (assumed) UB to backwards propagate "deadness". |
3341 | default: |
3342 | assert(I->isTerminator() &&((I->isTerminator() && "Expected non-terminators to be handled already!" ) ? static_cast<void> (0) : __assert_fail ("I->isTerminator() && \"Expected non-terminators to be handled already!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3343, __PRETTY_FUNCTION__)) |
3343 | "Expected non-terminators to be handled already!")((I->isTerminator() && "Expected non-terminators to be handled already!" ) ? static_cast<void> (0) : __assert_fail ("I->isTerminator() && \"Expected non-terminators to be handled already!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3343, __PRETTY_FUNCTION__)); |
3344 | for (const BasicBlock *SuccBB : successors(I->getParent())) |
3345 | AliveSuccessors.push_back(&SuccBB->front()); |
3346 | break; |
3347 | case Instruction::Call: |
3348 | UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I), |
3349 | *this, AliveSuccessors); |
3350 | break; |
3351 | case Instruction::Invoke: |
3352 | UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I), |
3353 | *this, AliveSuccessors); |
3354 | break; |
3355 | case Instruction::Br: |
3356 | UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I), |
3357 | *this, AliveSuccessors); |
3358 | break; |
3359 | case Instruction::Switch: |
3360 | UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I), |
3361 | *this, AliveSuccessors); |
3362 | break; |
3363 | } |
3364 | |
3365 | if (UsedAssumedInformation) { |
3366 | NewToBeExploredFrom.insert(I); |
3367 | } else { |
3368 | Change = ChangeStatus::CHANGED; |
3369 | if (AliveSuccessors.empty() || |
3370 | (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors())) |
3371 | KnownDeadEnds.insert(I); |
3372 | } |
3373 | |
3374 | LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] #AliveSuccessors: " << AliveSuccessors.size() << " UsedAssumedInformation: " << UsedAssumedInformation << "\n"; } } while (false ) |
3375 | << AliveSuccessors.size() << " UsedAssumedInformation: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] #AliveSuccessors: " << AliveSuccessors.size() << " UsedAssumedInformation: " << UsedAssumedInformation << "\n"; } } while (false ) |
3376 | << UsedAssumedInformation << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAIsDead] #AliveSuccessors: " << AliveSuccessors.size() << " UsedAssumedInformation: " << UsedAssumedInformation << "\n"; } } while (false ); |
3377 | |
3378 | for (const Instruction *AliveSuccessor : AliveSuccessors) { |
3379 | if (!I->isTerminator()) { |
3380 | assert(AliveSuccessors.size() == 1 &&((AliveSuccessors.size() == 1 && "Non-terminator expected to have a single successor!" ) ? static_cast<void> (0) : __assert_fail ("AliveSuccessors.size() == 1 && \"Non-terminator expected to have a single successor!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3381, __PRETTY_FUNCTION__)) |
3381 | "Non-terminator expected to have a single successor!")((AliveSuccessors.size() == 1 && "Non-terminator expected to have a single successor!" ) ? static_cast<void> (0) : __assert_fail ("AliveSuccessors.size() == 1 && \"Non-terminator expected to have a single successor!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3381, __PRETTY_FUNCTION__)); |
3382 | Worklist.push_back(AliveSuccessor); |
3383 | } else { |
3384 | // record the assumed live edge |
3385 | AssumedLiveEdges.insert( |
3386 | std::make_pair(I->getParent(), AliveSuccessor->getParent())); |
3387 | if (assumeLive(A, *AliveSuccessor->getParent())) |
3388 | Worklist.push_back(AliveSuccessor); |
3389 | } |
3390 | } |
3391 | } |
3392 | |
3393 | ToBeExploredFrom = std::move(NewToBeExploredFrom); |
3394 | |
3395 | // If we know everything is live there is no need to query for liveness. |
3396 | // Instead, indicating a pessimistic fixpoint will cause the state to be |
3397 | // "invalid" and all queries to be answered conservatively without lookups. |
3398 | // To be in this state we have to (1) finished the exploration and (3) not |
3399 | // discovered any non-trivial dead end and (2) not ruled unreachable code |
3400 | // dead. |
3401 | if (ToBeExploredFrom.empty() && |
3402 | getAnchorScope()->size() == AssumedLiveBlocks.size() && |
3403 | llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) { |
3404 | return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0; |
3405 | })) |
3406 | return indicatePessimisticFixpoint(); |
3407 | return Change; |
3408 | } |
3409 | |
3410 | /// Liveness information for a call sites. |
3411 | struct AAIsDeadCallSite final : AAIsDeadFunction { |
3412 | AAIsDeadCallSite(const IRPosition &IRP, Attributor &A) |
3413 | : AAIsDeadFunction(IRP, A) {} |
3414 | |
3415 | /// See AbstractAttribute::initialize(...). |
3416 | void initialize(Attributor &A) override { |
3417 | // TODO: Once we have call site specific value information we can provide |
3418 | // call site specific liveness information and then it makes |
3419 | // sense to specialize attributes for call sites instead of |
3420 | // redirecting requests to the callee. |
3421 | llvm_unreachable("Abstract attributes for liveness are not "::llvm::llvm_unreachable_internal("Abstract attributes for liveness are not " "supported for call sites yet!", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3422) |
3422 | "supported for call sites yet!")::llvm::llvm_unreachable_internal("Abstract attributes for liveness are not " "supported for call sites yet!", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 3422); |
3423 | } |
3424 | |
3425 | /// See AbstractAttribute::updateImpl(...). |
3426 | ChangeStatus updateImpl(Attributor &A) override { |
3427 | return indicatePessimisticFixpoint(); |
3428 | } |
3429 | |
3430 | /// See AbstractAttribute::trackStatistics() |
3431 | void trackStatistics() const override {} |
3432 | }; |
3433 | |
3434 | /// -------------------- Dereferenceable Argument Attribute -------------------- |
3435 | |
3436 | template <> |
3437 | ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, |
3438 | const DerefState &R) { |
3439 | ChangeStatus CS0 = |
3440 | clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); |
3441 | ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); |
3442 | return CS0 | CS1; |
3443 | } |
3444 | |
3445 | struct AADereferenceableImpl : AADereferenceable { |
3446 | AADereferenceableImpl(const IRPosition &IRP, Attributor &A) |
3447 | : AADereferenceable(IRP, A) {} |
3448 | using StateType = DerefState; |
3449 | |
3450 | /// See AbstractAttribute::initialize(...). |
3451 | void initialize(Attributor &A) override { |
3452 | SmallVector<Attribute, 4> Attrs; |
3453 | getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, |
3454 | Attrs, /* IgnoreSubsumingPositions */ false, &A); |
3455 | for (const Attribute &Attr : Attrs) |
3456 | takeKnownDerefBytesMaximum(Attr.getValueAsInt()); |
3457 | |
3458 | const IRPosition &IRP = this->getIRPosition(); |
3459 | NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, |
3460 | /* TrackDependence */ false); |
3461 | |
3462 | bool CanBeNull; |
3463 | takeKnownDerefBytesMaximum( |
3464 | IRP.getAssociatedValue().getPointerDereferenceableBytes( |
3465 | A.getDataLayout(), CanBeNull)); |
3466 | |
3467 | bool IsFnInterface = IRP.isFnInterfaceKind(); |
3468 | Function *FnScope = IRP.getAnchorScope(); |
3469 | if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { |
3470 | indicatePessimisticFixpoint(); |
3471 | return; |
3472 | } |
3473 | |
3474 | if (Instruction *CtxI = getCtxI()) |
3475 | followUsesInMBEC(*this, A, getState(), *CtxI); |
3476 | } |
3477 | |
3478 | /// See AbstractAttribute::getState() |
3479 | /// { |
3480 | StateType &getState() override { return *this; } |
3481 | const StateType &getState() const override { return *this; } |
3482 | /// } |
3483 | |
3484 | /// Helper function for collecting accessed bytes in must-be-executed-context |
3485 | void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, |
3486 | DerefState &State) { |
3487 | const Value *UseV = U->get(); |
3488 | if (!UseV->getType()->isPointerTy()) |
3489 | return; |
3490 | |
3491 | Type *PtrTy = UseV->getType(); |
3492 | const DataLayout &DL = A.getDataLayout(); |
3493 | int64_t Offset; |
3494 | if (const Value *Base = getBasePointerOfAccessPointerOperand( |
3495 | I, Offset, DL, /*AllowNonInbounds*/ true)) { |
3496 | if (Base == &getAssociatedValue() && |
3497 | getPointerOperand(I, /* AllowVolatile */ false) == UseV) { |
3498 | uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType()); |
3499 | State.addAccessedBytes(Offset, Size); |
3500 | } |
3501 | } |
3502 | } |
3503 | |
3504 | /// See followUsesInMBEC |
3505 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
3506 | AADereferenceable::StateType &State) { |
3507 | bool IsNonNull = false; |
3508 | bool TrackUse = false; |
3509 | int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( |
3510 | A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); |
3511 | LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytesdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes << " for instruction " << *I << "\n"; } } while (false) |
3512 | << " for instruction " << *I << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes << " for instruction " << *I << "\n"; } } while (false); |
3513 | |
3514 | addAccessedBytesForUse(A, U, I, State); |
3515 | State.takeKnownDerefBytesMaximum(DerefBytes); |
3516 | return TrackUse; |
3517 | } |
3518 | |
3519 | /// See AbstractAttribute::manifest(...). |
3520 | ChangeStatus manifest(Attributor &A) override { |
3521 | ChangeStatus Change = AADereferenceable::manifest(A); |
3522 | if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { |
3523 | removeAttrs({Attribute::DereferenceableOrNull}); |
3524 | return ChangeStatus::CHANGED; |
3525 | } |
3526 | return Change; |
3527 | } |
3528 | |
3529 | void getDeducedAttributes(LLVMContext &Ctx, |
3530 | SmallVectorImpl<Attribute> &Attrs) const override { |
3531 | // TODO: Add *_globally support |
3532 | if (isAssumedNonNull()) |
3533 | Attrs.emplace_back(Attribute::getWithDereferenceableBytes( |
3534 | Ctx, getAssumedDereferenceableBytes())); |
3535 | else |
3536 | Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( |
3537 | Ctx, getAssumedDereferenceableBytes())); |
3538 | } |
3539 | |
3540 | /// See AbstractAttribute::getAsStr(). |
3541 | const std::string getAsStr() const override { |
3542 | if (!getAssumedDereferenceableBytes()) |
3543 | return "unknown-dereferenceable"; |
3544 | return std::string("dereferenceable") + |
3545 | (isAssumedNonNull() ? "" : "_or_null") + |
3546 | (isAssumedGlobal() ? "_globally" : "") + "<" + |
3547 | std::to_string(getKnownDereferenceableBytes()) + "-" + |
3548 | std::to_string(getAssumedDereferenceableBytes()) + ">"; |
3549 | } |
3550 | }; |
3551 | |
3552 | /// Dereferenceable attribute for a floating value. |
3553 | struct AADereferenceableFloating : AADereferenceableImpl { |
3554 | AADereferenceableFloating(const IRPosition &IRP, Attributor &A) |
3555 | : AADereferenceableImpl(IRP, A) {} |
3556 | |
3557 | /// See AbstractAttribute::updateImpl(...). |
3558 | ChangeStatus updateImpl(Attributor &A) override { |
3559 | const DataLayout &DL = A.getDataLayout(); |
3560 | |
3561 | auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, |
3562 | bool Stripped) -> bool { |
3563 | unsigned IdxWidth = |
3564 | DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); |
3565 | APInt Offset(IdxWidth, 0); |
3566 | const Value *Base = |
3567 | stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false); |
3568 | |
3569 | const auto &AA = |
3570 | A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base)); |
3571 | int64_t DerefBytes = 0; |
3572 | if (!Stripped && this == &AA) { |
3573 | // Use IR information if we did not strip anything. |
3574 | // TODO: track globally. |
3575 | bool CanBeNull; |
3576 | DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull); |
3577 | T.GlobalState.indicatePessimisticFixpoint(); |
3578 | } else { |
3579 | const DerefState &DS = AA.getState(); |
3580 | DerefBytes = DS.DerefBytesState.getAssumed(); |
3581 | T.GlobalState &= DS.GlobalState; |
3582 | } |
3583 | |
3584 | // For now we do not try to "increase" dereferenceability due to negative |
3585 | // indices as we first have to come up with code to deal with loops and |
3586 | // for overflows of the dereferenceable bytes. |
3587 | int64_t OffsetSExt = Offset.getSExtValue(); |
3588 | if (OffsetSExt < 0) |
3589 | OffsetSExt = 0; |
3590 | |
3591 | T.takeAssumedDerefBytesMinimum( |
3592 | std::max(int64_t(0), DerefBytes - OffsetSExt)); |
3593 | |
3594 | if (this == &AA) { |
3595 | if (!Stripped) { |
3596 | // If nothing was stripped IR information is all we got. |
3597 | T.takeKnownDerefBytesMaximum( |
3598 | std::max(int64_t(0), DerefBytes - OffsetSExt)); |
3599 | T.indicatePessimisticFixpoint(); |
3600 | } else if (OffsetSExt > 0) { |
3601 | // If something was stripped but there is circular reasoning we look |
3602 | // for the offset. If it is positive we basically decrease the |
3603 | // dereferenceable bytes in a circluar loop now, which will simply |
3604 | // drive them down to the known value in a very slow way which we |
3605 | // can accelerate. |
3606 | T.indicatePessimisticFixpoint(); |
3607 | } |
3608 | } |
3609 | |
3610 | return T.isValidState(); |
3611 | }; |
3612 | |
3613 | DerefState T; |
3614 | if (!genericValueTraversal<AADereferenceable, DerefState>( |
3615 | A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) |
3616 | return indicatePessimisticFixpoint(); |
3617 | |
3618 | return clampStateAndIndicateChange(getState(), T); |
3619 | } |
3620 | |
3621 | /// See AbstractAttribute::trackStatistics() |
3622 | void trackStatistics() const override { |
3623 | STATS_DECLTRACK_FLOATING_ATTR(dereferenceable){ static llvm::Statistic NumIRFloating_dereferenceable = {"attributor" , "NumIRFloating_dereferenceable", ("Number of floating values known to be '" "dereferenceable" "'")};; ++(NumIRFloating_dereferenceable); } |
3624 | } |
3625 | }; |
3626 | |
3627 | /// Dereferenceable attribute for a return value. |
3628 | struct AADereferenceableReturned final |
3629 | : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { |
3630 | AADereferenceableReturned(const IRPosition &IRP, Attributor &A) |
3631 | : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( |
3632 | IRP, A) {} |
3633 | |
3634 | /// See AbstractAttribute::trackStatistics() |
3635 | void trackStatistics() const override { |
3636 | STATS_DECLTRACK_FNRET_ATTR(dereferenceable){ static llvm::Statistic NumIRFunctionReturn_dereferenceable = {"attributor", "NumIRFunctionReturn_dereferenceable", ("Number of " "function returns" " marked '" "dereferenceable" "'")};; ++( NumIRFunctionReturn_dereferenceable); } |
3637 | } |
3638 | }; |
3639 | |
3640 | /// Dereferenceable attribute for an argument |
3641 | struct AADereferenceableArgument final |
3642 | : AAArgumentFromCallSiteArguments<AADereferenceable, |
3643 | AADereferenceableImpl> { |
3644 | using Base = |
3645 | AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; |
3646 | AADereferenceableArgument(const IRPosition &IRP, Attributor &A) |
3647 | : Base(IRP, A) {} |
3648 | |
3649 | /// See AbstractAttribute::trackStatistics() |
3650 | void trackStatistics() const override { |
3651 | STATS_DECLTRACK_ARG_ATTR(dereferenceable){ static llvm::Statistic NumIRArguments_dereferenceable = {"attributor" , "NumIRArguments_dereferenceable", ("Number of " "arguments" " marked '" "dereferenceable" "'")};; ++(NumIRArguments_dereferenceable ); } |
3652 | } |
3653 | }; |
3654 | |
3655 | /// Dereferenceable attribute for a call site argument. |
3656 | struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { |
3657 | AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) |
3658 | : AADereferenceableFloating(IRP, A) {} |
3659 | |
3660 | /// See AbstractAttribute::trackStatistics() |
3661 | void trackStatistics() const override { |
3662 | STATS_DECLTRACK_CSARG_ATTR(dereferenceable){ static llvm::Statistic NumIRCSArguments_dereferenceable = { "attributor", "NumIRCSArguments_dereferenceable", ("Number of " "call site arguments" " marked '" "dereferenceable" "'")};; ++ (NumIRCSArguments_dereferenceable); } |
3663 | } |
3664 | }; |
3665 | |
3666 | /// Dereferenceable attribute deduction for a call site return value. |
3667 | struct AADereferenceableCallSiteReturned final |
3668 | : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { |
3669 | using Base = |
3670 | AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; |
3671 | AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) |
3672 | : Base(IRP, A) {} |
3673 | |
3674 | /// See AbstractAttribute::trackStatistics() |
3675 | void trackStatistics() const override { |
3676 | STATS_DECLTRACK_CS_ATTR(dereferenceable){ static llvm::Statistic NumIRCS_dereferenceable = {"attributor" , "NumIRCS_dereferenceable", ("Number of " "call site" " marked '" "dereferenceable" "'")};; ++(NumIRCS_dereferenceable); }; |
3677 | } |
3678 | }; |
3679 | |
3680 | // ------------------------ Align Argument Attribute ------------------------ |
3681 | |
3682 | static unsigned getKnownAlignForUse(Attributor &A, |
3683 | AbstractAttribute &QueryingAA, |
3684 | Value &AssociatedValue, const Use *U, |
3685 | const Instruction *I, bool &TrackUse) { |
3686 | // We need to follow common pointer manipulation uses to the accesses they |
3687 | // feed into. |
3688 | if (isa<CastInst>(I)) { |
3689 | // Follow all but ptr2int casts. |
3690 | TrackUse = !isa<PtrToIntInst>(I); |
3691 | return 0; |
3692 | } |
3693 | if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { |
3694 | if (GEP->hasAllConstantIndices()) { |
3695 | TrackUse = true; |
3696 | return 0; |
3697 | } |
3698 | } |
3699 | |
3700 | MaybeAlign MA; |
3701 | if (const auto *CB = dyn_cast<CallBase>(I)) { |
3702 | if (CB->isBundleOperand(U) || CB->isCallee(U)) |
3703 | return 0; |
3704 | |
3705 | unsigned ArgNo = CB->getArgOperandNo(U); |
3706 | IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); |
3707 | // As long as we only use known information there is no need to track |
3708 | // dependences here. |
3709 | auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, |
3710 | /* TrackDependence */ false); |
3711 | MA = MaybeAlign(AlignAA.getKnownAlign()); |
3712 | } |
3713 | |
3714 | const DataLayout &DL = A.getDataLayout(); |
3715 | const Value *UseV = U->get(); |
3716 | if (auto *SI = dyn_cast<StoreInst>(I)) { |
3717 | if (SI->getPointerOperand() == UseV) |
3718 | MA = SI->getAlign(); |
3719 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { |
3720 | if (LI->getPointerOperand() == UseV) |
3721 | MA = LI->getAlign(); |
3722 | } |
3723 | |
3724 | if (!MA || *MA <= 1) |
3725 | return 0; |
3726 | |
3727 | unsigned Alignment = MA->value(); |
3728 | int64_t Offset; |
3729 | |
3730 | if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { |
3731 | if (Base == &AssociatedValue) { |
3732 | // BasePointerAddr + Offset = Alignment * Q for some integer Q. |
3733 | // So we can say that the maximum power of two which is a divisor of |
3734 | // gcd(Offset, Alignment) is an alignment. |
3735 | |
3736 | uint32_t gcd = |
3737 | greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); |
3738 | Alignment = llvm::PowerOf2Floor(gcd); |
3739 | } |
3740 | } |
3741 | |
3742 | return Alignment; |
3743 | } |
3744 | |
3745 | struct AAAlignImpl : AAAlign { |
3746 | AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} |
3747 | |
3748 | /// See AbstractAttribute::initialize(...). |
3749 | void initialize(Attributor &A) override { |
3750 | SmallVector<Attribute, 4> Attrs; |
3751 | getAttrs({Attribute::Alignment}, Attrs); |
3752 | for (const Attribute &Attr : Attrs) |
3753 | takeKnownMaximum(Attr.getValueAsInt()); |
3754 | |
3755 | Value &V = getAssociatedValue(); |
3756 | // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int |
3757 | // use of the function pointer. This was caused by D73131. We want to |
3758 | // avoid this for function pointers especially because we iterate |
3759 | // their uses and int2ptr is not handled. It is not a correctness |
3760 | // problem though! |
3761 | if (!V.getType()->getPointerElementType()->isFunctionTy()) |
3762 | takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); |
3763 | |
3764 | if (getIRPosition().isFnInterfaceKind() && |
3765 | (!getAnchorScope() || |
3766 | !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { |
3767 | indicatePessimisticFixpoint(); |
3768 | return; |
3769 | } |
3770 | |
3771 | if (Instruction *CtxI = getCtxI()) |
3772 | followUsesInMBEC(*this, A, getState(), *CtxI); |
3773 | } |
3774 | |
3775 | /// See AbstractAttribute::manifest(...). |
3776 | ChangeStatus manifest(Attributor &A) override { |
3777 | ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; |
3778 | |
3779 | // Check for users that allow alignment annotations. |
3780 | Value &AssociatedValue = getAssociatedValue(); |
3781 | for (const Use &U : AssociatedValue.uses()) { |
3782 | if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { |
3783 | if (SI->getPointerOperand() == &AssociatedValue) |
3784 | if (SI->getAlignment() < getAssumedAlign()) { |
3785 | STATS_DECLTRACK(AAAlign, Store,{ static llvm::Statistic NumIRStore_AAAlign = {"attributor", "NumIRStore_AAAlign" , "Number of times alignment added to a store"};; ++(NumIRStore_AAAlign ); } |
3786 | "Number of times alignment added to a store"){ static llvm::Statistic NumIRStore_AAAlign = {"attributor", "NumIRStore_AAAlign" , "Number of times alignment added to a store"};; ++(NumIRStore_AAAlign ); }; |
3787 | SI->setAlignment(Align(getAssumedAlign())); |
3788 | LoadStoreChanged = ChangeStatus::CHANGED; |
3789 | } |
3790 | } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { |
3791 | if (LI->getPointerOperand() == &AssociatedValue) |
3792 | if (LI->getAlignment() < getAssumedAlign()) { |
3793 | LI->setAlignment(Align(getAssumedAlign())); |
3794 | STATS_DECLTRACK(AAAlign, Load,{ static llvm::Statistic NumIRLoad_AAAlign = {"attributor", "NumIRLoad_AAAlign" , "Number of times alignment added to a load"};; ++(NumIRLoad_AAAlign ); } |
3795 | "Number of times alignment added to a load"){ static llvm::Statistic NumIRLoad_AAAlign = {"attributor", "NumIRLoad_AAAlign" , "Number of times alignment added to a load"};; ++(NumIRLoad_AAAlign ); }; |
3796 | LoadStoreChanged = ChangeStatus::CHANGED; |
3797 | } |
3798 | } |
3799 | } |
3800 | |
3801 | ChangeStatus Changed = AAAlign::manifest(A); |
3802 | |
3803 | Align InheritAlign = |
3804 | getAssociatedValue().getPointerAlignment(A.getDataLayout()); |
3805 | if (InheritAlign >= getAssumedAlign()) |
3806 | return LoadStoreChanged; |
3807 | return Changed | LoadStoreChanged; |
3808 | } |
3809 | |
3810 | // TODO: Provide a helper to determine the implied ABI alignment and check in |
3811 | // the existing manifest method and a new one for AAAlignImpl that value |
3812 | // to avoid making the alignment explicit if it did not improve. |
3813 | |
3814 | /// See AbstractAttribute::getDeducedAttributes |
3815 | virtual void |
3816 | getDeducedAttributes(LLVMContext &Ctx, |
3817 | SmallVectorImpl<Attribute> &Attrs) const override { |
3818 | if (getAssumedAlign() > 1) |
3819 | Attrs.emplace_back( |
3820 | Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); |
3821 | } |
3822 | |
3823 | /// See followUsesInMBEC |
3824 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, |
3825 | AAAlign::StateType &State) { |
3826 | bool TrackUse = false; |
3827 | |
3828 | unsigned int KnownAlign = |
3829 | getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); |
3830 | State.takeKnownMaximum(KnownAlign); |
3831 | |
3832 | return TrackUse; |
3833 | } |
3834 | |
3835 | /// See AbstractAttribute::getAsStr(). |
3836 | const std::string getAsStr() const override { |
3837 | return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + |
3838 | "-" + std::to_string(getAssumedAlign()) + ">") |
3839 | : "unknown-align"; |
3840 | } |
3841 | }; |
3842 | |
3843 | /// Align attribute for a floating value. |
3844 | struct AAAlignFloating : AAAlignImpl { |
3845 | AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} |
3846 | |
3847 | /// See AbstractAttribute::updateImpl(...). |
3848 | ChangeStatus updateImpl(Attributor &A) override { |
3849 | const DataLayout &DL = A.getDataLayout(); |
3850 | |
3851 | auto VisitValueCB = [&](Value &V, const Instruction *, |
3852 | AAAlign::StateType &T, bool Stripped) -> bool { |
3853 | const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V)); |
3854 | if (!Stripped && this == &AA) { |
3855 | int64_t Offset; |
3856 | unsigned Alignment = 1; |
3857 | if (const Value *Base = |
3858 | GetPointerBaseWithConstantOffset(&V, Offset, DL)) { |
3859 | Align PA = Base->getPointerAlignment(DL); |
3860 | // BasePointerAddr + Offset = Alignment * Q for some integer Q. |
3861 | // So we can say that the maximum power of two which is a divisor of |
3862 | // gcd(Offset, Alignment) is an alignment. |
3863 | |
3864 | uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), |
3865 | uint32_t(PA.value())); |
3866 | Alignment = llvm::PowerOf2Floor(gcd); |
3867 | } else { |
3868 | Alignment = V.getPointerAlignment(DL).value(); |
3869 | } |
3870 | // Use only IR information if we did not strip anything. |
3871 | T.takeKnownMaximum(Alignment); |
3872 | T.indicatePessimisticFixpoint(); |
3873 | } else { |
3874 | // Use abstract attribute information. |
3875 | const AAAlign::StateType &DS = AA.getState(); |
3876 | T ^= DS; |
3877 | } |
3878 | return T.isValidState(); |
3879 | }; |
3880 | |
3881 | StateType T; |
3882 | if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T, |
3883 | VisitValueCB, getCtxI())) |
3884 | return indicatePessimisticFixpoint(); |
3885 | |
3886 | // TODO: If we know we visited all incoming values, thus no are assumed |
3887 | // dead, we can take the known information from the state T. |
3888 | return clampStateAndIndicateChange(getState(), T); |
3889 | } |
3890 | |
3891 | /// See AbstractAttribute::trackStatistics() |
3892 | void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align){ static llvm::Statistic NumIRFloating_align = {"attributor", "NumIRFloating_align", ("Number of floating values known to be '" "align" "'")};; ++(NumIRFloating_align); } } |
3893 | }; |
3894 | |
3895 | /// Align attribute for function return value. |
3896 | struct AAAlignReturned final |
3897 | : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { |
3898 | using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>; |
3899 | AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} |
3900 | |
3901 | /// See AbstractAttribute::initialize(...). |
3902 | void initialize(Attributor &A) override { |
3903 | Base::initialize(A); |
3904 | Function *F = getAssociatedFunction(); |
3905 | if (!F || F->isDeclaration()) |
3906 | indicatePessimisticFixpoint(); |
3907 | } |
3908 | |
3909 | /// See AbstractAttribute::trackStatistics() |
3910 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned){ static llvm::Statistic NumIRFunctionReturn_aligned = {"attributor" , "NumIRFunctionReturn_aligned", ("Number of " "function returns" " marked '" "aligned" "'")};; ++(NumIRFunctionReturn_aligned ); } } |
3911 | }; |
3912 | |
3913 | /// Align attribute for function argument. |
3914 | struct AAAlignArgument final |
3915 | : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { |
3916 | using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; |
3917 | AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} |
3918 | |
3919 | /// See AbstractAttribute::manifest(...). |
3920 | ChangeStatus manifest(Attributor &A) override { |
3921 | // If the associated argument is involved in a must-tail call we give up |
3922 | // because we would need to keep the argument alignments of caller and |
3923 | // callee in-sync. Just does not seem worth the trouble right now. |
3924 | if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) |
3925 | return ChangeStatus::UNCHANGED; |
3926 | return Base::manifest(A); |
3927 | } |
3928 | |
3929 | /// See AbstractAttribute::trackStatistics() |
3930 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned){ static llvm::Statistic NumIRArguments_aligned = {"attributor" , "NumIRArguments_aligned", ("Number of " "arguments" " marked '" "aligned" "'")};; ++(NumIRArguments_aligned); } } |
3931 | }; |
3932 | |
3933 | struct AAAlignCallSiteArgument final : AAAlignFloating { |
3934 | AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) |
3935 | : AAAlignFloating(IRP, A) {} |
3936 | |
3937 | /// See AbstractAttribute::manifest(...). |
3938 | ChangeStatus manifest(Attributor &A) override { |
3939 | // If the associated argument is involved in a must-tail call we give up |
3940 | // because we would need to keep the argument alignments of caller and |
3941 | // callee in-sync. Just does not seem worth the trouble right now. |
3942 | if (Argument *Arg = getAssociatedArgument()) |
3943 | if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) |
3944 | return ChangeStatus::UNCHANGED; |
3945 | ChangeStatus Changed = AAAlignImpl::manifest(A); |
3946 | Align InheritAlign = |
3947 | getAssociatedValue().getPointerAlignment(A.getDataLayout()); |
3948 | if (InheritAlign >= getAssumedAlign()) |
3949 | Changed = ChangeStatus::UNCHANGED; |
3950 | return Changed; |
3951 | } |
3952 | |
3953 | /// See AbstractAttribute::updateImpl(Attributor &A). |
3954 | ChangeStatus updateImpl(Attributor &A) override { |
3955 | ChangeStatus Changed = AAAlignFloating::updateImpl(A); |
3956 | if (Argument *Arg = getAssociatedArgument()) { |
3957 | // We only take known information from the argument |
3958 | // so we do not need to track a dependence. |
3959 | const auto &ArgAlignAA = A.getAAFor<AAAlign>( |
3960 | *this, IRPosition::argument(*Arg), /* TrackDependence */ false); |
3961 | takeKnownMaximum(ArgAlignAA.getKnownAlign()); |
3962 | } |
3963 | return Changed; |
3964 | } |
3965 | |
3966 | /// See AbstractAttribute::trackStatistics() |
3967 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned){ static llvm::Statistic NumIRCSArguments_aligned = {"attributor" , "NumIRCSArguments_aligned", ("Number of " "call site arguments" " marked '" "aligned" "'")};; ++(NumIRCSArguments_aligned); } } |
3968 | }; |
3969 | |
3970 | /// Align attribute deduction for a call site return value. |
3971 | struct AAAlignCallSiteReturned final |
3972 | : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { |
3973 | using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; |
3974 | AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) |
3975 | : Base(IRP, A) {} |
3976 | |
3977 | /// See AbstractAttribute::initialize(...). |
3978 | void initialize(Attributor &A) override { |
3979 | Base::initialize(A); |
3980 | Function *F = getAssociatedFunction(); |
3981 | if (!F || F->isDeclaration()) |
3982 | indicatePessimisticFixpoint(); |
3983 | } |
3984 | |
3985 | /// See AbstractAttribute::trackStatistics() |
3986 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align){ static llvm::Statistic NumIRCS_align = {"attributor", "NumIRCS_align" , ("Number of " "call site" " marked '" "align" "'")};; ++(NumIRCS_align ); }; } |
3987 | }; |
3988 | |
3989 | /// ------------------ Function No-Return Attribute ---------------------------- |
3990 | struct AANoReturnImpl : public AANoReturn { |
3991 | AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} |
3992 | |
3993 | /// See AbstractAttribute::initialize(...). |
3994 | void initialize(Attributor &A) override { |
3995 | AANoReturn::initialize(A); |
3996 | Function *F = getAssociatedFunction(); |
3997 | if (!F || F->isDeclaration()) |
3998 | indicatePessimisticFixpoint(); |
3999 | } |
4000 | |
4001 | /// See AbstractAttribute::getAsStr(). |
4002 | const std::string getAsStr() const override { |
4003 | return getAssumed() ? "noreturn" : "may-return"; |
4004 | } |
4005 | |
4006 | /// See AbstractAttribute::updateImpl(Attributor &A). |
4007 | virtual ChangeStatus updateImpl(Attributor &A) override { |
4008 | auto CheckForNoReturn = [](Instruction &) { return false; }; |
4009 | if (!A.checkForAllInstructions(CheckForNoReturn, *this, |
4010 | {(unsigned)Instruction::Ret})) |
4011 | return indicatePessimisticFixpoint(); |
4012 | return ChangeStatus::UNCHANGED; |
4013 | } |
4014 | }; |
4015 | |
4016 | struct AANoReturnFunction final : AANoReturnImpl { |
4017 | AANoReturnFunction(const IRPosition &IRP, Attributor &A) |
4018 | : AANoReturnImpl(IRP, A) {} |
4019 | |
4020 | /// See AbstractAttribute::trackStatistics() |
4021 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn){ static llvm::Statistic NumIRFunction_noreturn = {"attributor" , "NumIRFunction_noreturn", ("Number of " "functions" " marked '" "noreturn" "'")};; ++(NumIRFunction_noreturn); } } |
4022 | }; |
4023 | |
4024 | /// NoReturn attribute deduction for a call sites. |
4025 | struct AANoReturnCallSite final : AANoReturnImpl { |
4026 | AANoReturnCallSite(const IRPosition &IRP, Attributor &A) |
4027 | : AANoReturnImpl(IRP, A) {} |
4028 | |
4029 | /// See AbstractAttribute::initialize(...). |
4030 | void initialize(Attributor &A) override { |
4031 | AANoReturnImpl::initialize(A); |
4032 | if (Function *F = getAssociatedFunction()) { |
4033 | const IRPosition &FnPos = IRPosition::function(*F); |
4034 | auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos); |
4035 | if (!FnAA.isAssumedNoReturn()) |
4036 | indicatePessimisticFixpoint(); |
4037 | } |
4038 | } |
4039 | |
4040 | /// See AbstractAttribute::updateImpl(...). |
4041 | ChangeStatus updateImpl(Attributor &A) override { |
4042 | // TODO: Once we have call site specific value information we can provide |
4043 | // call site specific liveness information and then it makes |
4044 | // sense to specialize attributes for call sites arguments instead of |
4045 | // redirecting requests to the callee argument. |
4046 | Function *F = getAssociatedFunction(); |
4047 | const IRPosition &FnPos = IRPosition::function(*F); |
4048 | auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos); |
4049 | return clampStateAndIndicateChange(getState(), FnAA.getState()); |
4050 | } |
4051 | |
4052 | /// See AbstractAttribute::trackStatistics() |
4053 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn){ static llvm::Statistic NumIRCS_noreturn = {"attributor", "NumIRCS_noreturn" , ("Number of " "call site" " marked '" "noreturn" "'")};; ++ (NumIRCS_noreturn); }; } |
4054 | }; |
4055 | |
4056 | /// ----------------------- Variable Capturing --------------------------------- |
4057 | |
4058 | /// A class to hold the state of for no-capture attributes. |
4059 | struct AANoCaptureImpl : public AANoCapture { |
4060 | AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {} |
4061 | |
4062 | /// See AbstractAttribute::initialize(...). |
4063 | void initialize(Attributor &A) override { |
4064 | if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) { |
4065 | indicateOptimisticFixpoint(); |
4066 | return; |
4067 | } |
4068 | Function *AnchorScope = getAnchorScope(); |
4069 | if (isFnInterfaceKind() && |
4070 | (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) { |
4071 | indicatePessimisticFixpoint(); |
4072 | return; |
4073 | } |
4074 | |
4075 | // You cannot "capture" null in the default address space. |
4076 | if (isa<ConstantPointerNull>(getAssociatedValue()) && |
4077 | getAssociatedValue().getType()->getPointerAddressSpace() == 0) { |
4078 | indicateOptimisticFixpoint(); |
4079 | return; |
4080 | } |
4081 | |
4082 | const Function *F = |
4083 | isArgumentPosition() ? getAssociatedFunction() : AnchorScope; |
4084 | |
4085 | // Check what state the associated function can actually capture. |
4086 | if (F) |
4087 | determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); |
4088 | else |
4089 | indicatePessimisticFixpoint(); |
4090 | } |
4091 | |
4092 | /// See AbstractAttribute::updateImpl(...). |
4093 | ChangeStatus updateImpl(Attributor &A) override; |
4094 | |
4095 | /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...). |
4096 | virtual void |
4097 | getDeducedAttributes(LLVMContext &Ctx, |
4098 | SmallVectorImpl<Attribute> &Attrs) const override { |
4099 | if (!isAssumedNoCaptureMaybeReturned()) |
4100 | return; |
4101 | |
4102 | if (isArgumentPosition()) { |
4103 | if (isAssumedNoCapture()) |
4104 | Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture)); |
4105 | else if (ManifestInternal) |
4106 | Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned")); |
4107 | } |
4108 | } |
4109 | |
4110 | /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known |
4111 | /// depending on the ability of the function associated with \p IRP to capture |
4112 | /// state in memory and through "returning/throwing", respectively. |
4113 | static void determineFunctionCaptureCapabilities(const IRPosition &IRP, |
4114 | const Function &F, |
4115 | BitIntegerState &State) { |
4116 | // TODO: Once we have memory behavior attributes we should use them here. |
4117 | |
4118 | // If we know we cannot communicate or write to memory, we do not care about |
4119 | // ptr2int anymore. |
4120 | if (F.onlyReadsMemory() && F.doesNotThrow() && |
4121 | F.getReturnType()->isVoidTy()) { |
4122 | State.addKnownBits(NO_CAPTURE); |
4123 | return; |
4124 | } |
4125 | |
4126 | // A function cannot capture state in memory if it only reads memory, it can |
4127 | // however return/throw state and the state might be influenced by the |
4128 | // pointer value, e.g., loading from a returned pointer might reveal a bit. |
4129 | if (F.onlyReadsMemory()) |
4130 | State.addKnownBits(NOT_CAPTURED_IN_MEM); |
4131 | |
4132 | // A function cannot communicate state back if it does not through |
4133 | // exceptions and doesn not return values. |
4134 | if (F.doesNotThrow() && F.getReturnType()->isVoidTy()) |
4135 | State.addKnownBits(NOT_CAPTURED_IN_RET); |
4136 | |
4137 | // Check existing "returned" attributes. |
4138 | int ArgNo = IRP.getCalleeArgNo(); |
4139 | if (F.doesNotThrow() && ArgNo >= 0) { |
4140 | for (unsigned u = 0, e = F.arg_size(); u < e; ++u) |
4141 | if (F.hasParamAttribute(u, Attribute::Returned)) { |
4142 | if (u == unsigned(ArgNo)) |
4143 | State.removeAssumedBits(NOT_CAPTURED_IN_RET); |
4144 | else if (F.onlyReadsMemory()) |
4145 | State.addKnownBits(NO_CAPTURE); |
4146 | else |
4147 | State.addKnownBits(NOT_CAPTURED_IN_RET); |
4148 | break; |
4149 | } |
4150 | } |
4151 | } |
4152 | |
4153 | /// See AbstractState::getAsStr(). |
4154 | const std::string getAsStr() const override { |
4155 | if (isKnownNoCapture()) |
4156 | return "known not-captured"; |
4157 | if (isAssumedNoCapture()) |
4158 | return "assumed not-captured"; |
4159 | if (isKnownNoCaptureMaybeReturned()) |
4160 | return "known not-captured-maybe-returned"; |
4161 | if (isAssumedNoCaptureMaybeReturned()) |
4162 | return "assumed not-captured-maybe-returned"; |
4163 | return "assumed-captured"; |
4164 | } |
4165 | }; |
4166 | |
4167 | /// Attributor-aware capture tracker. |
4168 | struct AACaptureUseTracker final : public CaptureTracker { |
4169 | |
4170 | /// Create a capture tracker that can lookup in-flight abstract attributes |
4171 | /// through the Attributor \p A. |
4172 | /// |
4173 | /// If a use leads to a potential capture, \p CapturedInMemory is set and the |
4174 | /// search is stopped. If a use leads to a return instruction, |
4175 | /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed. |
4176 | /// If a use leads to a ptr2int which may capture the value, |
4177 | /// \p CapturedInInteger is set. If a use is found that is currently assumed |
4178 | /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies |
4179 | /// set. All values in \p PotentialCopies are later tracked as well. For every |
4180 | /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0, |
4181 | /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger |
4182 | /// conservatively set to true. |
4183 | AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA, |
4184 | const AAIsDead &IsDeadAA, AANoCapture::StateType &State, |
4185 | SmallVectorImpl<const Value *> &PotentialCopies, |
4186 | unsigned &RemainingUsesToExplore) |
4187 | : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State), |
4188 | PotentialCopies(PotentialCopies), |
4189 | RemainingUsesToExplore(RemainingUsesToExplore) {} |
4190 | |
4191 | /// Determine if \p V maybe captured. *Also updates the state!* |
4192 | bool valueMayBeCaptured(const Value *V) { |
4193 | if (V->getType()->isPointerTy()) { |
4194 | PointerMayBeCaptured(V, this); |
4195 | } else { |
4196 | State.indicatePessimisticFixpoint(); |
4197 | } |
4198 | return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); |
4199 | } |
4200 | |
4201 | /// See CaptureTracker::tooManyUses(). |
4202 | void tooManyUses() override { |
4203 | State.removeAssumedBits(AANoCapture::NO_CAPTURE); |
4204 | } |
4205 | |
4206 | bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override { |
4207 | if (CaptureTracker::isDereferenceableOrNull(O, DL)) |
4208 | return true; |
4209 | const auto &DerefAA = A.getAAFor<AADereferenceable>( |
4210 | NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true, |
4211 | DepClassTy::OPTIONAL); |
4212 | return DerefAA.getAssumedDereferenceableBytes(); |
4213 | } |
4214 | |
4215 | /// See CaptureTracker::captured(...). |
4216 | bool captured(const Use *U) override { |
4217 | Instruction *UInst = cast<Instruction>(U->getUser()); |
4218 | LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInstdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Check use: " << *U-> get() << " in " << *UInst << "\n"; } } while (false) |
4219 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Check use: " << *U-> get() << " in " << *UInst << "\n"; } } while (false); |
4220 | |
4221 | // Because we may reuse the tracker multiple times we keep track of the |
4222 | // number of explored uses ourselves as well. |
4223 | if (RemainingUsesToExplore-- == 0) { |
4224 | LLVM_DEBUG(dbgs() << " - too many uses to explore!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << " - too many uses to explore!\n" ; } } while (false); |
4225 | return isCapturedIn(/* Memory */ true, /* Integer */ true, |
4226 | /* Return */ true); |
4227 | } |
4228 | |
4229 | // Deal with ptr2int by following uses. |
4230 | if (isa<PtrToIntInst>(UInst)) { |
4231 | LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << " - ptr2int assume the worst!\n" ; } } while (false); |
4232 | return valueMayBeCaptured(UInst); |
4233 | } |
4234 | |
4235 | // Explicitly catch return instructions. |
4236 | if (isa<ReturnInst>(UInst)) |
4237 | return isCapturedIn(/* Memory */ false, /* Integer */ false, |
4238 | /* Return */ true); |
4239 | |
4240 | // For now we only use special logic for call sites. However, the tracker |
4241 | // itself knows about a lot of other non-capturing cases already. |
4242 | auto *CB = dyn_cast<CallBase>(UInst); |
4243 | if (!CB || !CB->isArgOperand(U)) |
4244 | return isCapturedIn(/* Memory */ true, /* Integer */ true, |
4245 | /* Return */ true); |
4246 | |
4247 | unsigned ArgNo = CB->getArgOperandNo(U); |
4248 | const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo); |
4249 | // If we have a abstract no-capture attribute for the argument we can use |
4250 | // it to justify a non-capture attribute here. This allows recursion! |
4251 | auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos); |
4252 | if (ArgNoCaptureAA.isAssumedNoCapture()) |
4253 | return isCapturedIn(/* Memory */ false, /* Integer */ false, |
4254 | /* Return */ false); |
4255 | if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { |
4256 | addPotentialCopy(*CB); |
4257 | return isCapturedIn(/* Memory */ false, /* Integer */ false, |
4258 | /* Return */ false); |
4259 | } |
4260 | |
4261 | // Lastly, we could not find a reason no-capture can be assumed so we don't. |
4262 | return isCapturedIn(/* Memory */ true, /* Integer */ true, |
4263 | /* Return */ true); |
4264 | } |
4265 | |
4266 | /// Register \p CS as potential copy of the value we are checking. |
4267 | void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); } |
4268 | |
4269 | /// See CaptureTracker::shouldExplore(...). |
4270 | bool shouldExplore(const Use *U) override { |
4271 | // Check liveness and ignore droppable users. |
4272 | return !U->getUser()->isDroppable() && |
4273 | !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA); |
4274 | } |
4275 | |
4276 | /// Update the state according to \p CapturedInMem, \p CapturedInInt, and |
4277 | /// \p CapturedInRet, then return the appropriate value for use in the |
4278 | /// CaptureTracker::captured() interface. |
4279 | bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, |
4280 | bool CapturedInRet) { |
4281 | LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << " - captures [Mem " << CapturedInMem << "|Int " << CapturedInInt << "|Ret " << CapturedInRet << "]\n"; } } while (false ) |
4282 | << CapturedInInt << "|Ret " << CapturedInRet << "]\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << " - captures [Mem " << CapturedInMem << "|Int " << CapturedInInt << "|Ret " << CapturedInRet << "]\n"; } } while (false ); |
4283 | if (CapturedInMem) |
4284 | State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); |
4285 | if (CapturedInInt) |
4286 | State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); |
4287 | if (CapturedInRet) |
4288 | State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); |
4289 | return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); |
4290 | } |
4291 | |
4292 | private: |
4293 | /// The attributor providing in-flight abstract attributes. |
4294 | Attributor &A; |
4295 | |
4296 | /// The abstract attribute currently updated. |
4297 | AANoCapture &NoCaptureAA; |
4298 | |
4299 | /// The abstract liveness state. |
4300 | const AAIsDead &IsDeadAA; |
4301 | |
4302 | /// The state currently updated. |
4303 | AANoCapture::StateType &State; |
4304 | |
4305 | /// Set of potential copies of the tracked value. |
4306 | SmallVectorImpl<const Value *> &PotentialCopies; |
4307 | |
4308 | /// Global counter to limit the number of explored uses. |
4309 | unsigned &RemainingUsesToExplore; |
4310 | }; |
4311 | |
4312 | ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { |
4313 | const IRPosition &IRP = getIRPosition(); |
4314 | const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument() |
4315 | : &IRP.getAssociatedValue(); |
4316 | if (!V) |
4317 | return indicatePessimisticFixpoint(); |
4318 | |
4319 | const Function *F = |
4320 | isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); |
4321 | assert(F && "Expected a function!")((F && "Expected a function!") ? static_cast<void> (0) : __assert_fail ("F && \"Expected a function!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4321, __PRETTY_FUNCTION__)); |
4322 | const IRPosition &FnPos = IRPosition::function(*F); |
4323 | const auto &IsDeadAA = |
4324 | A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false); |
4325 | |
4326 | AANoCapture::StateType T; |
4327 | |
4328 | // Readonly means we cannot capture through memory. |
4329 | const auto &FnMemAA = |
4330 | A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false); |
4331 | if (FnMemAA.isAssumedReadOnly()) { |
4332 | T.addKnownBits(NOT_CAPTURED_IN_MEM); |
4333 | if (FnMemAA.isKnownReadOnly()) |
4334 | addKnownBits(NOT_CAPTURED_IN_MEM); |
4335 | else |
4336 | A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); |
4337 | } |
4338 | |
4339 | // Make sure all returned values are different than the underlying value. |
4340 | // TODO: we could do this in a more sophisticated way inside |
4341 | // AAReturnedValues, e.g., track all values that escape through returns |
4342 | // directly somehow. |
4343 | auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { |
4344 | bool SeenConstant = false; |
4345 | for (auto &It : RVAA.returned_values()) { |
4346 | if (isa<Constant>(It.first)) { |
4347 | if (SeenConstant) |
4348 | return false; |
4349 | SeenConstant = true; |
4350 | } else if (!isa<Argument>(It.first) || |
4351 | It.first == getAssociatedArgument()) |
4352 | return false; |
4353 | } |
4354 | return true; |
4355 | }; |
4356 | |
4357 | const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( |
4358 | *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); |
4359 | if (NoUnwindAA.isAssumedNoUnwind()) { |
4360 | bool IsVoidTy = F->getReturnType()->isVoidTy(); |
4361 | const AAReturnedValues *RVAA = |
4362 | IsVoidTy ? nullptr |
4363 | : &A.getAAFor<AAReturnedValues>(*this, FnPos, |
4364 | /* TrackDependence */ true, |
4365 | DepClassTy::OPTIONAL); |
4366 | if (IsVoidTy || CheckReturnedArgs(*RVAA)) { |
4367 | T.addKnownBits(NOT_CAPTURED_IN_RET); |
4368 | if (T.isKnown(NOT_CAPTURED_IN_MEM)) |
4369 | return ChangeStatus::UNCHANGED; |
4370 | if (NoUnwindAA.isKnownNoUnwind() && |
4371 | (IsVoidTy || RVAA->getState().isAtFixpoint())) { |
4372 | addKnownBits(NOT_CAPTURED_IN_RET); |
4373 | if (isKnown(NOT_CAPTURED_IN_MEM)) |
4374 | return indicateOptimisticFixpoint(); |
4375 | } |
4376 | } |
4377 | } |
4378 | |
4379 | // Use the CaptureTracker interface and logic with the specialized tracker, |
4380 | // defined in AACaptureUseTracker, that can look at in-flight abstract |
4381 | // attributes and directly updates the assumed state. |
4382 | SmallVector<const Value *, 4> PotentialCopies; |
4383 | unsigned RemainingUsesToExplore = |
4384 | getDefaultMaxUsesToExploreForCaptureTracking(); |
4385 | AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies, |
4386 | RemainingUsesToExplore); |
4387 | |
4388 | // Check all potential copies of the associated value until we can assume |
4389 | // none will be captured or we have to assume at least one might be. |
4390 | unsigned Idx = 0; |
4391 | PotentialCopies.push_back(V); |
4392 | while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size()) |
4393 | Tracker.valueMayBeCaptured(PotentialCopies[Idx++]); |
4394 | |
4395 | AANoCapture::StateType &S = getState(); |
4396 | auto Assumed = S.getAssumed(); |
4397 | S.intersectAssumedBits(T.getAssumed()); |
4398 | if (!isAssumedNoCaptureMaybeReturned()) |
4399 | return indicatePessimisticFixpoint(); |
4400 | return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED |
4401 | : ChangeStatus::CHANGED; |
4402 | } |
4403 | |
4404 | /// NoCapture attribute for function arguments. |
4405 | struct AANoCaptureArgument final : AANoCaptureImpl { |
4406 | AANoCaptureArgument(const IRPosition &IRP, Attributor &A) |
4407 | : AANoCaptureImpl(IRP, A) {} |
4408 | |
4409 | /// See AbstractAttribute::trackStatistics() |
4410 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture){ static llvm::Statistic NumIRArguments_nocapture = {"attributor" , "NumIRArguments_nocapture", ("Number of " "arguments" " marked '" "nocapture" "'")};; ++(NumIRArguments_nocapture); } } |
4411 | }; |
4412 | |
4413 | /// NoCapture attribute for call site arguments. |
4414 | struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { |
4415 | AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) |
4416 | : AANoCaptureImpl(IRP, A) {} |
4417 | |
4418 | /// See AbstractAttribute::initialize(...). |
4419 | void initialize(Attributor &A) override { |
4420 | if (Argument *Arg = getAssociatedArgument()) |
4421 | if (Arg->hasByValAttr()) |
4422 | indicateOptimisticFixpoint(); |
4423 | AANoCaptureImpl::initialize(A); |
4424 | } |
4425 | |
4426 | /// See AbstractAttribute::updateImpl(...). |
4427 | ChangeStatus updateImpl(Attributor &A) override { |
4428 | // TODO: Once we have call site specific value information we can provide |
4429 | // call site specific liveness information and then it makes |
4430 | // sense to specialize attributes for call sites arguments instead of |
4431 | // redirecting requests to the callee argument. |
4432 | Argument *Arg = getAssociatedArgument(); |
4433 | if (!Arg) |
4434 | return indicatePessimisticFixpoint(); |
4435 | const IRPosition &ArgPos = IRPosition::argument(*Arg); |
4436 | auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos); |
4437 | return clampStateAndIndicateChange(getState(), ArgAA.getState()); |
4438 | } |
4439 | |
4440 | /// See AbstractAttribute::trackStatistics() |
4441 | void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture){ static llvm::Statistic NumIRCSArguments_nocapture = {"attributor" , "NumIRCSArguments_nocapture", ("Number of " "call site arguments" " marked '" "nocapture" "'")};; ++(NumIRCSArguments_nocapture ); }}; |
4442 | }; |
4443 | |
4444 | /// NoCapture attribute for floating values. |
4445 | struct AANoCaptureFloating final : AANoCaptureImpl { |
4446 | AANoCaptureFloating(const IRPosition &IRP, Attributor &A) |
4447 | : AANoCaptureImpl(IRP, A) {} |
4448 | |
4449 | /// See AbstractAttribute::trackStatistics() |
4450 | void trackStatistics() const override { |
4451 | STATS_DECLTRACK_FLOATING_ATTR(nocapture){ static llvm::Statistic NumIRFloating_nocapture = {"attributor" , "NumIRFloating_nocapture", ("Number of floating values known to be '" "nocapture" "'")};; ++(NumIRFloating_nocapture); } |
4452 | } |
4453 | }; |
4454 | |
4455 | /// NoCapture attribute for function return value. |
4456 | struct AANoCaptureReturned final : AANoCaptureImpl { |
4457 | AANoCaptureReturned(const IRPosition &IRP, Attributor &A) |
4458 | : AANoCaptureImpl(IRP, A) { |
4459 | llvm_unreachable("NoCapture is not applicable to function returns!")::llvm::llvm_unreachable_internal("NoCapture is not applicable to function returns!" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4459); |
4460 | } |
4461 | |
4462 | /// See AbstractAttribute::initialize(...). |
4463 | void initialize(Attributor &A) override { |
4464 | llvm_unreachable("NoCapture is not applicable to function returns!")::llvm::llvm_unreachable_internal("NoCapture is not applicable to function returns!" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4464); |
4465 | } |
4466 | |
4467 | /// See AbstractAttribute::updateImpl(...). |
4468 | ChangeStatus updateImpl(Attributor &A) override { |
4469 | llvm_unreachable("NoCapture is not applicable to function returns!")::llvm::llvm_unreachable_internal("NoCapture is not applicable to function returns!" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4469); |
4470 | } |
4471 | |
4472 | /// See AbstractAttribute::trackStatistics() |
4473 | void trackStatistics() const override {} |
4474 | }; |
4475 | |
4476 | /// NoCapture attribute deduction for a call site return value. |
4477 | struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { |
4478 | AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) |
4479 | : AANoCaptureImpl(IRP, A) {} |
4480 | |
4481 | /// See AbstractAttribute::trackStatistics() |
4482 | void trackStatistics() const override { |
4483 | STATS_DECLTRACK_CSRET_ATTR(nocapture){ static llvm::Statistic NumIRCSReturn_nocapture = {"attributor" , "NumIRCSReturn_nocapture", ("Number of " "call site returns" " marked '" "nocapture" "'")};; ++(NumIRCSReturn_nocapture); } |
4484 | } |
4485 | }; |
4486 | |
4487 | /// ------------------ Value Simplify Attribute ---------------------------- |
4488 | struct AAValueSimplifyImpl : AAValueSimplify { |
4489 | AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) |
4490 | : AAValueSimplify(IRP, A) {} |
4491 | |
4492 | /// See AbstractAttribute::initialize(...). |
4493 | void initialize(Attributor &A) override { |
4494 | if (getAssociatedValue().getType()->isVoidTy()) |
4495 | indicatePessimisticFixpoint(); |
4496 | } |
4497 | |
4498 | /// See AbstractAttribute::getAsStr(). |
4499 | const std::string getAsStr() const override { |
4500 | return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple") |
4501 | : "not-simple"; |
4502 | } |
4503 | |
4504 | /// See AbstractAttribute::trackStatistics() |
4505 | void trackStatistics() const override {} |
4506 | |
4507 | /// See AAValueSimplify::getAssumedSimplifiedValue() |
4508 | Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { |
4509 | if (!getAssumed()) |
4510 | return const_cast<Value *>(&getAssociatedValue()); |
4511 | return SimplifiedAssociatedValue; |
4512 | } |
4513 | |
4514 | /// Helper function for querying AAValueSimplify and updating candicate. |
4515 | /// \param QueryingValue Value trying to unify with SimplifiedValue |
4516 | /// \param AccumulatedSimplifiedValue Current simplification result. |
4517 | static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, |
4518 | Value &QueryingValue, |
4519 | Optional<Value *> &AccumulatedSimplifiedValue) { |
4520 | // FIXME: Add a typecast support. |
4521 | |
4522 | auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( |
4523 | QueryingAA, IRPosition::value(QueryingValue)); |
4524 | |
4525 | Optional<Value *> QueryingValueSimplified = |
4526 | ValueSimplifyAA.getAssumedSimplifiedValue(A); |
4527 | |
4528 | if (!QueryingValueSimplified.hasValue()) |
4529 | return true; |
4530 | |
4531 | if (!QueryingValueSimplified.getValue()) |
4532 | return false; |
4533 | |
4534 | Value &QueryingValueSimplifiedUnwrapped = |
4535 | *QueryingValueSimplified.getValue(); |
4536 | |
4537 | if (AccumulatedSimplifiedValue.hasValue() && |
4538 | !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) && |
4539 | !isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) |
4540 | return AccumulatedSimplifiedValue == QueryingValueSimplified; |
4541 | if (AccumulatedSimplifiedValue.hasValue() && |
4542 | isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) |
4543 | return true; |
4544 | |
4545 | LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValuedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] " << QueryingValue << " is assumed to be " << QueryingValueSimplifiedUnwrapped << "\n"; } } while (false) |
4546 | << " is assumed to be "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] " << QueryingValue << " is assumed to be " << QueryingValueSimplifiedUnwrapped << "\n"; } } while (false) |
4547 | << QueryingValueSimplifiedUnwrapped << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] " << QueryingValue << " is assumed to be " << QueryingValueSimplifiedUnwrapped << "\n"; } } while (false); |
4548 | |
4549 | AccumulatedSimplifiedValue = QueryingValueSimplified; |
4550 | return true; |
4551 | } |
4552 | |
4553 | /// Returns a candidate is found or not |
4554 | template <typename AAType> bool askSimplifiedValueFor(Attributor &A) { |
4555 | if (!getAssociatedValue().getType()->isIntegerTy()) |
4556 | return false; |
4557 | |
4558 | const auto &AA = |
4559 | A.getAAFor<AAType>(*this, getIRPosition(), /* TrackDependence */ false); |
4560 | |
4561 | Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A); |
4562 | |
4563 | if (!COpt.hasValue()) { |
4564 | SimplifiedAssociatedValue = llvm::None; |
4565 | A.recordDependence(AA, *this, DepClassTy::OPTIONAL); |
4566 | return true; |
4567 | } |
4568 | if (auto *C = COpt.getValue()) { |
4569 | SimplifiedAssociatedValue = C; |
4570 | A.recordDependence(AA, *this, DepClassTy::OPTIONAL); |
4571 | return true; |
4572 | } |
4573 | return false; |
4574 | } |
4575 | |
4576 | bool askSimplifiedValueForOtherAAs(Attributor &A) { |
4577 | if (askSimplifiedValueFor<AAValueConstantRange>(A)) |
4578 | return true; |
4579 | if (askSimplifiedValueFor<AAPotentialValues>(A)) |
4580 | return true; |
4581 | return false; |
4582 | } |
4583 | |
4584 | /// See AbstractAttribute::manifest(...). |
4585 | ChangeStatus manifest(Attributor &A) override { |
4586 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
4587 | |
4588 | if (SimplifiedAssociatedValue.hasValue() && |
4589 | !SimplifiedAssociatedValue.getValue()) |
4590 | return Changed; |
4591 | |
4592 | Value &V = getAssociatedValue(); |
4593 | auto *C = SimplifiedAssociatedValue.hasValue() |
4594 | ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) |
4595 | : UndefValue::get(V.getType()); |
4596 | if (C) { |
4597 | // We can replace the AssociatedValue with the constant. |
4598 | if (!V.user_empty() && &V != C && V.getType() == C->getType()) { |
4599 | LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *Cdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] " << V << " -> " << *C << " :: " << *this << "\n"; } } while (false) |
4600 | << " :: " << *this << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] " << V << " -> " << *C << " :: " << *this << "\n"; } } while (false); |
4601 | if (A.changeValueAfterManifest(V, *C)) |
4602 | Changed = ChangeStatus::CHANGED; |
4603 | } |
4604 | } |
4605 | |
4606 | return Changed | AAValueSimplify::manifest(A); |
4607 | } |
4608 | |
4609 | /// See AbstractState::indicatePessimisticFixpoint(...). |
4610 | ChangeStatus indicatePessimisticFixpoint() override { |
4611 | // NOTE: Associated value will be returned in a pessimistic fixpoint and is |
4612 | // regarded as known. That's why`indicateOptimisticFixpoint` is called. |
4613 | SimplifiedAssociatedValue = &getAssociatedValue(); |
4614 | indicateOptimisticFixpoint(); |
4615 | return ChangeStatus::CHANGED; |
4616 | } |
4617 | |
4618 | protected: |
4619 | // An assumed simplified value. Initially, it is set to Optional::None, which |
4620 | // means that the value is not clear under current assumption. If in the |
4621 | // pessimistic state, getAssumedSimplifiedValue doesn't return this value but |
4622 | // returns orignal associated value. |
4623 | Optional<Value *> SimplifiedAssociatedValue; |
4624 | }; |
4625 | |
4626 | struct AAValueSimplifyArgument final : AAValueSimplifyImpl { |
4627 | AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A) |
4628 | : AAValueSimplifyImpl(IRP, A) {} |
4629 | |
4630 | void initialize(Attributor &A) override { |
4631 | AAValueSimplifyImpl::initialize(A); |
4632 | if (!getAnchorScope() || getAnchorScope()->isDeclaration()) |
4633 | indicatePessimisticFixpoint(); |
4634 | if (hasAttr({Attribute::InAlloca, Attribute::Preallocated, |
4635 | Attribute::StructRet, Attribute::Nest}, |
4636 | /* IgnoreSubsumingPositions */ true)) |
4637 | indicatePessimisticFixpoint(); |
4638 | |
4639 | // FIXME: This is a hack to prevent us from propagating function poiner in |
4640 | // the new pass manager CGSCC pass as it creates call edges the |
4641 | // CallGraphUpdater cannot handle yet. |
4642 | Value &V = getAssociatedValue(); |
4643 | if (V.getType()->isPointerTy() && |
4644 | V.getType()->getPointerElementType()->isFunctionTy() && |
4645 | !A.isModulePass()) |
4646 | indicatePessimisticFixpoint(); |
4647 | } |
4648 | |
4649 | /// See AbstractAttribute::updateImpl(...). |
4650 | ChangeStatus updateImpl(Attributor &A) override { |
4651 | // Byval is only replacable if it is readonly otherwise we would write into |
4652 | // the replaced value and not the copy that byval creates implicitly. |
4653 | Argument *Arg = getAssociatedArgument(); |
4654 | if (Arg->hasByValAttr()) { |
4655 | // TODO: We probably need to verify synchronization is not an issue, e.g., |
4656 | // there is no race by not copying a constant byval. |
4657 | const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); |
4658 | if (!MemAA.isAssumedReadOnly()) |
4659 | return indicatePessimisticFixpoint(); |
4660 | } |
4661 | |
4662 | bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); |
4663 | |
4664 | auto PredForCallSite = [&](AbstractCallSite ACS) { |
4665 | const IRPosition &ACSArgPos = |
4666 | IRPosition::callsite_argument(ACS, getCallSiteArgNo()); |
4667 | // Check if a coresponding argument was found or if it is on not |
4668 | // associated (which can happen for callback calls). |
4669 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) |
4670 | return false; |
4671 | |
4672 | // We can only propagate thread independent values through callbacks. |
4673 | // This is different to direct/indirect call sites because for them we |
4674 | // know the thread executing the caller and callee is the same. For |
4675 | // callbacks this is not guaranteed, thus a thread dependent value could |
4676 | // be different for the caller and callee, making it invalid to propagate. |
4677 | Value &ArgOp = ACSArgPos.getAssociatedValue(); |
4678 | if (ACS.isCallbackCall()) |
4679 | if (auto *C = dyn_cast<Constant>(&ArgOp)) |
4680 | if (C->isThreadDependent()) |
4681 | return false; |
4682 | return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue); |
4683 | }; |
4684 | |
4685 | bool AllCallSitesKnown; |
4686 | if (!A.checkForAllCallSites(PredForCallSite, *this, true, |
4687 | AllCallSitesKnown)) |
4688 | if (!askSimplifiedValueForOtherAAs(A)) |
4689 | return indicatePessimisticFixpoint(); |
4690 | |
4691 | // If a candicate was found in this update, return CHANGED. |
4692 | return HasValueBefore == SimplifiedAssociatedValue.hasValue() |
4693 | ? ChangeStatus::UNCHANGED |
4694 | : ChangeStatus ::CHANGED; |
4695 | } |
4696 | |
4697 | /// See AbstractAttribute::trackStatistics() |
4698 | void trackStatistics() const override { |
4699 | STATS_DECLTRACK_ARG_ATTR(value_simplify){ static llvm::Statistic NumIRArguments_value_simplify = {"attributor" , "NumIRArguments_value_simplify", ("Number of " "arguments" " marked '" "value_simplify" "'")};; ++(NumIRArguments_value_simplify); } |
4700 | } |
4701 | }; |
4702 | |
4703 | struct AAValueSimplifyReturned : AAValueSimplifyImpl { |
4704 | AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) |
4705 | : AAValueSimplifyImpl(IRP, A) {} |
4706 | |
4707 | /// See AbstractAttribute::updateImpl(...). |
4708 | ChangeStatus updateImpl(Attributor &A) override { |
4709 | bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); |
4710 | |
4711 | auto PredForReturned = [&](Value &V) { |
4712 | return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); |
4713 | }; |
4714 | |
4715 | if (!A.checkForAllReturnedValues(PredForReturned, *this)) |
4716 | if (!askSimplifiedValueForOtherAAs(A)) |
4717 | return indicatePessimisticFixpoint(); |
4718 | |
4719 | // If a candicate was found in this update, return CHANGED. |
4720 | return HasValueBefore == SimplifiedAssociatedValue.hasValue() |
4721 | ? ChangeStatus::UNCHANGED |
4722 | : ChangeStatus ::CHANGED; |
4723 | } |
4724 | |
4725 | ChangeStatus manifest(Attributor &A) override { |
4726 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
4727 | |
4728 | if (SimplifiedAssociatedValue.hasValue() && |
4729 | !SimplifiedAssociatedValue.getValue()) |
4730 | return Changed; |
4731 | |
4732 | Value &V = getAssociatedValue(); |
4733 | auto *C = SimplifiedAssociatedValue.hasValue() |
4734 | ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) |
4735 | : UndefValue::get(V.getType()); |
4736 | if (C) { |
4737 | auto PredForReturned = |
4738 | [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) { |
4739 | // We can replace the AssociatedValue with the constant. |
4740 | if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V)) |
4741 | return true; |
4742 | |
4743 | for (ReturnInst *RI : RetInsts) { |
4744 | if (RI->getFunction() != getAnchorScope()) |
4745 | continue; |
4746 | auto *RC = C; |
4747 | if (RC->getType() != RI->getReturnValue()->getType()) |
4748 | RC = ConstantExpr::getBitCast(RC, |
4749 | RI->getReturnValue()->getType()); |
4750 | LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RCdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] " << V << " -> " << *RC << " in " << * RI << " :: " << *this << "\n"; } } while (false ) |
4751 | << " in " << *RI << " :: " << *this << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] " << V << " -> " << *RC << " in " << * RI << " :: " << *this << "\n"; } } while (false ); |
4752 | if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC)) |
4753 | Changed = ChangeStatus::CHANGED; |
4754 | } |
4755 | return true; |
4756 | }; |
4757 | A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this); |
4758 | } |
4759 | |
4760 | return Changed | AAValueSimplify::manifest(A); |
4761 | } |
4762 | |
4763 | /// See AbstractAttribute::trackStatistics() |
4764 | void trackStatistics() const override { |
4765 | STATS_DECLTRACK_FNRET_ATTR(value_simplify){ static llvm::Statistic NumIRFunctionReturn_value_simplify = {"attributor", "NumIRFunctionReturn_value_simplify", ("Number of " "function returns" " marked '" "value_simplify" "'")};; ++(NumIRFunctionReturn_value_simplify ); } |
4766 | } |
4767 | }; |
4768 | |
4769 | struct AAValueSimplifyFloating : AAValueSimplifyImpl { |
4770 | AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) |
4771 | : AAValueSimplifyImpl(IRP, A) {} |
4772 | |
4773 | /// See AbstractAttribute::initialize(...). |
4774 | void initialize(Attributor &A) override { |
4775 | // FIXME: This might have exposed a SCC iterator update bug in the old PM. |
4776 | // Needs investigation. |
4777 | // AAValueSimplifyImpl::initialize(A); |
4778 | Value &V = getAnchorValue(); |
4779 | |
4780 | // TODO: add other stuffs |
4781 | if (isa<Constant>(V)) |
4782 | indicatePessimisticFixpoint(); |
4783 | } |
4784 | |
4785 | /// Check if \p ICmp is an equality comparison (==/!=) with at least one |
4786 | /// nullptr. If so, try to simplify it using AANonNull on the other operand. |
4787 | /// Return true if successful, in that case SimplifiedAssociatedValue will be |
4788 | /// updated and \p Changed is set appropriately. |
4789 | bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp, |
4790 | ChangeStatus &Changed) { |
4791 | if (!ICmp) |
4792 | return false; |
4793 | if (!ICmp->isEquality()) |
4794 | return false; |
4795 | |
4796 | // This is a comparison with == or !-. We check for nullptr now. |
4797 | bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0)); |
4798 | bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1)); |
4799 | if (!Op0IsNull && !Op1IsNull) |
4800 | return false; |
4801 | |
4802 | LLVMContext &Ctx = ICmp->getContext(); |
4803 | // Check for `nullptr ==/!= nullptr` first: |
4804 | if (Op0IsNull && Op1IsNull) { |
4805 | Value *NewVal = ConstantInt::get( |
4806 | Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ); |
4807 | assert(!SimplifiedAssociatedValue.hasValue() &&((!SimplifiedAssociatedValue.hasValue() && "Did not expect non-fixed value for constant comparison" ) ? static_cast<void> (0) : __assert_fail ("!SimplifiedAssociatedValue.hasValue() && \"Did not expect non-fixed value for constant comparison\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4808, __PRETTY_FUNCTION__)) |
4808 | "Did not expect non-fixed value for constant comparison")((!SimplifiedAssociatedValue.hasValue() && "Did not expect non-fixed value for constant comparison" ) ? static_cast<void> (0) : __assert_fail ("!SimplifiedAssociatedValue.hasValue() && \"Did not expect non-fixed value for constant comparison\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4808, __PRETTY_FUNCTION__)); |
4809 | SimplifiedAssociatedValue = NewVal; |
4810 | indicateOptimisticFixpoint(); |
4811 | Changed = ChangeStatus::CHANGED; |
4812 | return true; |
4813 | } |
4814 | |
4815 | // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the |
4816 | // non-nullptr operand and if we assume it's non-null we can conclude the |
4817 | // result of the comparison. |
4818 | assert((Op0IsNull || Op1IsNull) &&(((Op0IsNull || Op1IsNull) && "Expected nullptr versus non-nullptr comparison at this point" ) ? static_cast<void> (0) : __assert_fail ("(Op0IsNull || Op1IsNull) && \"Expected nullptr versus non-nullptr comparison at this point\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4819, __PRETTY_FUNCTION__)) |
4819 | "Expected nullptr versus non-nullptr comparison at this point")(((Op0IsNull || Op1IsNull) && "Expected nullptr versus non-nullptr comparison at this point" ) ? static_cast<void> (0) : __assert_fail ("(Op0IsNull || Op1IsNull) && \"Expected nullptr versus non-nullptr comparison at this point\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4819, __PRETTY_FUNCTION__)); |
4820 | |
4821 | // The index is the operand that we assume is not null. |
4822 | unsigned PtrIdx = Op0IsNull; |
4823 | auto &PtrNonNullAA = A.getAAFor<AANonNull>( |
4824 | *this, IRPosition::value(*ICmp->getOperand(PtrIdx))); |
4825 | if (!PtrNonNullAA.isAssumedNonNull()) |
4826 | return false; |
4827 | |
4828 | // The new value depends on the predicate, true for != and false for ==. |
4829 | Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx), |
4830 | ICmp->getPredicate() == CmpInst::ICMP_NE); |
4831 | |
4832 | assert((!SimplifiedAssociatedValue.hasValue() ||(((!SimplifiedAssociatedValue.hasValue() || SimplifiedAssociatedValue == NewVal) && "Did not expect to change value for zero-comparison" ) ? static_cast<void> (0) : __assert_fail ("(!SimplifiedAssociatedValue.hasValue() || SimplifiedAssociatedValue == NewVal) && \"Did not expect to change value for zero-comparison\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4834, __PRETTY_FUNCTION__)) |
4833 | SimplifiedAssociatedValue == NewVal) &&(((!SimplifiedAssociatedValue.hasValue() || SimplifiedAssociatedValue == NewVal) && "Did not expect to change value for zero-comparison" ) ? static_cast<void> (0) : __assert_fail ("(!SimplifiedAssociatedValue.hasValue() || SimplifiedAssociatedValue == NewVal) && \"Did not expect to change value for zero-comparison\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4834, __PRETTY_FUNCTION__)) |
4834 | "Did not expect to change value for zero-comparison")(((!SimplifiedAssociatedValue.hasValue() || SimplifiedAssociatedValue == NewVal) && "Did not expect to change value for zero-comparison" ) ? static_cast<void> (0) : __assert_fail ("(!SimplifiedAssociatedValue.hasValue() || SimplifiedAssociatedValue == NewVal) && \"Did not expect to change value for zero-comparison\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4834, __PRETTY_FUNCTION__)); |
4835 | |
4836 | bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); |
4837 | SimplifiedAssociatedValue = NewVal; |
4838 | |
4839 | if (PtrNonNullAA.isKnownNonNull()) |
4840 | indicateOptimisticFixpoint(); |
4841 | |
4842 | Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus ::CHANGED; |
4843 | return true; |
4844 | } |
4845 | |
4846 | /// See AbstractAttribute::updateImpl(...). |
4847 | ChangeStatus updateImpl(Attributor &A) override { |
4848 | bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); |
4849 | |
4850 | ChangeStatus Changed; |
4851 | if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()), |
4852 | Changed)) |
4853 | return Changed; |
4854 | |
4855 | auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &, |
4856 | bool Stripped) -> bool { |
4857 | auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V)); |
4858 | if (!Stripped && this == &AA) { |
4859 | // TODO: Look the instruction and check recursively. |
4860 | |
4861 | LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << Vdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] Can't be stripped more : " << V << "\n"; } } while (false) |
4862 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[ValueSimplify] Can't be stripped more : " << V << "\n"; } } while (false); |
4863 | return false; |
4864 | } |
4865 | return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); |
4866 | }; |
4867 | |
4868 | bool Dummy = false; |
4869 | if (!genericValueTraversal<AAValueSimplify, bool>( |
4870 | A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(), |
4871 | /* UseValueSimplify */ false)) |
4872 | if (!askSimplifiedValueForOtherAAs(A)) |
4873 | return indicatePessimisticFixpoint(); |
4874 | |
4875 | // If a candicate was found in this update, return CHANGED. |
4876 | |
4877 | return HasValueBefore == SimplifiedAssociatedValue.hasValue() |
4878 | ? ChangeStatus::UNCHANGED |
4879 | : ChangeStatus ::CHANGED; |
4880 | } |
4881 | |
4882 | /// See AbstractAttribute::trackStatistics() |
4883 | void trackStatistics() const override { |
4884 | STATS_DECLTRACK_FLOATING_ATTR(value_simplify){ static llvm::Statistic NumIRFloating_value_simplify = {"attributor" , "NumIRFloating_value_simplify", ("Number of floating values known to be '" "value_simplify" "'")};; ++(NumIRFloating_value_simplify); } |
4885 | } |
4886 | }; |
4887 | |
4888 | struct AAValueSimplifyFunction : AAValueSimplifyImpl { |
4889 | AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) |
4890 | : AAValueSimplifyImpl(IRP, A) {} |
4891 | |
4892 | /// See AbstractAttribute::initialize(...). |
4893 | void initialize(Attributor &A) override { |
4894 | SimplifiedAssociatedValue = &getAnchorValue(); |
4895 | indicateOptimisticFixpoint(); |
4896 | } |
4897 | /// See AbstractAttribute::initialize(...). |
4898 | ChangeStatus updateImpl(Attributor &A) override { |
4899 | llvm_unreachable(::llvm::llvm_unreachable_internal("AAValueSimplify(Function|CallSite)::updateImpl will not be called" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4900) |
4900 | "AAValueSimplify(Function|CallSite)::updateImpl will not be called")::llvm::llvm_unreachable_internal("AAValueSimplify(Function|CallSite)::updateImpl will not be called" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4900); |
4901 | } |
4902 | /// See AbstractAttribute::trackStatistics() |
4903 | void trackStatistics() const override { |
4904 | STATS_DECLTRACK_FN_ATTR(value_simplify){ static llvm::Statistic NumIRFunction_value_simplify = {"attributor" , "NumIRFunction_value_simplify", ("Number of " "functions" " marked '" "value_simplify" "'")};; ++(NumIRFunction_value_simplify); } |
4905 | } |
4906 | }; |
4907 | |
4908 | struct AAValueSimplifyCallSite : AAValueSimplifyFunction { |
4909 | AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) |
4910 | : AAValueSimplifyFunction(IRP, A) {} |
4911 | /// See AbstractAttribute::trackStatistics() |
4912 | void trackStatistics() const override { |
4913 | STATS_DECLTRACK_CS_ATTR(value_simplify){ static llvm::Statistic NumIRCS_value_simplify = {"attributor" , "NumIRCS_value_simplify", ("Number of " "call site" " marked '" "value_simplify" "'")};; ++(NumIRCS_value_simplify); } |
4914 | } |
4915 | }; |
4916 | |
4917 | struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned { |
4918 | AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) |
4919 | : AAValueSimplifyReturned(IRP, A) {} |
4920 | |
4921 | /// See AbstractAttribute::manifest(...). |
4922 | ChangeStatus manifest(Attributor &A) override { |
4923 | return AAValueSimplifyImpl::manifest(A); |
4924 | } |
4925 | |
4926 | void trackStatistics() const override { |
4927 | STATS_DECLTRACK_CSRET_ATTR(value_simplify){ static llvm::Statistic NumIRCSReturn_value_simplify = {"attributor" , "NumIRCSReturn_value_simplify", ("Number of " "call site returns" " marked '" "value_simplify" "'")};; ++(NumIRCSReturn_value_simplify ); } |
4928 | } |
4929 | }; |
4930 | struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { |
4931 | AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) |
4932 | : AAValueSimplifyFloating(IRP, A) {} |
4933 | |
4934 | /// See AbstractAttribute::manifest(...). |
4935 | ChangeStatus manifest(Attributor &A) override { |
4936 | ChangeStatus Changed = ChangeStatus::UNCHANGED; |
4937 | |
4938 | if (SimplifiedAssociatedValue.hasValue() && |
4939 | !SimplifiedAssociatedValue.getValue()) |
4940 | return Changed; |
4941 | |
4942 | Value &V = getAssociatedValue(); |
4943 | auto *C = SimplifiedAssociatedValue.hasValue() |
4944 | ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) |
4945 | : UndefValue::get(V.getType()); |
4946 | if (C) { |
4947 | Use &U = cast<CallBase>(&getAnchorValue()) |
4948 | ->getArgOperandUse(getCallSiteArgNo()); |
4949 | // We can replace the AssociatedValue with the constant. |
4950 | if (&V != C && V.getType() == C->getType()) { |
4951 | if (A.changeUseAfterManifest(U, *C)) |
4952 | Changed = ChangeStatus::CHANGED; |
4953 | } |
4954 | } |
4955 | |
4956 | return Changed | AAValueSimplify::manifest(A); |
4957 | } |
4958 | |
4959 | void trackStatistics() const override { |
4960 | STATS_DECLTRACK_CSARG_ATTR(value_simplify){ static llvm::Statistic NumIRCSArguments_value_simplify = {"attributor" , "NumIRCSArguments_value_simplify", ("Number of " "call site arguments" " marked '" "value_simplify" "'")};; ++(NumIRCSArguments_value_simplify ); } |
4961 | } |
4962 | }; |
4963 | |
4964 | /// ----------------------- Heap-To-Stack Conversion --------------------------- |
4965 | struct AAHeapToStackImpl : public AAHeapToStack { |
4966 | AAHeapToStackImpl(const IRPosition &IRP, Attributor &A) |
4967 | : AAHeapToStack(IRP, A) {} |
4968 | |
4969 | const std::string getAsStr() const override { |
4970 | return "[H2S] Mallocs: " + std::to_string(MallocCalls.size()); |
4971 | } |
4972 | |
4973 | ChangeStatus manifest(Attributor &A) override { |
4974 | assert(getState().isValidState() &&((getState().isValidState() && "Attempted to manifest an invalid state!" ) ? static_cast<void> (0) : __assert_fail ("getState().isValidState() && \"Attempted to manifest an invalid state!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4975, __PRETTY_FUNCTION__)) |
4975 | "Attempted to manifest an invalid state!")((getState().isValidState() && "Attempted to manifest an invalid state!" ) ? static_cast<void> (0) : __assert_fail ("getState().isValidState() && \"Attempted to manifest an invalid state!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4975, __PRETTY_FUNCTION__)); |
4976 | |
4977 | ChangeStatus HasChanged = ChangeStatus::UNCHANGED; |
4978 | Function *F = getAnchorScope(); |
4979 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); |
4980 | |
4981 | for (Instruction *MallocCall : MallocCalls) { |
4982 | // This malloc cannot be replaced. |
4983 | if (BadMallocCalls.count(MallocCall)) |
4984 | continue; |
4985 | |
4986 | for (Instruction *FreeCall : FreesForMalloc[MallocCall]) { |
4987 | LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"; } } while (false); |
4988 | A.deleteAfterManifest(*FreeCall); |
4989 | HasChanged = ChangeStatus::CHANGED; |
Value stored to 'HasChanged' is never read | |
4990 | } |
4991 | |
4992 | LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCalldo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "H2S: Removing malloc call: " << *MallocCall << "\n"; } } while (false) |
4993 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "H2S: Removing malloc call: " << *MallocCall << "\n"; } } while (false); |
4994 | |
4995 | Align Alignment; |
4996 | Constant *Size; |
4997 | if (isCallocLikeFn(MallocCall, TLI)) { |
4998 | auto *Num = cast<ConstantInt>(MallocCall->getOperand(0)); |
4999 | auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1)); |
5000 | APInt TotalSize = SizeT->getValue() * Num->getValue(); |
5001 | Size = |
5002 | ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize); |
5003 | } else if (isAlignedAllocLikeFn(MallocCall, TLI)) { |
5004 | Size = cast<ConstantInt>(MallocCall->getOperand(1)); |
5005 | Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0)) |
5006 | ->getValue() |
5007 | .getZExtValue()) |
5008 | .valueOrOne(); |
5009 | } else { |
5010 | Size = cast<ConstantInt>(MallocCall->getOperand(0)); |
5011 | } |
5012 | |
5013 | unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace(); |
5014 | Instruction *AI = |
5015 | new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, |
5016 | "", MallocCall->getNextNode()); |
5017 | |
5018 | if (AI->getType() != MallocCall->getType()) |
5019 | AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc", |
5020 | AI->getNextNode()); |
5021 | |
5022 | A.changeValueAfterManifest(*MallocCall, *AI); |
5023 | |
5024 | if (auto *II = dyn_cast<InvokeInst>(MallocCall)) { |
5025 | auto *NBB = II->getNormalDest(); |
5026 | BranchInst::Create(NBB, MallocCall->getParent()); |
5027 | A.deleteAfterManifest(*MallocCall); |
5028 | } else { |
5029 | A.deleteAfterManifest(*MallocCall); |
5030 | } |
5031 | |
5032 | // Zero out the allocated memory if it was a calloc. |
5033 | if (isCallocLikeFn(MallocCall, TLI)) { |
5034 | auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc", |
5035 | AI->getNextNode()); |
5036 | Value *Ops[] = { |
5037 | BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size, |
5038 | ConstantInt::get(Type::getInt1Ty(F->getContext()), false)}; |
5039 | |
5040 | Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()}; |
5041 | Module *M = F->getParent(); |
5042 | Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); |
5043 | CallInst::Create(Fn, Ops, "", BI->getNextNode()); |
5044 | } |
5045 | HasChanged = ChangeStatus::CHANGED; |
5046 | } |
5047 | |
5048 | return HasChanged; |
5049 | } |
5050 | |
5051 | /// Collection of all malloc calls in a function. |
5052 | SmallSetVector<Instruction *, 4> MallocCalls; |
5053 | |
5054 | /// Collection of malloc calls that cannot be converted. |
5055 | DenseSet<const Instruction *> BadMallocCalls; |
5056 | |
5057 | /// A map for each malloc call to the set of associated free calls. |
5058 | DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc; |
5059 | |
5060 | ChangeStatus updateImpl(Attributor &A) override; |
5061 | }; |
5062 | |
5063 | ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) { |
5064 | const Function *F = getAnchorScope(); |
5065 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); |
5066 | |
5067 | MustBeExecutedContextExplorer &Explorer = |
5068 | A.getInfoCache().getMustBeExecutedContextExplorer(); |
5069 | |
5070 | auto FreeCheck = [&](Instruction &I) { |
5071 | const auto &Frees = FreesForMalloc.lookup(&I); |
5072 | if (Frees.size() != 1) |
5073 | return false; |
5074 | Instruction *UniqueFree = *Frees.begin(); |
5075 | return Explorer.findInContextOf(UniqueFree, I.getNextNode()); |
5076 | }; |
5077 | |
5078 | auto UsesCheck = [&](Instruction &I) { |
5079 | bool ValidUsesOnly = true; |
5080 | bool MustUse = true; |
5081 | auto Pred = [&](const Use &U, bool &Follow) -> bool { |
5082 | Instruction *UserI = cast<Instruction>(U.getUser()); |
5083 | if (isa<LoadInst>(UserI)) |
5084 | return true; |
5085 | if (auto *SI = dyn_cast<StoreInst>(UserI)) { |
5086 | if (SI->getValueOperand() == U.get()) { |
5087 | LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] escaping store to memory: " << *UserI << "\n"; } } while (false) |
5088 | << "[H2S] escaping store to memory: " << *UserI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] escaping store to memory: " << *UserI << "\n"; } } while (false); |
5089 | ValidUsesOnly = false; |
5090 | } else { |
5091 | // A store into the malloc'ed memory is fine. |
5092 | } |
5093 | return true; |
5094 | } |
5095 | if (auto *CB = dyn_cast<CallBase>(UserI)) { |
5096 | if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) |
5097 | return true; |
5098 | // Record malloc. |
5099 | if (isFreeCall(UserI, TLI)) { |
5100 | if (MustUse) { |
5101 | FreesForMalloc[&I].insert(UserI); |
5102 | } else { |
5103 | LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] free potentially on different mallocs: " << *UserI << "\n"; } } while (false) |
5104 | << *UserI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] free potentially on different mallocs: " << *UserI << "\n"; } } while (false); |
5105 | ValidUsesOnly = false; |
5106 | } |
5107 | return true; |
5108 | } |
5109 | |
5110 | unsigned ArgNo = CB->getArgOperandNo(&U); |
5111 | |
5112 | const auto &NoCaptureAA = A.getAAFor<AANoCapture>( |
5113 | *this, IRPosition::callsite_argument(*CB, ArgNo)); |
5114 | |
5115 | // If a callsite argument use is nofree, we are fine. |
5116 | const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( |
5117 | *this, IRPosition::callsite_argument(*CB, ArgNo)); |
5118 | |
5119 | if (!NoCaptureAA.isAssumedNoCapture() || |
5120 | !ArgNoFreeAA.isAssumedNoFree()) { |
5121 | LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Bad user: " << *UserI << "\n"; } } while (false); |
5122 | ValidUsesOnly = false; |
5123 | } |
5124 | return true; |
5125 | } |
5126 | |
5127 | if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || |
5128 | isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { |
5129 | MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI)); |
5130 | Follow = true; |
5131 | return true; |
5132 | } |
5133 | // Unknown user for which we can not track uses further (in a way that |
5134 | // makes sense). |
5135 | LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Unknown user: " << *UserI << "\n"; } } while (false); |
5136 | ValidUsesOnly = false; |
5137 | return true; |
5138 | }; |
5139 | A.checkForAllUses(Pred, *this, I); |
5140 | return ValidUsesOnly; |
5141 | }; |
5142 | |
5143 | auto MallocCallocCheck = [&](Instruction &I) { |
5144 | if (BadMallocCalls.count(&I)) |
5145 | return true; |
5146 | |
5147 | bool IsMalloc = isMallocLikeFn(&I, TLI); |
5148 | bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI); |
5149 | bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI); |
5150 | if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) { |
5151 | BadMallocCalls.insert(&I); |
5152 | return true; |
5153 | } |
5154 | |
5155 | if (IsMalloc) { |
5156 | if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0))) |
5157 | if (Size->getValue().ule(MaxHeapToStackSize)) |
5158 | if (UsesCheck(I) || FreeCheck(I)) { |
5159 | MallocCalls.insert(&I); |
5160 | return true; |
5161 | } |
5162 | } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) { |
5163 | // Only if the alignment and sizes are constant. |
5164 | if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) |
5165 | if (Size->getValue().ule(MaxHeapToStackSize)) |
5166 | if (UsesCheck(I) || FreeCheck(I)) { |
5167 | MallocCalls.insert(&I); |
5168 | return true; |
5169 | } |
5170 | } else if (IsCalloc) { |
5171 | bool Overflow = false; |
5172 | if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0))) |
5173 | if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) |
5174 | if ((Size->getValue().umul_ov(Num->getValue(), Overflow)) |
5175 | .ule(MaxHeapToStackSize)) |
5176 | if (!Overflow && (UsesCheck(I) || FreeCheck(I))) { |
5177 | MallocCalls.insert(&I); |
5178 | return true; |
5179 | } |
5180 | } |
5181 | |
5182 | BadMallocCalls.insert(&I); |
5183 | return true; |
5184 | }; |
5185 | |
5186 | size_t NumBadMallocs = BadMallocCalls.size(); |
5187 | |
5188 | A.checkForAllCallLikeInstructions(MallocCallocCheck, *this); |
5189 | |
5190 | if (NumBadMallocs != BadMallocCalls.size()) |
5191 | return ChangeStatus::CHANGED; |
5192 | |
5193 | return ChangeStatus::UNCHANGED; |
5194 | } |
5195 | |
5196 | struct AAHeapToStackFunction final : public AAHeapToStackImpl { |
5197 | AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) |
5198 | : AAHeapToStackImpl(IRP, A) {} |
5199 | |
5200 | /// See AbstractAttribute::trackStatistics(). |
5201 | void trackStatistics() const override { |
5202 | STATS_DECL(static llvm::Statistic NumIRFunction_MallocCalls = {"attributor" , "NumIRFunction_MallocCalls", "Number of malloc/calloc/aligned_alloc calls converted to allocas" };; |
5203 | MallocCalls, Function,static llvm::Statistic NumIRFunction_MallocCalls = {"attributor" , "NumIRFunction_MallocCalls", "Number of malloc/calloc/aligned_alloc calls converted to allocas" };; |
5204 | "Number of malloc/calloc/aligned_alloc calls converted to allocas")static llvm::Statistic NumIRFunction_MallocCalls = {"attributor" , "NumIRFunction_MallocCalls", "Number of malloc/calloc/aligned_alloc calls converted to allocas" };;; |
5205 | for (auto *C : MallocCalls) |
5206 | if (!BadMallocCalls.count(C)) |
5207 | ++BUILD_STAT_NAME(MallocCalls, Function)NumIRFunction_MallocCalls; |
5208 | } |
5209 | }; |
5210 | |
5211 | /// ----------------------- Privatizable Pointers ------------------------------ |
5212 | struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { |
5213 | AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) |
5214 | : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} |
5215 | |
5216 | ChangeStatus indicatePessimisticFixpoint() override { |
5217 | AAPrivatizablePtr::indicatePessimisticFixpoint(); |
5218 | PrivatizableType = nullptr; |
5219 | return ChangeStatus::CHANGED; |
5220 | } |
5221 | |
5222 | /// Identify the type we can chose for a private copy of the underlying |
5223 | /// argument. None means it is not clear yet, nullptr means there is none. |
5224 | virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; |
5225 | |
5226 | /// Return a privatizable type that encloses both T0 and T1. |
5227 | /// TODO: This is merely a stub for now as we should manage a mapping as well. |
5228 | Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { |
5229 | if (!T0.hasValue()) |
5230 | return T1; |
5231 | if (!T1.hasValue()) |
5232 | return T0; |
5233 | if (T0 == T1) |
5234 | return T0; |
5235 | return nullptr; |
5236 | } |
5237 | |
5238 | Optional<Type *> getPrivatizableType() const override { |
5239 | return PrivatizableType; |
5240 | } |
5241 | |
5242 | const std::string getAsStr() const override { |
5243 | return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; |
5244 | } |
5245 | |
5246 | protected: |
5247 | Optional<Type *> PrivatizableType; |
5248 | }; |
5249 | |
5250 | // TODO: Do this for call site arguments (probably also other values) as well. |
5251 | |
5252 | struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { |
5253 | AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) |
5254 | : AAPrivatizablePtrImpl(IRP, A) {} |
5255 | |
5256 | /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) |
5257 | Optional<Type *> identifyPrivatizableType(Attributor &A) override { |
5258 | // If this is a byval argument and we know all the call sites (so we can |
5259 | // rewrite them), there is no need to check them explicitly. |
5260 | bool AllCallSitesKnown; |
5261 | if (getIRPosition().hasAttr(Attribute::ByVal) && |
5262 | A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, |
5263 | true, AllCallSitesKnown)) |
5264 | return getAssociatedValue().getType()->getPointerElementType(); |
5265 | |
5266 | Optional<Type *> Ty; |
5267 | unsigned ArgNo = getIRPosition().getCallSiteArgNo(); |
5268 | |
5269 | // Make sure the associated call site argument has the same type at all call |
5270 | // sites and it is an allocation we know is safe to privatize, for now that |
5271 | // means we only allow alloca instructions. |
5272 | // TODO: We can additionally analyze the accesses in the callee to create |
5273 | // the type from that information instead. That is a little more |
5274 | // involved and will be done in a follow up patch. |
5275 | auto CallSiteCheck = [&](AbstractCallSite ACS) { |
5276 | IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); |
5277 | // Check if a coresponding argument was found or if it is one not |
5278 | // associated (which can happen for callback calls). |
5279 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) |
5280 | return false; |
5281 | |
5282 | // Check that all call sites agree on a type. |
5283 | auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos); |
5284 | Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); |
5285 | |
5286 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5287 | dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5288 | if (CSTy.hasValue() && CSTy.getValue())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5289 | CSTy.getValue()->print(dbgs());do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5290 | else if (CSTy.hasValue())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5291 | dbgs() << "<nullptr>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5292 | elsedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5293 | dbgs() << "<none>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false) |
5294 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy.hasValue() && CSTy.getValue()) CSTy.getValue()->print(dbgs()); else if ( CSTy.hasValue()) dbgs() << "<nullptr>"; else dbgs () << "<none>"; }; } } while (false); |
5295 | |
5296 | Ty = combineTypes(Ty, CSTy); |
5297 | |
5298 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5299 | dbgs() << " : New Type: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5300 | if (Ty.hasValue() && Ty.getValue())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5301 | Ty.getValue()->print(dbgs());do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5302 | else if (Ty.hasValue())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5303 | dbgs() << "<nullptr>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5304 | elsedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5305 | dbgs() << "<none>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5306 | dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false) |
5307 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty.hasValue () && Ty.getValue()) Ty.getValue()->print(dbgs()); else if (Ty.hasValue()) dbgs() << "<nullptr>"; else dbgs() << "<none>"; dbgs() << "\n"; }; } } while (false); |
5308 | |
5309 | return !Ty.hasValue() || Ty.getValue(); |
5310 | }; |
5311 | |
5312 | if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown)) |
5313 | return nullptr; |
5314 | return Ty; |
5315 | } |
5316 | |
5317 | /// See AbstractAttribute::updateImpl(...). |
5318 | ChangeStatus updateImpl(Attributor &A) override { |
5319 | PrivatizableType = identifyPrivatizableType(A); |
5320 | if (!PrivatizableType.hasValue()) |
5321 | return ChangeStatus::UNCHANGED; |
5322 | if (!PrivatizableType.getValue()) |
5323 | return indicatePessimisticFixpoint(); |
5324 | |
5325 | // The dependence is optional so we don't give up once we give up on the |
5326 | // alignment. |
5327 | A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), |
5328 | /* TrackDependence */ true, DepClassTy::OPTIONAL); |
5329 | |
5330 | // Avoid arguments with padding for now. |
5331 | if (!getIRPosition().hasAttr(Attribute::ByVal) && |
5332 | !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), |
5333 | A.getInfoCache().getDL())) { |
5334 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Padding detected\n" ; } } while (false); |
5335 | return indicatePessimisticFixpoint(); |
5336 | } |
5337 | |
5338 | // Verify callee and caller agree on how the promoted argument would be |
5339 | // passed. |
5340 | // TODO: The use of the ArgumentPromotion interface here is ugly, we need a |
5341 | // specialized form of TargetTransformInfo::areFunctionArgsABICompatible |
5342 | // which doesn't require the arguments ArgumentPromotion wanted to pass. |
5343 | Function &Fn = *getIRPosition().getAnchorScope(); |
5344 | SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy; |
5345 | ArgsToPromote.insert(getAssociatedArgument()); |
5346 | const auto *TTI = |
5347 | A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); |
5348 | if (!TTI || |
5349 | !ArgumentPromotionPass::areFunctionArgsABICompatible( |
5350 | Fn, *TTI, ArgsToPromote, Dummy) || |
5351 | ArgsToPromote.empty()) { |
5352 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " << Fn.getName() << "\n"; } } while (false) |
5353 | dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " << Fn.getName() << "\n"; } } while (false) |
5354 | << Fn.getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " << Fn.getName() << "\n"; } } while (false); |
5355 | return indicatePessimisticFixpoint(); |
5356 | } |
5357 | |
5358 | // Collect the types that will replace the privatizable type in the function |
5359 | // signature. |
5360 | SmallVector<Type *, 16> ReplacementTypes; |
5361 | identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); |
5362 | |
5363 | // Register a rewrite of the argument. |
5364 | Argument *Arg = getAssociatedArgument(); |
5365 | if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { |
5366 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n" ; } } while (false); |
5367 | return indicatePessimisticFixpoint(); |
5368 | } |
5369 | |
5370 | unsigned ArgNo = Arg->getArgNo(); |
5371 | |
5372 | // Helper to check if for the given call site the associated argument is |
5373 | // passed to a callback where the privatization would be different. |
5374 | auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { |
5375 | SmallVector<const Use *, 4> CallbackUses; |
5376 | AbstractCallSite::getCallbackUses(CB, CallbackUses); |
5377 | for (const Use *U : CallbackUses) { |
5378 | AbstractCallSite CBACS(U); |
5379 | assert(CBACS && CBACS.isCallbackCall())((CBACS && CBACS.isCallbackCall()) ? static_cast<void > (0) : __assert_fail ("CBACS && CBACS.isCallbackCall()" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 5379, __PRETTY_FUNCTION__)); |
5380 | for (Argument &CBArg : CBACS.getCalledFunction()->args()) { |
5381 | int CBArgNo = CBACS.getCallArgOperandNo(CBArg); |
5382 | |
5383 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5384 | dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5385 | << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5386 | << "check if can be privatized in the context of its parent ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5387 | << Arg->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5388 | << ")\n[AAPrivatizablePtr] because it is an argument in a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5389 | "callback ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5390 | << CBArgNo << "@" << CBACS.getCalledFunction()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5391 | << ")\n[AAPrivatizablePtr] " << CBArg << " : "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5392 | << CBACS.getCallArgOperand(CBArg) << " vs "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5393 | << CB.getArgOperand(ArgNo) << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5394 | << "[AAPrivatizablePtr] " << CBArg << " : "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5395 | << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) |
5396 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false); |
5397 | |
5398 | if (CBArgNo != int(ArgNo)) |
5399 | continue; |
5400 | const auto &CBArgPrivAA = |
5401 | A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg)); |
5402 | if (CBArgPrivAA.isValidState()) { |
5403 | auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); |
5404 | if (!CBArgPrivTy.hasValue()) |
5405 | continue; |
5406 | if (CBArgPrivTy.getValue() == PrivatizableType) |
5407 | continue; |
5408 | } |
5409 | |
5410 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5411 | dbgs() << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5412 | << " cannot be privatized in the context of its parent ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5413 | << Arg->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5414 | << ")\n[AAPrivatizablePtr] because it is an argument in a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5415 | "callback ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5416 | << CBArgNo << "@" << CBACS.getCalledFunction()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5417 | << ").\n[AAPrivatizablePtr] for which the argument "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5418 | "privatization is not compatible.\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5419 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false); |
5420 | return false; |
5421 | } |
5422 | } |
5423 | return true; |
5424 | }; |
5425 | |
5426 | // Helper to check if for the given call site the associated argument is |
5427 | // passed to a direct call where the privatization would be different. |
5428 | auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) { |
5429 | CallBase *DC = cast<CallBase>(ACS.getInstruction()); |
5430 | int DCArgNo = ACS.getCallArgOperandNo(ArgNo); |
5431 | assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&((DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands () && "Expected a direct call operand for callback call operand" ) ? static_cast<void> (0) : __assert_fail ("DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() && \"Expected a direct call operand for callback call operand\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 5432, __PRETTY_FUNCTION__)) |
5432 | "Expected a direct call operand for callback call operand")((DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands () && "Expected a direct call operand for callback call operand" ) ? static_cast<void> (0) : __assert_fail ("DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() && \"Expected a direct call operand for callback call operand\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 5432, __PRETTY_FUNCTION__)); |
5433 | |
5434 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5435 | dbgs() << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5436 | << " check if be privatized in the context of its parent ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5437 | << Arg->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5438 | << ")\n[AAPrivatizablePtr] because it is an argument in a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5439 | "direct call of ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5440 | << DCArgNo << "@" << DC->getCalledFunction()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5441 | << ").\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) |
5442 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false); |
5443 | |
5444 | Function *DCCallee = DC->getCalledFunction(); |
5445 | if (unsigned(DCArgNo) < DCCallee->arg_size()) { |
5446 | const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>( |
5447 | *this, IRPosition::argument(*DCCallee->getArg(DCArgNo))); |
5448 | if (DCArgPrivAA.isValidState()) { |
5449 | auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType(); |
5450 | if (!DCArgPrivTy.hasValue()) |
5451 | return true; |
5452 | if (DCArgPrivTy.getValue() == PrivatizableType) |
5453 | return true; |
5454 | } |
5455 | } |
5456 | |
5457 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) |
5458 | dbgs() << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstru |