File: | build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include/llvm/Analysis/ValueTracking.h |
Warning: | line 281, column 49 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // See the Attributor.h file comment and the class descriptions in that file for | ||||
10 | // more information. | ||||
11 | // | ||||
12 | //===----------------------------------------------------------------------===// | ||||
13 | |||||
14 | #include "llvm/Transforms/IPO/Attributor.h" | ||||
15 | |||||
16 | #include "llvm/ADT/APInt.h" | ||||
17 | #include "llvm/ADT/DenseMapInfo.h" | ||||
18 | #include "llvm/ADT/MapVector.h" | ||||
19 | #include "llvm/ADT/SCCIterator.h" | ||||
20 | #include "llvm/ADT/STLExtras.h" | ||||
21 | #include "llvm/ADT/SetOperations.h" | ||||
22 | #include "llvm/ADT/SetVector.h" | ||||
23 | #include "llvm/ADT/SmallPtrSet.h" | ||||
24 | #include "llvm/ADT/SmallVector.h" | ||||
25 | #include "llvm/ADT/Statistic.h" | ||||
26 | #include "llvm/Analysis/AliasAnalysis.h" | ||||
27 | #include "llvm/Analysis/AssumeBundleQueries.h" | ||||
28 | #include "llvm/Analysis/AssumptionCache.h" | ||||
29 | #include "llvm/Analysis/CaptureTracking.h" | ||||
30 | #include "llvm/Analysis/InstructionSimplify.h" | ||||
31 | #include "llvm/Analysis/LazyValueInfo.h" | ||||
32 | #include "llvm/Analysis/MemoryBuiltins.h" | ||||
33 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | ||||
34 | #include "llvm/Analysis/ScalarEvolution.h" | ||||
35 | #include "llvm/Analysis/TargetTransformInfo.h" | ||||
36 | #include "llvm/Analysis/ValueTracking.h" | ||||
37 | #include "llvm/IR/Argument.h" | ||||
38 | #include "llvm/IR/Assumptions.h" | ||||
39 | #include "llvm/IR/BasicBlock.h" | ||||
40 | #include "llvm/IR/Constant.h" | ||||
41 | #include "llvm/IR/Constants.h" | ||||
42 | #include "llvm/IR/DataLayout.h" | ||||
43 | #include "llvm/IR/DerivedTypes.h" | ||||
44 | #include "llvm/IR/GlobalValue.h" | ||||
45 | #include "llvm/IR/IRBuilder.h" | ||||
46 | #include "llvm/IR/InstrTypes.h" | ||||
47 | #include "llvm/IR/Instruction.h" | ||||
48 | #include "llvm/IR/Instructions.h" | ||||
49 | #include "llvm/IR/IntrinsicInst.h" | ||||
50 | #include "llvm/IR/NoFolder.h" | ||||
51 | #include "llvm/IR/Value.h" | ||||
52 | #include "llvm/IR/ValueHandle.h" | ||||
53 | #include "llvm/Support/Alignment.h" | ||||
54 | #include "llvm/Support/Casting.h" | ||||
55 | #include "llvm/Support/CommandLine.h" | ||||
56 | #include "llvm/Support/ErrorHandling.h" | ||||
57 | #include "llvm/Support/GraphWriter.h" | ||||
58 | #include "llvm/Support/MathExtras.h" | ||||
59 | #include "llvm/Support/raw_ostream.h" | ||||
60 | #include "llvm/Transforms/Utils/Local.h" | ||||
61 | #include "llvm/Transforms/Utils/ValueMapper.h" | ||||
62 | #include <cassert> | ||||
63 | #include <numeric> | ||||
64 | |||||
65 | using namespace llvm; | ||||
66 | |||||
// NOTE(review): the duplicated "attributor" token below is static-analyzer
// macro-expansion residue; the real source reads: #define DEBUG_TYPE "attributor".
// It selects which LLVM_DEBUG/dbgs() output -debug-only=attributor enables.
67 | #define DEBUG_TYPE"attributor" "attributor" | ||||
68 | |||||
// When set, internal string attributes used for bookkeeping are written into
// the IR so they are visible in the output module (off by default).
69 | static cl::opt<bool> ManifestInternal( | ||||
70 | "attributor-manifest-internal", cl::Hidden, | ||||
71 | cl::desc("Manifest Attributor internal string attributes."), | ||||
72 | cl::init(false)); | ||||
73 | |||||
// Size limit (in bytes, default 128) for converting heap allocations to
// stack allocations.
74 | static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128), | ||||
75 | cl::Hidden); | ||||
76 | |||||
// Out-of-line definitions of the per-state potential-value limits. The
// constant-int limit is overwritten below via cl::location (default 7).
77 | template <> | ||||
78 | unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0; | ||||
79 | |||||
// -1 assigned to unsigned wraps to the maximum value, i.e. effectively no
// limit for the LLVM-value flavor of the state.
80 | template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1; | ||||
81 | |||||
// This option writes straight through to
// PotentialConstantIntValuesState::MaxPotentialValues via cl::location.
82 | static cl::opt<unsigned, true> MaxPotentialValues( | ||||
83 | "attributor-max-potential-values", cl::Hidden, | ||||
84 | cl::desc("Maximum number of potential values to be " | ||||
85 | "tracked for each position."), | ||||
86 | cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues), | ||||
87 | cl::init(7)); | ||||
88 | |||||
89 | static cl::opt<int> MaxPotentialValuesIterations( | ||||
90 | "attributor-max-potential-values-iterations", cl::Hidden, | ||||
91 | cl::desc( | ||||
92 | "Maximum number of iterations we keep dismantling potential values."), | ||||
93 | cl::init(64)); | ||||
94 | |||||
95 | static cl::opt<unsigned> MaxInterferingAccesses( | ||||
96 | "attributor-max-interfering-accesses", cl::Hidden, | ||||
97 | cl::desc("Maximum number of interfering accesses to " | ||||
98 | "check before assuming all might interfere."), | ||||
99 | cl::init(6)); | ||||
100 | |||||
// STATISTIC expands (residue visible after the closing paren) to a static
// llvm::Statistic counting created abstract attributes under -stats.
101 | STATISTIC(NumAAs, "Number of abstract attributes created")static llvm::Statistic NumAAs = {"attributor", "NumAAs", "Number of abstract attributes created" }; | ||||
102 | |||||
// NOTE(review): every #define below carries static-analyzer macro-expansion
// residue fused onto the line (the text after the macro's parameter list).
// The real definitions are only the backslash-continued parts using the
// #NAME / NumIR##TYPE##_##NAME pasting visible in BUILD_STAT_MSG_IR_ATTR and
// BUILD_STAT_NAME.
103 | // Some helper macros to deal with statistics tracking. | ||||
104 | // | ||||
105 | // Usage: | ||||
106 | // For simple IR attribute tracking overload trackStatistics in the abstract | ||||
107 | // attribute and choose the right STATS_DECLTRACK_********* macro, | ||||
108 | // e.g.,: | ||||
109 | // void trackStatistics() const override { | ||||
110 | // STATS_DECLTRACK_ARG_ATTR(returned) | ||||
111 | // } | ||||
112 | // If there is a single "increment" side one can use the macro | ||||
113 | // STATS_DECLTRACK with a custom message. If there are multiple increment | ||||
114 | // sides, STATS_DECL and STATS_TRACK can also be used separately. | ||||
115 | // | ||||
// Builds the human-readable statistic message from stringized TYPE/NAME.
116 | #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)("Number of " "TYPE" " marked '" "NAME" "'") \ | ||||
117 | ("Number of " #TYPE " marked '" #NAME "'") | ||||
// Builds the statistic variable identifier NumIR<TYPE>_<NAME> by token pasting.
118 | #define BUILD_STAT_NAME(NAME, TYPE)NumIRTYPE_NAME NumIR##TYPE##_##NAME | ||||
// Declares a statistic; STATS_DECL/STATS_TRACK split declaration and increment.
119 | #define STATS_DECL_(NAME, MSG)static llvm::Statistic NAME = {"attributor", "NAME", MSG}; STATISTIC(NAME, MSG)static llvm::Statistic NAME = {"attributor", "NAME", MSG}; | ||||
120 | #define STATS_DECL(NAME, TYPE, MSG)static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; \ | ||||
121 | STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG)static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; | ||||
122 | #define STATS_TRACK(NAME, TYPE)++(NumIRTYPE_NAME); ++(BUILD_STAT_NAME(NAME, TYPE)NumIRTYPE_NAME); | ||||
// Declare-and-increment in one statement block (the common case).
123 | #define STATS_DECLTRACK(NAME, TYPE, MSG){ static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; ++(NumIRTYPE_NAME); } \ | ||||
124 | { \ | ||||
125 | STATS_DECL(NAME, TYPE, MSG)static llvm::Statistic NumIRTYPE_NAME = {"attributor", "NumIRTYPE_NAME" , MSG};; \ | ||||
126 | STATS_TRACK(NAME, TYPE)++(NumIRTYPE_NAME); \ | ||||
127 | } | ||||
// Per-position-kind convenience wrappers: argument, call-site argument,
// function, call site, function return, call-site return, floating value.
128 | #define STATS_DECLTRACK_ARG_ATTR(NAME){ static llvm::Statistic NumIRArguments_NAME = {"attributor", "NumIRArguments_NAME", ("Number of " "arguments" " marked '" "NAME" "'")};; ++(NumIRArguments_NAME); } \ | ||||
129 | STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME)){ static llvm::Statistic NumIRArguments_NAME = {"attributor", "NumIRArguments_NAME", ("Number of " "arguments" " marked '" "NAME" "'")};; ++(NumIRArguments_NAME); } | ||||
130 | #define STATS_DECLTRACK_CSARG_ATTR(NAME){ static llvm::Statistic NumIRCSArguments_NAME = {"attributor" , "NumIRCSArguments_NAME", ("Number of " "call site arguments" " marked '" "NAME" "'")};; ++(NumIRCSArguments_NAME); } \ | ||||
131 | STATS_DECLTRACK(NAME, CSArguments, \{ static llvm::Statistic NumIRCSArguments_NAME = {"attributor" , "NumIRCSArguments_NAME", ("Number of " "call site arguments" " marked '" "NAME" "'")};; ++(NumIRCSArguments_NAME); } | ||||
132 | BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME)){ static llvm::Statistic NumIRCSArguments_NAME = {"attributor" , "NumIRCSArguments_NAME", ("Number of " "call site arguments" " marked '" "NAME" "'")};; ++(NumIRCSArguments_NAME); } | ||||
133 | #define STATS_DECLTRACK_FN_ATTR(NAME){ static llvm::Statistic NumIRFunction_NAME = {"attributor", "NumIRFunction_NAME" , ("Number of " "functions" " marked '" "NAME" "'")};; ++(NumIRFunction_NAME ); } \ | ||||
134 | STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME)){ static llvm::Statistic NumIRFunction_NAME = {"attributor", "NumIRFunction_NAME" , ("Number of " "functions" " marked '" "NAME" "'")};; ++(NumIRFunction_NAME ); } | ||||
135 | #define STATS_DECLTRACK_CS_ATTR(NAME){ static llvm::Statistic NumIRCS_NAME = {"attributor", "NumIRCS_NAME" , ("Number of " "call site" " marked '" "NAME" "'")};; ++(NumIRCS_NAME ); } \ | ||||
136 | STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME)){ static llvm::Statistic NumIRCS_NAME = {"attributor", "NumIRCS_NAME" , ("Number of " "call site" " marked '" "NAME" "'")};; ++(NumIRCS_NAME ); } | ||||
137 | #define STATS_DECLTRACK_FNRET_ATTR(NAME){ static llvm::Statistic NumIRFunctionReturn_NAME = {"attributor" , "NumIRFunctionReturn_NAME", ("Number of " "function returns" " marked '" "NAME" "'")};; ++(NumIRFunctionReturn_NAME); } \ | ||||
138 | STATS_DECLTRACK(NAME, FunctionReturn, \{ static llvm::Statistic NumIRFunctionReturn_NAME = {"attributor" , "NumIRFunctionReturn_NAME", ("Number of " "function returns" " marked '" "NAME" "'")};; ++(NumIRFunctionReturn_NAME); } | ||||
139 | BUILD_STAT_MSG_IR_ATTR(function returns, NAME)){ static llvm::Statistic NumIRFunctionReturn_NAME = {"attributor" , "NumIRFunctionReturn_NAME", ("Number of " "function returns" " marked '" "NAME" "'")};; ++(NumIRFunctionReturn_NAME); } | ||||
140 | #define STATS_DECLTRACK_CSRET_ATTR(NAME){ static llvm::Statistic NumIRCSReturn_NAME = {"attributor", "NumIRCSReturn_NAME" , ("Number of " "call site returns" " marked '" "NAME" "'")}; ; ++(NumIRCSReturn_NAME); } \ | ||||
141 | STATS_DECLTRACK(NAME, CSReturn, \{ static llvm::Statistic NumIRCSReturn_NAME = {"attributor", "NumIRCSReturn_NAME" , ("Number of " "call site returns" " marked '" "NAME" "'")}; ; ++(NumIRCSReturn_NAME); } | ||||
142 | BUILD_STAT_MSG_IR_ATTR(call site returns, NAME)){ static llvm::Statistic NumIRCSReturn_NAME = {"attributor", "NumIRCSReturn_NAME" , ("Number of " "call site returns" " marked '" "NAME" "'")}; ; ++(NumIRCSReturn_NAME); } | ||||
143 | #define STATS_DECLTRACK_FLOATING_ATTR(NAME){ static llvm::Statistic NumIRFloating_NAME = {"attributor", "NumIRFloating_NAME" , ("Number of floating values known to be '" "NAME" "'")};; ++ (NumIRFloating_NAME); } \ | ||||
144 | STATS_DECLTRACK(NAME, Floating, \{ static llvm::Statistic NumIRFloating_NAME = {"attributor", "NumIRFloating_NAME" , ("Number of floating values known to be '" #NAME "'")};; ++ (NumIRFloating_NAME); } | ||||
145 | ("Number of floating values known to be '" #NAME "'")){ static llvm::Statistic NumIRFloating_NAME = {"attributor", "NumIRFloating_NAME" , ("Number of floating values known to be '" #NAME "'")};; ++ (NumIRFloating_NAME); } | ||||
146 | |||||
147 | // Specialization of the operator<< for abstract attributes subclasses. This | ||||
148 | // disambiguates situations where multiple operators are applicable. | ||||
149 | namespace llvm { | ||||
// Each PIPE_OPERATOR(CLASS) emits an operator<< overload that forwards the
// derived attribute to the AbstractAttribute printer via static_cast.
150 | #define PIPE_OPERATOR(CLASS) \ | ||||
151 | raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \ | ||||
152 | return OS << static_cast<const AbstractAttribute &>(AA); \ | ||||
153 | } | ||||
154 | |||||
155 | PIPE_OPERATOR(AAIsDead) | ||||
156 | PIPE_OPERATOR(AANoUnwind) | ||||
157 | PIPE_OPERATOR(AANoSync) | ||||
158 | PIPE_OPERATOR(AANoRecurse) | ||||
159 | PIPE_OPERATOR(AAWillReturn) | ||||
160 | PIPE_OPERATOR(AANoReturn) | ||||
161 | PIPE_OPERATOR(AAReturnedValues) | ||||
162 | PIPE_OPERATOR(AANonNull) | ||||
163 | PIPE_OPERATOR(AANoAlias) | ||||
164 | PIPE_OPERATOR(AADereferenceable) | ||||
165 | PIPE_OPERATOR(AAAlign) | ||||
166 | PIPE_OPERATOR(AAInstanceInfo) | ||||
167 | PIPE_OPERATOR(AANoCapture) | ||||
168 | PIPE_OPERATOR(AAValueSimplify) | ||||
169 | PIPE_OPERATOR(AANoFree) | ||||
170 | PIPE_OPERATOR(AAHeapToStack) | ||||
171 | PIPE_OPERATOR(AAReachability) | ||||
172 | PIPE_OPERATOR(AAMemoryBehavior) | ||||
173 | PIPE_OPERATOR(AAMemoryLocation) | ||||
174 | PIPE_OPERATOR(AAValueConstantRange) | ||||
175 | PIPE_OPERATOR(AAPrivatizablePtr) | ||||
176 | PIPE_OPERATOR(AAUndefinedBehavior) | ||||
177 | PIPE_OPERATOR(AAPotentialConstantValues) | ||||
178 | PIPE_OPERATOR(AAPotentialValues) | ||||
179 | PIPE_OPERATOR(AANoUndef) | ||||
180 | PIPE_OPERATOR(AACallEdges) | ||||
181 | PIPE_OPERATOR(AAFunctionReachability) | ||||
182 | PIPE_OPERATOR(AAPointerInfo) | ||||
183 | PIPE_OPERATOR(AAAssumptionInfo) | ||||
184 | |||||
185 | #undef PIPE_OPERATOR | ||||
186 | |||||
// Clamping a DerefState means clamping both of its sub-states and reporting
// a change if either one changed (ChangeStatus operator| merges the results).
187 | template <> | ||||
188 | ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, | ||||
189 | const DerefState &R) { | ||||
190 | ChangeStatus CS0 = | ||||
191 | clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); | ||||
192 | ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); | ||||
193 | return CS0 | CS1; | ||||
194 | } | ||||
195 | |||||
196 | } // namespace llvm | ||||
197 | |||||
198 | /// Checks if a type could have padding bytes. | ||||
199 | static bool isDenselyPacked(Type *Ty, const DataLayout &DL) { | ||||
200 | // There is no size information, so be conservative. | ||||
201 | if (!Ty->isSized()) | ||||
202 | return false; | ||||
203 | |||||
204 | // If the alloc size is not equal to the storage size, then there are padding | ||||
205 | // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128. | ||||
206 | if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty)) | ||||
207 | return false; | ||||
208 | |||||
209 | // FIXME: This isn't the right way to check for padding in vectors with | ||||
210 | // non-byte-size elements. | ||||
211 | if (VectorType *SeqTy = dyn_cast<VectorType>(Ty)) | ||||
212 | return isDenselyPacked(SeqTy->getElementType(), DL); | ||||
213 | |||||
214 | // For array types, check for padding within members. | ||||
215 | if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty)) | ||||
216 | return isDenselyPacked(SeqTy->getElementType(), DL); | ||||
217 | |||||
218 | if (!isa<StructType>(Ty)) | ||||
219 | return true; | ||||
220 | |||||
221 | // Check for padding within and between elements of a struct. | ||||
222 | StructType *StructTy = cast<StructType>(Ty); | ||||
223 | const StructLayout *Layout = DL.getStructLayout(StructTy); | ||||
224 | uint64_t StartPos = 0; | ||||
225 | for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) { | ||||
226 | Type *ElTy = StructTy->getElementType(I); | ||||
227 | if (!isDenselyPacked(ElTy, DL)) | ||||
228 | return false; | ||||
229 | if (StartPos != Layout->getElementOffsetInBits(I)) | ||||
230 | return false; | ||||
231 | StartPos += DL.getTypeAllocSizeInBits(ElTy); | ||||
232 | } | ||||
233 | |||||
234 | return true; | ||||
235 | } | ||||
236 | |||||
237 | /// Get pointer operand of memory accessing instruction. If \p I is | ||||
238 | /// not a memory accessing instruction, return nullptr. If \p AllowVolatile, | ||||
239 | /// is set to false and the instruction is volatile, return nullptr. | ||||
240 | static const Value *getPointerOperand(const Instruction *I, | ||||
241 | bool AllowVolatile) { | ||||
242 | if (!AllowVolatile && I->isVolatile()) | ||||
243 | return nullptr; | ||||
244 | |||||
245 | if (auto *LI = dyn_cast<LoadInst>(I)) { | ||||
246 | return LI->getPointerOperand(); | ||||
247 | } | ||||
248 | |||||
249 | if (auto *SI = dyn_cast<StoreInst>(I)) { | ||||
250 | return SI->getPointerOperand(); | ||||
251 | } | ||||
252 | |||||
253 | if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) { | ||||
254 | return CXI->getPointerOperand(); | ||||
255 | } | ||||
256 | |||||
257 | if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) { | ||||
258 | return RMWI->getPointerOperand(); | ||||
259 | } | ||||
260 | |||||
261 | return nullptr; | ||||
262 | } | ||||
263 | |||||
264 | /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and | ||||
265 | /// advanced by \p Offset bytes. To aid later analysis the method tries to build | ||||
266 | /// getelement pointer instructions that traverse the natural type of \p Ptr if | ||||
267 | /// possible. If that fails, the remaining offset is adjusted byte-wise, hence | ||||
268 | /// through a cast to i8*. | ||||
269 | /// | ||||
270 | /// TODO: This could probably live somewhere more prominantly if it doesn't | ||||
271 | /// already exist. | ||||
272 | static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr, | ||||
273 | int64_t Offset, IRBuilder<NoFolder> &IRB, | ||||
274 | const DataLayout &DL) { | ||||
275 | assert(Offset >= 0 && "Negative offset not supported yet!")(static_cast <bool> (Offset >= 0 && "Negative offset not supported yet!" ) ? void (0) : __assert_fail ("Offset >= 0 && \"Negative offset not supported yet!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 275, __extension__ __PRETTY_FUNCTION__)); | ||||
276 | LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offsetdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Construct pointer: " << *Ptr << " + " << Offset << "-bytes as " << *ResTy << "\n"; } } while (false) | ||||
277 | << "-bytes as " << *ResTy << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Construct pointer: " << *Ptr << " + " << Offset << "-bytes as " << *ResTy << "\n"; } } while (false); | ||||
278 | |||||
279 | if (Offset) { | ||||
280 | Type *Ty = PtrElemTy; | ||||
281 | APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset); | ||||
282 | SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset); | ||||
283 | |||||
284 | SmallVector<Value *, 4> ValIndices; | ||||
285 | std::string GEPName = Ptr->getName().str(); | ||||
286 | for (const APInt &Index : IntIndices) { | ||||
287 | ValIndices.push_back(IRB.getInt(Index)); | ||||
288 | GEPName += "." + std::to_string(Index.getZExtValue()); | ||||
289 | } | ||||
290 | |||||
291 | // Create a GEP for the indices collected above. | ||||
292 | Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName); | ||||
293 | |||||
294 | // If an offset is left we use byte-wise adjustment. | ||||
295 | if (IntOffset != 0) { | ||||
296 | Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()); | ||||
297 | Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset), | ||||
298 | GEPName + ".b" + Twine(IntOffset.getZExtValue())); | ||||
299 | } | ||||
300 | } | ||||
301 | |||||
302 | // Ensure the result has the requested type. | ||||
303 | Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy, | ||||
304 | Ptr->getName() + ".cast"); | ||||
305 | |||||
306 | LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Constructed pointer: " << *Ptr << "\n"; } } while (false); | ||||
307 | return Ptr; | ||||
308 | } | ||||
309 | |||||
310 | bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr, | ||||
311 | SmallSetVector<Value *, 8> &Objects, | ||||
312 | const AbstractAttribute &QueryingAA, | ||||
313 | const Instruction *CtxI, | ||||
314 | bool &UsedAssumedInformation, | ||||
315 | AA::ValueScope S, | ||||
316 | SmallPtrSetImpl<Value *> *SeenObjects) { | ||||
317 | SmallPtrSet<Value *, 8> LocalSeenObjects; | ||||
318 | if (!SeenObjects) | ||||
319 | SeenObjects = &LocalSeenObjects; | ||||
320 | |||||
321 | SmallVector<AA::ValueAndContext> Values; | ||||
322 | if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), &QueryingAA, Values, | ||||
323 | S, UsedAssumedInformation)) { | ||||
324 | Objects.insert(const_cast<Value *>(&Ptr)); | ||||
325 | return true; | ||||
326 | } | ||||
327 | |||||
328 | for (auto &VAC : Values) { | ||||
329 | Value *UO = getUnderlyingObject(VAC.getValue()); | ||||
330 | if (UO && UO != VAC.getValue() && SeenObjects->insert(UO).second) { | ||||
331 | if (!getAssumedUnderlyingObjects(A, *UO, Objects, QueryingAA, | ||||
332 | VAC.getCtxI(), UsedAssumedInformation, S, | ||||
333 | SeenObjects)) | ||||
334 | return false; | ||||
335 | continue; | ||||
336 | } | ||||
337 | Objects.insert(VAC.getValue()); | ||||
338 | } | ||||
339 | return true; | ||||
340 | } | ||||
341 | |||||
342 | static const Value * | ||||
343 | stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA, | ||||
344 | const Value *Val, const DataLayout &DL, APInt &Offset, | ||||
345 | bool GetMinOffset, bool AllowNonInbounds, | ||||
346 | bool UseAssumed = false) { | ||||
347 | |||||
348 | auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool { | ||||
349 | const IRPosition &Pos = IRPosition::value(V); | ||||
350 | // Only track dependence if we are going to use the assumed info. | ||||
351 | const AAValueConstantRange &ValueConstantRangeAA = | ||||
352 | A.getAAFor<AAValueConstantRange>(QueryingAA, Pos, | ||||
353 | UseAssumed ? DepClassTy::OPTIONAL | ||||
354 | : DepClassTy::NONE); | ||||
355 | ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed() | ||||
356 | : ValueConstantRangeAA.getKnown(); | ||||
357 | if (Range.isFullSet()) | ||||
358 | return false; | ||||
359 | |||||
360 | // We can only use the lower part of the range because the upper part can | ||||
361 | // be higher than what the value can really be. | ||||
362 | if (GetMinOffset) | ||||
363 | ROffset = Range.getSignedMin(); | ||||
364 | else | ||||
365 | ROffset = Range.getSignedMax(); | ||||
366 | return true; | ||||
367 | }; | ||||
368 | |||||
369 | return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds, | ||||
370 | /* AllowInvariant */ true, | ||||
371 | AttributorAnalysis); | ||||
372 | } | ||||
373 | |||||
374 | static const Value * | ||||
375 | getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA, | ||||
376 | const Value *Ptr, int64_t &BytesOffset, | ||||
377 | const DataLayout &DL, bool AllowNonInbounds = false) { | ||||
378 | APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); | ||||
379 | const Value *Base = | ||||
380 | stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt, | ||||
381 | /* GetMinOffset */ true, AllowNonInbounds); | ||||
382 | |||||
383 | BytesOffset = OffsetAPInt.getSExtValue(); | ||||
384 | return Base; | ||||
385 | } | ||||
386 | |||||
387 | /// Clamp the information known for all returned values of a function | ||||
388 | /// (identified by \p QueryingAA) into \p S. | ||||
389 | template <typename AAType, typename StateType = typename AAType::StateType> | ||||
390 | static void clampReturnedValueStates( | ||||
391 | Attributor &A, const AAType &QueryingAA, StateType &S, | ||||
392 | const IRPosition::CallBaseContext *CBContext = nullptr) { | ||||
393 | LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp return value states for " << QueryingAA << " into " << S << "\n" ; } } while (false) | ||||
394 | << QueryingAA << " into " << S << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp return value states for " << QueryingAA << " into " << S << "\n" ; } } while (false); | ||||
395 | |||||
396 | assert((QueryingAA.getIRPosition().getPositionKind() ==(static_cast <bool> ((QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition(). getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? void (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 401, __extension__ __PRETTY_FUNCTION__)) | ||||
397 | IRPosition::IRP_RETURNED ||(static_cast <bool> ((QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition(). getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? void (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 401, __extension__ __PRETTY_FUNCTION__)) | ||||
398 | QueryingAA.getIRPosition().getPositionKind() ==(static_cast <bool> ((QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition(). getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? void (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 401, __extension__ __PRETTY_FUNCTION__)) | ||||
399 | IRPosition::IRP_CALL_SITE_RETURNED) &&(static_cast <bool> ((QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition(). getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? void (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 401, __extension__ __PRETTY_FUNCTION__)) | ||||
400 | "Can only clamp returned value states for a function returned or call "(static_cast <bool> ((QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition(). getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? void (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 401, __extension__ __PRETTY_FUNCTION__)) | ||||
401 | "site returned position!")(static_cast <bool> ((QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition(). getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && "Can only clamp returned value states for a function returned or call " "site returned position!") ? void (0) : __assert_fail ("(QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_RETURNED || QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) && \"Can only clamp returned value states for a function returned or call \" \"site returned position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 401, __extension__ __PRETTY_FUNCTION__)); | ||||
402 | |||||
403 | // Use an optional state as there might not be any return values and we want | ||||
404 | // to join (IntegerState::operator&) the state of all there are. | ||||
405 | Optional<StateType> T; | ||||
406 | |||||
407 | // Callback for each possibly returned value. | ||||
408 | auto CheckReturnValue = [&](Value &RV) -> bool { | ||||
409 | const IRPosition &RVPos = IRPosition::value(RV, CBContext); | ||||
410 | const AAType &AA = | ||||
411 | A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED); | ||||
412 | LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr() << " @ " << RVPos << "\n"; } } while (false) | ||||
413 | << " @ " << RVPos << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr() << " @ " << RVPos << "\n"; } } while (false); | ||||
414 | const StateType &AAS = AA.getState(); | ||||
415 | if (!T) | ||||
416 | T = StateType::getBestState(AAS); | ||||
417 | *T &= AAS; | ||||
418 | LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << Tdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T << "\n"; } } while (false) | ||||
419 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T << "\n"; } } while (false); | ||||
420 | return T->isValidState(); | ||||
421 | }; | ||||
422 | |||||
423 | if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA)) | ||||
424 | S.indicatePessimisticFixpoint(); | ||||
425 | else if (T) | ||||
426 | S ^= *T; | ||||
427 | } | ||||
428 | |||||
429 | namespace { | ||||
430 | /// Helper class for generic deduction: return value -> returned position. | ||||
431 | template <typename AAType, typename BaseType, | ||||
432 | typename StateType = typename BaseType::StateType, | ||||
433 | bool PropagateCallBaseContext = false> | ||||
434 | struct AAReturnedFromReturnedValues : public BaseType { | ||||
435 | AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A) | ||||
436 | : BaseType(IRP, A) {} | ||||
437 | |||||
438 | /// See AbstractAttribute::updateImpl(...). | ||||
439 | ChangeStatus updateImpl(Attributor &A) override { | ||||
440 | StateType S(StateType::getBestState(this->getState())); | ||||
441 | clampReturnedValueStates<AAType, StateType>( | ||||
442 | A, *this, S, | ||||
443 | PropagateCallBaseContext ? this->getCallBaseContext() : nullptr); | ||||
444 | // TODO: If we know we visited all returned values, thus no are assumed | ||||
445 | // dead, we can take the known information from the state T. | ||||
446 | return clampStateAndIndicateChange<StateType>(this->getState(), S); | ||||
447 | } | ||||
448 | }; | ||||
449 | |||||
450 | /// Clamp the information known at all call sites for a given argument | ||||
451 | /// (identified by \p QueryingAA) into \p S. | ||||
452 | template <typename AAType, typename StateType = typename AAType::StateType> | ||||
453 | static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA, | ||||
454 | StateType &S) { | ||||
455 | LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp call site argument states for " << QueryingAA << " into " << S << "\n" ; } } while (false) | ||||
456 | << QueryingAA << " into " << S << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Clamp call site argument states for " << QueryingAA << " into " << S << "\n" ; } } while (false); | ||||
457 | |||||
458 | assert(QueryingAA.getIRPosition().getPositionKind() ==(static_cast <bool> (QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_ARGUMENT && "Can only clamp call site argument states for an argument position!" ) ? void (0) : __assert_fail ("QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_ARGUMENT && \"Can only clamp call site argument states for an argument position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 460, __extension__ __PRETTY_FUNCTION__)) | ||||
459 | IRPosition::IRP_ARGUMENT &&(static_cast <bool> (QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_ARGUMENT && "Can only clamp call site argument states for an argument position!" ) ? void (0) : __assert_fail ("QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_ARGUMENT && \"Can only clamp call site argument states for an argument position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 460, __extension__ __PRETTY_FUNCTION__)) | ||||
460 | "Can only clamp call site argument states for an argument position!")(static_cast <bool> (QueryingAA.getIRPosition().getPositionKind () == IRPosition::IRP_ARGUMENT && "Can only clamp call site argument states for an argument position!" ) ? void (0) : __assert_fail ("QueryingAA.getIRPosition().getPositionKind() == IRPosition::IRP_ARGUMENT && \"Can only clamp call site argument states for an argument position!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 460, __extension__ __PRETTY_FUNCTION__)); | ||||
461 | |||||
462 | // Use an optional state as there might not be any return values and we want | ||||
463 | // to join (IntegerState::operator&) the state of all there are. | ||||
464 | Optional<StateType> T; | ||||
465 | |||||
466 | // The argument number which is also the call site argument number. | ||||
467 | unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo(); | ||||
468 | |||||
469 | auto CallSiteCheck = [&](AbstractCallSite ACS) { | ||||
470 | const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); | ||||
471 | // Check if a coresponding argument was found or if it is on not associated | ||||
472 | // (which can happen for callback calls). | ||||
473 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) | ||||
474 | return false; | ||||
475 | |||||
476 | const AAType &AA = | ||||
477 | A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED); | ||||
478 | LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] ACS: " << *ACS.getInstruction() << " AA: " << AA.getAsStr( ) << " @" << ACSArgPos << "\n"; } } while ( false) | ||||
479 | << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] ACS: " << *ACS.getInstruction() << " AA: " << AA.getAsStr( ) << " @" << ACSArgPos << "\n"; } } while ( false); | ||||
480 | const StateType &AAS = AA.getState(); | ||||
481 | if (!T) | ||||
482 | T = StateType::getBestState(AAS); | ||||
483 | *T &= AAS; | ||||
484 | LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << Tdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T << "\n"; } } while (false) | ||||
485 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T << "\n"; } } while (false); | ||||
486 | return T->isValidState(); | ||||
487 | }; | ||||
488 | |||||
489 | bool UsedAssumedInformation = false; | ||||
490 | if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true, | ||||
491 | UsedAssumedInformation)) | ||||
492 | S.indicatePessimisticFixpoint(); | ||||
493 | else if (T) | ||||
494 | S ^= *T; | ||||
495 | } | ||||
496 | |||||
497 | /// This function is the bridge between argument position and the call base | ||||
498 | /// context. | ||||
499 | template <typename AAType, typename BaseType, | ||||
500 | typename StateType = typename AAType::StateType> | ||||
501 | bool getArgumentStateFromCallBaseContext(Attributor &A, | ||||
502 | BaseType &QueryingAttribute, | ||||
503 | IRPosition &Pos, StateType &State) { | ||||
504 | assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&(static_cast <bool> ((Pos.getPositionKind() == IRPosition ::IRP_ARGUMENT) && "Expected an 'argument' position !" ) ? void (0) : __assert_fail ("(Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) && \"Expected an 'argument' position !\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 505, __extension__ __PRETTY_FUNCTION__)) | ||||
505 | "Expected an 'argument' position !")(static_cast <bool> ((Pos.getPositionKind() == IRPosition ::IRP_ARGUMENT) && "Expected an 'argument' position !" ) ? void (0) : __assert_fail ("(Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) && \"Expected an 'argument' position !\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 505, __extension__ __PRETTY_FUNCTION__)); | ||||
506 | const CallBase *CBContext = Pos.getCallBaseContext(); | ||||
507 | if (!CBContext) | ||||
508 | return false; | ||||
509 | |||||
510 | int ArgNo = Pos.getCallSiteArgNo(); | ||||
511 | assert(ArgNo >= 0 && "Invalid Arg No!")(static_cast <bool> (ArgNo >= 0 && "Invalid Arg No!" ) ? void (0) : __assert_fail ("ArgNo >= 0 && \"Invalid Arg No!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 511, __extension__ __PRETTY_FUNCTION__)); | ||||
512 | |||||
513 | const auto &AA = A.getAAFor<AAType>( | ||||
514 | QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo), | ||||
515 | DepClassTy::REQUIRED); | ||||
516 | const StateType &CBArgumentState = | ||||
517 | static_cast<const StateType &>(AA.getState()); | ||||
518 | |||||
519 | LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Briding Call site context to argument" << "Position:" << Pos << "CB Arg state:" << CBArgumentState << "\n"; } } while (false) | ||||
520 | << "Position:" << Pos << "CB Arg state:" << CBArgumentStatedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Briding Call site context to argument" << "Position:" << Pos << "CB Arg state:" << CBArgumentState << "\n"; } } while (false) | ||||
521 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[Attributor] Briding Call site context to argument" << "Position:" << Pos << "CB Arg state:" << CBArgumentState << "\n"; } } while (false); | ||||
522 | |||||
523 | // NOTE: If we want to do call site grouping it should happen here. | ||||
524 | State ^= CBArgumentState; | ||||
525 | return true; | ||||
526 | } | ||||
527 | |||||
528 | /// Helper class for generic deduction: call site argument -> argument position. | ||||
529 | template <typename AAType, typename BaseType, | ||||
530 | typename StateType = typename AAType::StateType, | ||||
531 | bool BridgeCallBaseContext = false> | ||||
532 | struct AAArgumentFromCallSiteArguments : public BaseType { | ||||
533 | AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A) | ||||
534 | : BaseType(IRP, A) {} | ||||
535 | |||||
536 | /// See AbstractAttribute::updateImpl(...). | ||||
537 | ChangeStatus updateImpl(Attributor &A) override { | ||||
538 | StateType S = StateType::getBestState(this->getState()); | ||||
539 | |||||
540 | if (BridgeCallBaseContext) { | ||||
541 | bool Success = | ||||
542 | getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>( | ||||
543 | A, *this, this->getIRPosition(), S); | ||||
544 | if (Success) | ||||
545 | return clampStateAndIndicateChange<StateType>(this->getState(), S); | ||||
546 | } | ||||
547 | clampCallSiteArgumentStates<AAType, StateType>(A, *this, S); | ||||
548 | |||||
549 | // TODO: If we know we visited all incoming values, thus no are assumed | ||||
550 | // dead, we can take the known information from the state T. | ||||
551 | return clampStateAndIndicateChange<StateType>(this->getState(), S); | ||||
552 | } | ||||
553 | }; | ||||
554 | |||||
/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    // Without an associated function (no known callee) we cannot replicate
    // callee-side information, so give up.
    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    // The anchor of a call-site-returned position is the call itself.
    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    // Query the callee's returned position, optionally specialized on this
    // particular call base context, and clamp our state with its result.
    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};
587 | |||||
588 | /// Helper function to accumulate uses. | ||||
589 | template <class AAType, typename StateType = typename AAType::StateType> | ||||
590 | static void followUsesInContext(AAType &AA, Attributor &A, | ||||
591 | MustBeExecutedContextExplorer &Explorer, | ||||
592 | const Instruction *CtxI, | ||||
593 | SetVector<const Use *> &Uses, | ||||
594 | StateType &State) { | ||||
595 | auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI); | ||||
596 | for (unsigned u = 0; u < Uses.size(); ++u) { | ||||
597 | const Use *U = Uses[u]; | ||||
598 | if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) { | ||||
599 | bool Found = Explorer.findInContextOf(UserI, EIt, EEnd); | ||||
600 | if (Found && AA.followUseInMBEC(A, U, UserI, State)) | ||||
601 | for (const Use &Us : UserI->uses()) | ||||
602 | Uses.insert(&Us); | ||||
603 | } | ||||
604 | } | ||||
605 | } | ||||
606 | |||||
/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  // First pass: follow uses inside the context of \p CtxI itself.
  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  // Nothing left to learn if the state already reached a fixpoint.
  if (S.isAtFixpoint())
    return;

  // Collect all conditional branches in the must-be-executed context; their
  // successors are explored below.
  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in below function.
  //
  // void f(int a, int c, int *ptr) {
  //    if(a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      // Remember how many uses we had before exploring this successor so we
      // can drop the ones that only exist on this child path afterwards.
      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
698 | } // namespace | ||||
699 | |||||
700 | /// ------------------------ PointerInfo --------------------------------------- | ||||
701 | |||||
702 | namespace llvm { | ||||
703 | namespace AA { | ||||
704 | namespace PointerInfo { | ||||
705 | |||||
706 | struct State; | ||||
707 | |||||
708 | } // namespace PointerInfo | ||||
709 | } // namespace AA | ||||
710 | |||||
/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
///
/// Declarations only; the definitions are provided out of line. Inherits the
/// Instruction* traits as a base.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};
720 | |||||
721 | /// Helper that allows OffsetAndSize as a key in a DenseMap. | ||||
722 | template <> | ||||
723 | struct DenseMapInfo<AAPointerInfo ::OffsetAndSize> | ||||
724 | : DenseMapInfo<std::pair<int64_t, int64_t>> {}; | ||||
725 | |||||
/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everythign
/// but the instruction
///
/// Declarations only; the definitions are provided out of line.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};
736 | |||||
737 | } // namespace llvm | ||||
738 | |||||
/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // We do not delete the Accesses objects but need to destroy them still.
    // They live in the Attributor's bump allocator (see addAccess), which
    // reclaims the memory but never runs destructors.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  // Move construction steals the access bins; the source is cleared so its
  // destructor does not destroy the transferred Accesses objects.
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  // NOTE(review): copy assignment copies the Accesses* pointers, so two
  // states can share bins while ~State destroys them per-state — presumably
  // safe within a single Attributor run; TODO confirm.
  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    // Walk both bin maps in lockstep and compare keys and access lists.
    // NOTE(review): this assumes both DenseMaps iterate equal key sets in the
    // same order — TODO confirm that holds for the maps compared here.
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  struct Accesses {
    // Accesses in insertion order; Map gives the index of the access for a
    // given remote instruction so lookups stay O(1).
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, unsigned> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind. \p RemoteI defaults to \p I itself; \p BinPtr, if
  /// given, overrides the bin chosen by (Offset, Size).
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    // Lazily allocate the bin in the Attributor's bump allocator; the
    // destructor above runs ~Accesses manually for these.
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as then new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  ///
  /// Invokes \p CB for every access in a bin that may overlap \p OAS; the
  /// bool passed to \p CB tells whether the overlap is exact.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (const auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  ///
  /// Variant keyed by an instruction: first locate the bin containing an
  /// access whose remote instruction is \p I, then forward to the
  /// offset/size-based overload above.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
    AAPointerInfo::OffsetAndSize OAS(-1, -1);
    for (const auto &It : AccessBins) {
      for (auto &Access : *It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      // A size of -1 acts as the "not found yet" sentinel.
      if (OAS.getSize() != -1)
        break;
    }
    // No access for I was found, we are done.
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    return forallInterferingAccesses(OAS, CB);
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};
939 | |||||
940 | namespace { | ||||
941 | struct AAPointerInfoImpl | ||||
942 | : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> { | ||||
943 | using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>; | ||||
  // The Attributor reference is unused here; the state is default-initialized.
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
945 | |||||
946 | /// See AbstractAttribute::getAsStr(). | ||||
947 | const std::string getAsStr() const override { | ||||
948 | return std::string("PointerInfo ") + | ||||
949 | (isValidState() ? (std::string("#") + | ||||
950 | std::to_string(AccessBins.size()) + " bins") | ||||
951 | : "<invalid>"); | ||||
952 | } | ||||
953 | |||||
  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Nothing special to do; delegate to the AAPointerInfo base.
    return AAPointerInfo::manifest(A);
  }
958 | |||||
  // Thin forwarder to the State implementation of the offset/size-keyed
  // interference walk (see AA::PointerInfo::State).
  bool forallInterferingAccesses(
      OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(OAS, CB);
  }
965 | |||||
966 | bool | ||||
967 | forallInterferingAccesses(Attributor &A, const AbstractAttribute &QueryingAA, | ||||
968 | Instruction &I, | ||||
969 | function_ref<bool(const Access &, bool)> UserCB, | ||||
970 | bool &HasBeenWrittenTo) const override { | ||||
971 | HasBeenWrittenTo = false; | ||||
972 | |||||
973 | SmallPtrSet<const Access *, 8> DominatingWrites; | ||||
974 | SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses; | ||||
975 | |||||
976 | Function &Scope = *I.getFunction(); | ||||
977 | const auto &NoSyncAA = A.getAAFor<AANoSync>( | ||||
978 | QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL); | ||||
979 | const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>( | ||||
980 | IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL); | ||||
981 | const bool NoSync = NoSyncAA.isAssumedNoSync(); | ||||
982 | |||||
983 | // Helper to determine if we need to consider threading, which we cannot | ||||
984 | // right now. However, if the function is (assumed) nosync or the thread | ||||
985 | // executing all instructions is the main thread only we can ignore | ||||
986 | // threading. | ||||
987 | auto CanIgnoreThreading = [&](const Instruction &I) -> bool { | ||||
988 | if (NoSync) | ||||
989 | return true; | ||||
990 | if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I)) | ||||
991 | return true; | ||||
992 | return false; | ||||
993 | }; | ||||
994 | |||||
995 | // Helper to determine if the access is executed by the same thread as the | ||||
996 | // load, for now it is sufficient to avoid any potential threading effects | ||||
997 | // as we cannot deal with them anyway. | ||||
998 | auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool { | ||||
999 | return CanIgnoreThreading(*Acc.getLocalInst()); | ||||
1000 | }; | ||||
1001 | |||||
1002 | // TODO: Use inter-procedural reachability and dominance. | ||||
1003 | const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( | ||||
1004 | QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL); | ||||
1005 | |||||
1006 | const bool FindInterferingWrites = I.mayReadFromMemory(); | ||||
1007 | const bool FindInterferingReads = I.mayWriteToMemory(); | ||||
1008 | const bool UseDominanceReasoning = | ||||
1009 | FindInterferingWrites && NoRecurseAA.isKnownNoRecurse(); | ||||
1010 | const bool CanUseCFGResoning = CanIgnoreThreading(I); | ||||
1011 | InformationCache &InfoCache = A.getInfoCache(); | ||||
1012 | const DominatorTree *DT = | ||||
1013 | InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(Scope); | ||||
1014 | |||||
1015 | enum GPUAddressSpace : unsigned { | ||||
1016 | Generic = 0, | ||||
1017 | Global = 1, | ||||
1018 | Shared = 3, | ||||
1019 | Constant = 4, | ||||
1020 | Local = 5, | ||||
1021 | }; | ||||
1022 | |||||
1023 | // Helper to check if a value has "kernel lifetime", that is it will not | ||||
1024 | // outlive a GPU kernel. This is true for shared, constant, and local | ||||
1025 | // globals on AMD and NVIDIA GPUs. | ||||
1026 | auto HasKernelLifetime = [&](Value *V, Module &M) { | ||||
1027 | Triple T(M.getTargetTriple()); | ||||
1028 | if (!(T.isAMDGPU() || T.isNVPTX())) | ||||
1029 | return false; | ||||
1030 | switch (V->getType()->getPointerAddressSpace()) { | ||||
1031 | case GPUAddressSpace::Shared: | ||||
1032 | case GPUAddressSpace::Constant: | ||||
1033 | case GPUAddressSpace::Local: | ||||
1034 | return true; | ||||
1035 | default: | ||||
1036 | return false; | ||||
1037 | }; | ||||
1038 | }; | ||||
1039 | |||||
1040 | // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query | ||||
1041 | // to determine if we should look at reachability from the callee. For | ||||
1042 | // certain pointers we know the lifetime and we do not have to step into the | ||||
1043 | // callee to determine reachability as the pointer would be dead in the | ||||
1044 | // callee. See the conditional initialization below. | ||||
1045 | std::function<bool(const Function &)> IsLiveInCalleeCB; | ||||
1046 | |||||
1047 | if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) { | ||||
1048 | // If the alloca containing function is not recursive the alloca | ||||
1049 | // must be dead in the callee. | ||||
1050 | const Function *AIFn = AI->getFunction(); | ||||
1051 | const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( | ||||
1052 | *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL); | ||||
1053 | if (NoRecurseAA.isAssumedNoRecurse()) { | ||||
1054 | IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; }; | ||||
1055 | } | ||||
1056 | } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) { | ||||
1057 | // If the global has kernel lifetime we can stop if we reach a kernel | ||||
1058 | // as it is "dead" in the (unknown) callees. | ||||
1059 | if (HasKernelLifetime(GV, *GV->getParent())) | ||||
1060 | IsLiveInCalleeCB = [](const Function &Fn) { | ||||
1061 | return !Fn.hasFnAttribute("kernel"); | ||||
1062 | }; | ||||
1063 | } | ||||
1064 | |||||
1065 | auto AccessCB = [&](const Access &Acc, bool Exact) { | ||||
1066 | if ((!FindInterferingWrites || !Acc.isWrite()) && | ||||
1067 | (!FindInterferingReads || !Acc.isRead())) | ||||
1068 | return true; | ||||
1069 | |||||
1070 | bool Dominates = DT && Exact && Acc.isMustAccess() && | ||||
1071 | (Acc.getLocalInst()->getFunction() == &Scope) && | ||||
1072 | DT->dominates(Acc.getRemoteInst(), &I); | ||||
1073 | if (FindInterferingWrites && Dominates) | ||||
1074 | HasBeenWrittenTo = true; | ||||
1075 | |||||
1076 | // For now we only filter accesses based on CFG reasoning which does not | ||||
1077 | // work yet if we have threading effects, or the access is complicated. | ||||
1078 | if (CanUseCFGResoning && Dominates && UseDominanceReasoning && | ||||
1079 | IsSameThreadAsLoad(Acc)) | ||||
1080 | DominatingWrites.insert(&Acc); | ||||
1081 | |||||
1082 | InterferingAccesses.push_back({&Acc, Exact}); | ||||
1083 | return true; | ||||
1084 | }; | ||||
1085 | if (!State::forallInterferingAccesses(I, AccessCB)) | ||||
1086 | return false; | ||||
1087 | |||||
1088 | if (HasBeenWrittenTo) { | ||||
1089 | const Function *ScopePtr = &Scope; | ||||
1090 | IsLiveInCalleeCB = [ScopePtr](const Function &Fn) { | ||||
1091 | return ScopePtr != &Fn; | ||||
1092 | }; | ||||
1093 | } | ||||
1094 | |||||
1095 | // Helper to determine if we can skip a specific write access. This is in | ||||
1096 | // the worst case quadratic as we are looking for another write that will | ||||
1097 | // hide the effect of this one. | ||||
1098 | auto CanSkipAccess = [&](const Access &Acc, bool Exact) { | ||||
1099 | if ((!Acc.isWrite() || | ||||
1100 | !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA, | ||||
1101 | IsLiveInCalleeCB)) && | ||||
1102 | (!Acc.isRead() || | ||||
1103 | !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA, | ||||
1104 | IsLiveInCalleeCB))) | ||||
1105 | return true; | ||||
1106 | |||||
1107 | if (!DT || !UseDominanceReasoning) | ||||
1108 | return false; | ||||
1109 | if (!IsSameThreadAsLoad(Acc)) | ||||
1110 | return false; | ||||
1111 | if (!DominatingWrites.count(&Acc)) | ||||
1112 | return false; | ||||
1113 | for (const Access *DomAcc : DominatingWrites) { | ||||
1114 | assert(Acc.getLocalInst()->getFunction() ==(static_cast <bool> (Acc.getLocalInst()->getFunction () == DomAcc->getLocalInst()->getFunction() && "Expected dominating writes to be in the same function!" ) ? void (0) : __assert_fail ("Acc.getLocalInst()->getFunction() == DomAcc->getLocalInst()->getFunction() && \"Expected dominating writes to be in the same function!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 1116, __extension__ __PRETTY_FUNCTION__)) | ||||
1115 | DomAcc->getLocalInst()->getFunction() &&(static_cast <bool> (Acc.getLocalInst()->getFunction () == DomAcc->getLocalInst()->getFunction() && "Expected dominating writes to be in the same function!" ) ? void (0) : __assert_fail ("Acc.getLocalInst()->getFunction() == DomAcc->getLocalInst()->getFunction() && \"Expected dominating writes to be in the same function!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 1116, __extension__ __PRETTY_FUNCTION__)) | ||||
1116 | "Expected dominating writes to be in the same function!")(static_cast <bool> (Acc.getLocalInst()->getFunction () == DomAcc->getLocalInst()->getFunction() && "Expected dominating writes to be in the same function!" ) ? void (0) : __assert_fail ("Acc.getLocalInst()->getFunction() == DomAcc->getLocalInst()->getFunction() && \"Expected dominating writes to be in the same function!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 1116, __extension__ __PRETTY_FUNCTION__)); | ||||
1117 | |||||
1118 | if (DomAcc != &Acc && | ||||
1119 | DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) { | ||||
1120 | return true; | ||||
1121 | } | ||||
1122 | } | ||||
1123 | return false; | ||||
1124 | }; | ||||
1125 | |||||
1126 | // Run the user callback on all accesses we cannot skip and return if that | ||||
1127 | // succeeded for all or not. | ||||
1128 | unsigned NumInterferingAccesses = InterferingAccesses.size(); | ||||
1129 | for (auto &It : InterferingAccesses) { | ||||
1130 | if (NumInterferingAccesses > MaxInterferingAccesses || | ||||
1131 | !CanSkipAccess(*It.first, It.second)) { | ||||
1132 | if (!UserCB(*It.first, It.second)) | ||||
1133 | return false; | ||||
1134 | } | ||||
1135 | } | ||||
1136 | return true; | ||||
1137 | } | ||||
1138 | |||||
1139 | ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA, | ||||
1140 | int64_t Offset, CallBase &CB, | ||||
1141 | bool FromCallee = false) { | ||||
1142 | using namespace AA::PointerInfo; | ||||
1143 | if (!OtherAA.getState().isValidState() || !isValidState()) | ||||
1144 | return indicatePessimisticFixpoint(); | ||||
1145 | |||||
1146 | const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA); | ||||
1147 | bool IsByval = | ||||
1148 | FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr(); | ||||
1149 | |||||
1150 | // Combine the accesses bin by bin. | ||||
1151 | ChangeStatus Changed = ChangeStatus::UNCHANGED; | ||||
1152 | for (const auto &It : OtherAAImpl.getState()) { | ||||
1153 | OffsetAndSize OAS = OffsetAndSize::getUnknown(); | ||||
1154 | if (Offset != OffsetAndSize::Unknown) | ||||
1155 | OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize()); | ||||
1156 | Accesses *Bin = AccessBins.lookup(OAS); | ||||
1157 | for (const AAPointerInfo::Access &RAcc : *It.second) { | ||||
1158 | if (IsByval && !RAcc.isRead()) | ||||
1159 | continue; | ||||
1160 | bool UsedAssumedInformation = false; | ||||
1161 | AccessKind AK = RAcc.getKind(); | ||||
1162 | Optional<Value *> Content = RAcc.getContent(); | ||||
1163 | if (FromCallee) { | ||||
1164 | Content = A.translateArgumentToCallSiteContent( | ||||
1165 | RAcc.getContent(), CB, *this, UsedAssumedInformation); | ||||
1166 | AK = | ||||
1167 | AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW)); | ||||
1168 | AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST)); | ||||
1169 | } | ||||
1170 | Changed = | ||||
1171 | Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content, | ||||
1172 | AK, RAcc.getType(), RAcc.getRemoteInst(), Bin); | ||||
1173 | } | ||||
1174 | } | ||||
1175 | return Changed; | ||||
1176 | } | ||||
1177 | |||||
1178 | /// Statistic tracking for all AAPointerInfo implementations. | ||||
1179 | /// See AbstractAttribute::trackStatistics(). | ||||
1180 | void trackPointerInfoStatistics(const IRPosition &IRP) const {} | ||||
1181 | |||||
  /// Dump the state into \p O.
  ///
  /// Prints one header line per access bin ("[begin-end] : #accesses"),
  /// followed by one line per access: its kind and local instruction, the
  /// remote instruction if it differs, and the written value if known.
  void dumpState(raw_ostream &O) {
    for (auto &It : AccessBins) {
      O << "[" << It.first.getOffset() << "-"
        << It.first.getOffset() + It.first.getSize()
        << "] : " << It.getSecond()->size() << "\n";
      for (auto &Acc : *It.getSecond()) {
        O << " - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
        // Only show the remote instruction when it is distinct from the local
        // one (e.g., an access through a call site).
        if (Acc.getLocalInst() != Acc.getRemoteInst())
          O << " --> " << *Acc.getRemoteInst()
            << "\n";
        if (!Acc.isWrittenValueYetUndetermined()) {
          if (Acc.getWrittenValue())
            O << " - c: " << *Acc.getWrittenValue() << "\n";
          else
            O << " - c: <unknown>\n";
        }
      }
    }
  }
1202 | }; | ||||
1203 | |||||
/// Pointer-info deduction for a "floating" value, i.e., one whose (transitive)
/// uses we walk directly. Also serves as the base class for the argument and
/// call site argument specializations below.
struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// Deal with an access and signal if it was handled successfully.
  ///
  /// If \p Size is unknown but \p Offset and \p Ty are given, the size is
  /// derived from the store size of \p Ty. The access is then folded into the
  /// state via addAccess and \p Changed is updated accordingly.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      // Scalable vector types have no fixed store size; keep Size unknown.
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
    return true;
  };

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    // Constant byte offset relative to the associated value, or Unknown.
    int64_t Offset = OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    // Maps every visited pointer to its constant offset relative to the
    // associated value; the root is seeded at offset 0.
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    // Users that do not change the offset (casts, selects, returns) inherit
    // the operand's offset and are followed further.
    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        // Comparisons do not constitute a memory access.
        if (CE->isCompare())
          return true;
        if (!isa<GEPOperator>(CE)) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        APInt GEPOffset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->accumulateConstantOffset(DL, GEPOffset)) {
          // Non-constant GEP: offset becomes unknown but we keep following.
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        UsrOI.Offset = PtrOI.Offset + GEPOffset.getZExtValue();
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the value
      // might change while we iterate through a loop. For now, we give up if
      // the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        bool IsFirstPHIUser = !OffsetInfoMap.count(Usr);
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand has already an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
            DL, Offset, /* AllowNonInbounds */ true);
        auto It = OffsetInfoMap.find(CurPtrBase);
        if (It != OffsetInfoMap.end()) {
          Offset += It->getSecond().Offset;
          if (IsFirstPHIUser || Offset == UsrOI.Offset)
            return HandlePassthroughUser(Usr, PtrOI, Follow);
          LLVM_DEBUG(dbgs()
                     << "[AAPointerInfo] PHI operand pointer offset mismatch "
                     << *CurPtr << " in " << *Usr << "\n");
        } else {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                            << *CurPtr << " in " << *Usr << "\n");
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {
        // If the access is to a pointer that may or may not be the associated
        // value, e.g. due to a PHI, we cannot assume it will be read.
        AccessKind AK = AccessKind::AK_R;
        if (getUnderlyingObject(CurPtr) == &AssociatedValue)
          AK = AccessKind(AK | AccessKind::AK_MUST);
        else
          AK = AccessKind(AK | AccessKind::AK_MAY);
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr, AK,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            LoadI->getType());
      }

      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        // Storing the pointer itself lets it escape; give up on this use.
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        // If the access is to a pointer that may or may not be the associated
        // value, e.g. due to a PHI, we cannot assume it will be written.
        AccessKind AK = AccessKind::AK_W;
        if (getUnderlyingObject(CurPtr) == &AssociatedValue)
          AK = AccessKind(AK | AccessKind::AK_MUST);
        else
          AK = AccessKind(AK | AccessKind::AK_MAY);
        bool UsedAssumedInformation = false;
        // Record the stored value (simplified if possible) as the content.
        Optional<Value *> Content =
            A.getAssumedSimplified(*StoreI->getValueOperand(), *this,
                                   UsedAssumedInformation, AA::Interprocedural);
        return handleAccess(A, *StoreI, *CurPtr, Content, AK,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        // Lifetime markers and frees do not constitute tracked accesses.
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (getFreedOperand(CB, TLI) == U)
          return true;
        if (CB->isArgOperand(&U)) {
          // Delegate to the call site argument AA and fold its state in.
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddState(A, CSArgPI,
                                         OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return isValidState();
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    // When checkForAllUses replaces a use with an equivalent one, carry the
    // offset over (or verify it matches if already seen).
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU)) {
        LLVM_DEBUG({
          if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
            dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
                   << OffsetInfoMap[NewU].Offset << " vs "
                   << OffsetInfoMap[OldU].Offset << "\n";
          }
        });
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      }
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
      LLVM_DEBUG(
          dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
      return indicatePessimisticFixpoint();
    }

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      dumpState(dbgs());
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
1438 | |||||
1439 | struct AAPointerInfoReturned final : AAPointerInfoImpl { | ||||
1440 | AAPointerInfoReturned(const IRPosition &IRP, Attributor &A) | ||||
1441 | : AAPointerInfoImpl(IRP, A) {} | ||||
1442 | |||||
1443 | /// See AbstractAttribute::updateImpl(...). | ||||
1444 | ChangeStatus updateImpl(Attributor &A) override { | ||||
1445 | return indicatePessimisticFixpoint(); | ||||
1446 | } | ||||
1447 | |||||
1448 | /// See AbstractAttribute::trackStatistics() | ||||
1449 | void trackStatistics() const override { | ||||
1450 | AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); | ||||
1451 | } | ||||
1452 | }; | ||||
1453 | |||||
1454 | struct AAPointerInfoArgument final : AAPointerInfoFloating { | ||||
1455 | AAPointerInfoArgument(const IRPosition &IRP, Attributor &A) | ||||
1456 | : AAPointerInfoFloating(IRP, A) {} | ||||
1457 | |||||
1458 | /// See AbstractAttribute::initialize(...). | ||||
1459 | void initialize(Attributor &A) override { | ||||
1460 | AAPointerInfoFloating::initialize(A); | ||||
1461 | if (getAnchorScope()->isDeclaration()) | ||||
1462 | indicatePessimisticFixpoint(); | ||||
1463 | } | ||||
1464 | |||||
1465 | /// See AbstractAttribute::trackStatistics() | ||||
1466 | void trackStatistics() const override { | ||||
1467 | AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); | ||||
1468 | } | ||||
1469 | }; | ||||
1470 | |||||
/// Pointer info for a call site argument. Memory intrinsics are handled
/// directly; all other calls are answered by the callee argument's AA.
struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (=source) arguments as we know how they are
    // accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      // A constant length gives us an exact access size; otherwise unknown.
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        // Destination operand: written for LengthVal bytes at offset 0.
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_WRITE, 0,
                     Changed, nullptr, LengthVal);
      } else if (ArgNo == 1) {
        // Source operand: read for LengthVal bytes at offset 0.
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }

      LLVM_DEBUG({
        dbgs() << "Accesses by bin after update:\n";
        dumpState(dbgs());
      });

      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
                                /* FromCallee */ true);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
1528 | |||||
1529 | struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating { | ||||
1530 | AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
1531 | : AAPointerInfoFloating(IRP, A) {} | ||||
1532 | |||||
1533 | /// See AbstractAttribute::trackStatistics() | ||||
1534 | void trackStatistics() const override { | ||||
1535 | AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); | ||||
1536 | } | ||||
1537 | }; | ||||
1538 | } // namespace | ||||
1539 | |||||
1540 | /// -----------------------NoUnwind Function Attribute-------------------------- | ||||
1541 | |||||
1542 | namespace { | ||||
1543 | struct AANoUnwindImpl : AANoUnwind { | ||||
1544 | AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {} | ||||
1545 | |||||
1546 | const std::string getAsStr() const override { | ||||
1547 | return getAssumed() ? "nounwind" : "may-unwind"; | ||||
1548 | } | ||||
1549 | |||||
1550 | /// See AbstractAttribute::updateImpl(...). | ||||
1551 | ChangeStatus updateImpl(Attributor &A) override { | ||||
1552 | auto Opcodes = { | ||||
1553 | (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, | ||||
1554 | (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, | ||||
1555 | (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; | ||||
1556 | |||||
1557 | auto CheckForNoUnwind = [&](Instruction &I) { | ||||
1558 | if (!I.mayThrow()) | ||||
1559 | return true; | ||||
1560 | |||||
1561 | if (const auto *CB = dyn_cast<CallBase>(&I)) { | ||||
1562 | const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( | ||||
1563 | *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED); | ||||
1564 | return NoUnwindAA.isAssumedNoUnwind(); | ||||
1565 | } | ||||
1566 | return false; | ||||
1567 | }; | ||||
1568 | |||||
1569 | bool UsedAssumedInformation = false; | ||||
1570 | if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes, | ||||
1571 | UsedAssumedInformation)) | ||||
1572 | return indicatePessimisticFixpoint(); | ||||
1573 | |||||
1574 | return ChangeStatus::UNCHANGED; | ||||
1575 | } | ||||
1576 | }; | ||||
1577 | |||||
/// NoUnwind deduction for a whole function; all logic lives in the Impl.
struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};
1585 | |||||
1586 | /// NoUnwind attribute deduction for a call sites. | ||||
1587 | struct AANoUnwindCallSite final : AANoUnwindImpl { | ||||
1588 | AANoUnwindCallSite(const IRPosition &IRP, Attributor &A) | ||||
1589 | : AANoUnwindImpl(IRP, A) {} | ||||
1590 | |||||
1591 | /// See AbstractAttribute::initialize(...). | ||||
1592 | void initialize(Attributor &A) override { | ||||
1593 | AANoUnwindImpl::initialize(A); | ||||
1594 | Function *F = getAssociatedFunction(); | ||||
1595 | if (!F || F->isDeclaration()) | ||||
1596 | indicatePessimisticFixpoint(); | ||||
1597 | } | ||||
1598 | |||||
1599 | /// See AbstractAttribute::updateImpl(...). | ||||
1600 | ChangeStatus updateImpl(Attributor &A) override { | ||||
1601 | // TODO: Once we have call site specific value information we can provide | ||||
1602 | // call site specific liveness information and then it makes | ||||
1603 | // sense to specialize attributes for call sites arguments instead of | ||||
1604 | // redirecting requests to the callee argument. | ||||
1605 | Function *F = getAssociatedFunction(); | ||||
1606 | const IRPosition &FnPos = IRPosition::function(*F); | ||||
1607 | auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED); | ||||
1608 | return clampStateAndIndicateChange(getState(), FnAA.getState()); | ||||
1609 | } | ||||
1610 | |||||
1611 | /// See AbstractAttribute::trackStatistics() | ||||
1612 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind){ static llvm::Statistic NumIRCS_nounwind = {"attributor", "NumIRCS_nounwind" , ("Number of " "call site" " marked '" "nounwind" "'")};; ++ (NumIRCS_nounwind); }; } | ||||
1613 | }; | ||||
1614 | } // namespace | ||||
1615 | |||||
1616 | /// --------------------- Function Return Values ------------------------------- | ||||
1617 | |||||
1618 | namespace { | ||||
1619 | /// "Attribute" that collects all potential returned values and the return | ||||
1620 | /// instructions that they arise from. | ||||
1621 | /// | ||||
1622 | /// If there is a unique returned value R, the manifest method will: | ||||
1623 | /// - mark R with the "returned" attribute, if R is an argument. | ||||
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    // Without a function definition there is nothing to deduce.
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        // Record every return instruction as potentially returning this
        // argument, then fix the state optimistically — the IR attribute
        // already guarantees the result.
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    // If IPO on this function is not allowed we cannot improve anything.
    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    // Note: the -1 wraps to the maximal size_t since the return is unsigned.
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};
1732 | |||||
1733 | ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { | ||||
1734 | ChangeStatus Changed = ChangeStatus::UNCHANGED; | ||||
1735 | |||||
1736 | // Bookkeeping. | ||||
1737 | assert(isValidState())(static_cast <bool> (isValidState()) ? void (0) : __assert_fail ("isValidState()", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 1737, __extension__ __PRETTY_FUNCTION__)); | ||||
1738 | STATS_DECLTRACK(KnownReturnValues, FunctionReturn,{ static llvm::Statistic NumIRFunctionReturn_KnownReturnValues = {"attributor", "NumIRFunctionReturn_KnownReturnValues", "Number of function with known return values" };; ++(NumIRFunctionReturn_KnownReturnValues); } | ||||
1739 | "Number of function with known return values"){ static llvm::Statistic NumIRFunctionReturn_KnownReturnValues = {"attributor", "NumIRFunctionReturn_KnownReturnValues", "Number of function with known return values" };; ++(NumIRFunctionReturn_KnownReturnValues); }; | ||||
1740 | |||||
1741 | // Check if we have an assumed unique return value that we could manifest. | ||||
1742 | Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); | ||||
1743 | |||||
1744 | if (!UniqueRV || !UniqueRV.value()) | ||||
1745 | return Changed; | ||||
1746 | |||||
1747 | // Bookkeeping. | ||||
1748 | STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,{ static llvm::Statistic NumIRFunctionReturn_UniqueReturnValue = {"attributor", "NumIRFunctionReturn_UniqueReturnValue", "Number of function with unique return" };; ++(NumIRFunctionReturn_UniqueReturnValue); } | ||||
1749 | "Number of function with unique return"){ static llvm::Statistic NumIRFunctionReturn_UniqueReturnValue = {"attributor", "NumIRFunctionReturn_UniqueReturnValue", "Number of function with unique return" };; ++(NumIRFunctionReturn_UniqueReturnValue); }; | ||||
1750 | // If the assumed unique return value is an argument, annotate it. | ||||
1751 | if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) { | ||||
1752 | if (UniqueRVArg->getType()->canLosslesslyBitCastTo( | ||||
1753 | getAssociatedFunction()->getReturnType())) { | ||||
1754 | getIRPosition() = IRPosition::argument(*UniqueRVArg); | ||||
1755 | Changed = IRAttribute::manifest(A); | ||||
1756 | } | ||||
1757 | } | ||||
1758 | return Changed; | ||||
1759 | } | ||||
1760 | |||||
1761 | const std::string AAReturnedValuesImpl::getAsStr() const { | ||||
1762 | return (isAtFixpoint() ? "returns(#" : "may-return(#") + | ||||
1763 | (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")"; | ||||
1764 | } | ||||
1765 | |||||
1766 | Optional<Value *> | ||||
1767 | AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const { | ||||
1768 | // If checkForAllReturnedValues provides a unique value, ignoring potential | ||||
1769 | // undef values that can also be present, it is assumed to be the actual | ||||
1770 | // return value and forwarded to the caller of this method. If there are | ||||
1771 | // multiple, a nullptr is returned indicating there cannot be a unique | ||||
1772 | // returned value. | ||||
1773 | Optional<Value *> UniqueRV; | ||||
1774 | Type *Ty = getAssociatedFunction()->getReturnType(); | ||||
1775 | |||||
1776 | auto Pred = [&](Value &RV) -> bool { | ||||
1777 | UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty); | ||||
1778 | return UniqueRV != Optional<Value *>(nullptr); | ||||
1779 | }; | ||||
1780 | |||||
1781 | if (!A.checkForAllReturnedValues(Pred, *this)) | ||||
1782 | UniqueRV = nullptr; | ||||
1783 | |||||
1784 | return UniqueRV; | ||||
1785 | } | ||||
1786 | |||||
1787 | bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts( | ||||
1788 | function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) | ||||
1789 | const { | ||||
1790 | if (!isValidState()) | ||||
1791 | return false; | ||||
1792 | |||||
1793 | // Check all returned values but ignore call sites as long as we have not | ||||
1794 | // encountered an overdefined one during an update. | ||||
1795 | for (const auto &It : ReturnedValues) { | ||||
1796 | Value *RV = It.first; | ||||
1797 | if (!Pred(*RV, It.second)) | ||||
1798 | return false; | ||||
1799 | } | ||||
1800 | |||||
1801 | return true; | ||||
1802 | } | ||||
1803 | |||||
1804 | ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) { | ||||
1805 | ChangeStatus Changed = ChangeStatus::UNCHANGED; | ||||
1806 | |||||
1807 | SmallVector<AA::ValueAndContext> Values; | ||||
1808 | bool UsedAssumedInformation = false; | ||||
1809 | auto ReturnInstCB = [&](Instruction &I) { | ||||
1810 | ReturnInst &Ret = cast<ReturnInst>(I); | ||||
1811 | Values.clear(); | ||||
1812 | if (!A.getAssumedSimplifiedValues(IRPosition::value(*Ret.getReturnValue()), | ||||
1813 | *this, Values, AA::Intraprocedural, | ||||
1814 | UsedAssumedInformation)) | ||||
1815 | Values.push_back({*Ret.getReturnValue(), Ret}); | ||||
1816 | |||||
1817 | for (auto &VAC : Values) { | ||||
1818 | assert(AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) &&(static_cast <bool> (AA::isValidInScope(*VAC.getValue() , Ret.getFunction()) && "Assumed returned value should be valid in function scope!" ) ? void (0) : __assert_fail ("AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) && \"Assumed returned value should be valid in function scope!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 1819, __extension__ __PRETTY_FUNCTION__)) | ||||
1819 | "Assumed returned value should be valid in function scope!")(static_cast <bool> (AA::isValidInScope(*VAC.getValue() , Ret.getFunction()) && "Assumed returned value should be valid in function scope!" ) ? void (0) : __assert_fail ("AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) && \"Assumed returned value should be valid in function scope!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 1819, __extension__ __PRETTY_FUNCTION__)); | ||||
1820 | if (ReturnedValues[VAC.getValue()].insert(&Ret)) | ||||
1821 | Changed = ChangeStatus::CHANGED; | ||||
1822 | } | ||||
1823 | return true; | ||||
1824 | }; | ||||
1825 | |||||
1826 | // Discover returned values from all live returned instructions in the | ||||
1827 | // associated function. | ||||
1828 | if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret}, | ||||
1829 | UsedAssumedInformation)) | ||||
1830 | return indicatePessimisticFixpoint(); | ||||
1831 | return Changed; | ||||
1832 | } | ||||
1833 | |||||
/// Returned-values deduction for a whole function; all logic lives in the
/// Impl. The statistic counts arguments that get marked "returned".
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
1841 | |||||
/// Returned values information for call sites. Not implemented; constructing
/// this position is a programming error.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1865 | } // namespace | ||||
1866 | |||||
1867 | /// ------------------------ NoSync Function Attribute ------------------------- | ||||
1868 | |||||
1869 | bool AANoSync::isNonRelaxedAtomic(const Instruction *I) { | ||||
1870 | if (!I->isAtomic()) | ||||
1871 | return false; | ||||
1872 | |||||
1873 | if (auto *FI = dyn_cast<FenceInst>(I)) | ||||
1874 | // All legal orderings for fence are stronger than monotonic. | ||||
1875 | return FI->getSyncScopeID() != SyncScope::SingleThread; | ||||
1876 | if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) { | ||||
1877 | // Unordered is not a legal ordering for cmpxchg. | ||||
1878 | return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic || | ||||
1879 | AI->getFailureOrdering() != AtomicOrdering::Monotonic); | ||||
1880 | } | ||||
1881 | |||||
1882 | AtomicOrdering Ordering; | ||||
1883 | switch (I->getOpcode()) { | ||||
1884 | case Instruction::AtomicRMW: | ||||
1885 | Ordering = cast<AtomicRMWInst>(I)->getOrdering(); | ||||
1886 | break; | ||||
1887 | case Instruction::Store: | ||||
1888 | Ordering = cast<StoreInst>(I)->getOrdering(); | ||||
1889 | break; | ||||
1890 | case Instruction::Load: | ||||
1891 | Ordering = cast<LoadInst>(I)->getOrdering(); | ||||
1892 | break; | ||||
1893 | default: | ||||
1894 | llvm_unreachable(::llvm::llvm_unreachable_internal("New atomic operations need to be known in the attributor." , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 1895) | ||||
1895 | "New atomic operations need to be known in the attributor.")::llvm::llvm_unreachable_internal("New atomic operations need to be known in the attributor." , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 1895); | ||||
1896 | } | ||||
1897 | |||||
1898 | return (Ordering != AtomicOrdering::Unordered && | ||||
1899 | Ordering != AtomicOrdering::Monotonic); | ||||
1900 | } | ||||
1901 | |||||
1902 | /// Return true if this intrinsic is nosync. This is only used for intrinsics | ||||
1903 | /// which would be nosync except that they have a volatile flag. All other | ||||
1904 | /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td. | ||||
1905 | bool AANoSync::isNoSyncIntrinsic(const Instruction *I) { | ||||
1906 | if (auto *MI = dyn_cast<MemIntrinsic>(I)) | ||||
1907 | return !MI->isVolatile(); | ||||
1908 | return false; | ||||
1909 | } | ||||
1910 | |||||
1911 | namespace { | ||||
/// Shared nosync deduction logic; updateImpl is defined out of line below.
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};
1922 | |||||
1923 | ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) { | ||||
1924 | |||||
1925 | auto CheckRWInstForNoSync = [&](Instruction &I) { | ||||
1926 | return AA::isNoSyncInst(A, I, *this); | ||||
1927 | }; | ||||
1928 | |||||
1929 | auto CheckForNoSync = [&](Instruction &I) { | ||||
1930 | // At this point we handled all read/write effects and they are all | ||||
1931 | // nosync, so they can be skipped. | ||||
1932 | if (I.mayReadOrWriteMemory()) | ||||
1933 | return true; | ||||
1934 | |||||
1935 | // non-convergent and readnone imply nosync. | ||||
1936 | return !cast<CallBase>(I).isConvergent(); | ||||
1937 | }; | ||||
1938 | |||||
1939 | bool UsedAssumedInformation = false; | ||||
1940 | if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this, | ||||
1941 | UsedAssumedInformation) || | ||||
1942 | !A.checkForAllCallLikeInstructions(CheckForNoSync, *this, | ||||
1943 | UsedAssumedInformation)) | ||||
1944 | return indicatePessimisticFixpoint(); | ||||
1945 | |||||
1946 | return ChangeStatus::UNCHANGED; | ||||
1947 | } | ||||
1948 | |||||
/// NoSync deduction for a whole function; all logic lives in the Impl.
struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
1956 | |||||
/// NoSync attribute deduction for call sites.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    // Without a known callee definition we cannot argue about nosync.
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    // Clamp our state to the nosync state deduced for the callee.
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
1985 | } // namespace | ||||
1986 | |||||
1987 | /// ------------------------ No-Free Attributes ---------------------------- | ||||
1988 | |||||
1989 | namespace { | ||||
1990 | struct AANoFreeImpl : public AANoFree { | ||||
1991 | AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {} | ||||
1992 | |||||
1993 | /// See AbstractAttribute::updateImpl(...). | ||||
1994 | ChangeStatus updateImpl(Attributor &A) override { | ||||
1995 | auto CheckForNoFree = [&](Instruction &I) { | ||||
1996 | const auto &CB = cast<CallBase>(I); | ||||
1997 | if (CB.hasFnAttr(Attribute::NoFree)) | ||||
1998 | return true; | ||||
1999 | |||||
2000 | const auto &NoFreeAA = A.getAAFor<AANoFree>( | ||||
2001 | *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); | ||||
2002 | return NoFreeAA.isAssumedNoFree(); | ||||
2003 | }; | ||||
2004 | |||||
2005 | bool UsedAssumedInformation = false; | ||||
2006 | if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this, | ||||
2007 | UsedAssumedInformation)) | ||||
2008 | return indicatePessimisticFixpoint(); | ||||
2009 | return ChangeStatus::UNCHANGED; | ||||
2010 | } | ||||
2011 | |||||
2012 | /// See AbstractAttribute::getAsStr(). | ||||
2013 | const std::string getAsStr() const override { | ||||
2014 | return getAssumed() ? "nofree" : "may-free"; | ||||
2015 | } | ||||
2016 | }; | ||||
2017 | |||||
/// NoFree deduction for a whole function; all logic lives in the Impl.
struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
2025 | |||||
/// NoFree attribute deduction for call sites.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    // Without a known callee definition we cannot argue about freeing.
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    // Clamp our state to the nofree state deduced for the callee.
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};
2054 | |||||
2055 | /// NoFree attribute for floating values. | ||||
2056 | struct AANoFreeFloating : AANoFreeImpl { | ||||
2057 | AANoFreeFloating(const IRPosition &IRP, Attributor &A) | ||||
2058 | : AANoFreeImpl(IRP, A) {} | ||||
2059 | |||||
2060 | /// See AbstractAttribute::trackStatistics() | ||||
2061 | void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree){ static llvm::Statistic NumIRFloating_nofree = {"attributor" , "NumIRFloating_nofree", ("Number of floating values known to be '" "nofree" "'")};; ++(NumIRFloating_nofree); }} | ||||
2062 | |||||
2063 | /// See Abstract Attribute::updateImpl(...). | ||||
2064 | ChangeStatus updateImpl(Attributor &A) override { | ||||
2065 | const IRPosition &IRP = getIRPosition(); | ||||
2066 | |||||
2067 | const auto &NoFreeAA = A.getAAFor<AANoFree>( | ||||
2068 | *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL); | ||||
2069 | if (NoFreeAA.isAssumedNoFree()) | ||||
2070 | return ChangeStatus::UNCHANGED; | ||||
2071 | |||||
2072 | Value &AssociatedValue = getIRPosition().getAssociatedValue(); | ||||
2073 | auto Pred = [&](const Use &U, bool &Follow) -> bool { | ||||
2074 | Instruction *UserI = cast<Instruction>(U.getUser()); | ||||
2075 | if (auto *CB = dyn_cast<CallBase>(UserI)) { | ||||
2076 | if (CB->isBundleOperand(&U)) | ||||
2077 | return false; | ||||
2078 | if (!CB->isArgOperand(&U)) | ||||
2079 | return true; | ||||
2080 | unsigned ArgNo = CB->getArgOperandNo(&U); | ||||
2081 | |||||
2082 | const auto &NoFreeArg = A.getAAFor<AANoFree>( | ||||
2083 | *this, IRPosition::callsite_argument(*CB, ArgNo), | ||||
2084 | DepClassTy::REQUIRED); | ||||
2085 | return NoFreeArg.isAssumedNoFree(); | ||||
2086 | } | ||||
2087 | |||||
2088 | if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || | ||||
2089 | isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { | ||||
2090 | Follow = true; | ||||
2091 | return true; | ||||
2092 | } | ||||
2093 | if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) || | ||||
2094 | isa<ReturnInst>(UserI)) | ||||
2095 | return true; | ||||
2096 | |||||
2097 | // Unknown user. | ||||
2098 | return false; | ||||
2099 | }; | ||||
2100 | if (!A.checkForAllUses(Pred, *this, AssociatedValue)) | ||||
2101 | return indicatePessimisticFixpoint(); | ||||
2102 | |||||
2103 | return ChangeStatus::UNCHANGED; | ||||
2104 | } | ||||
2105 | }; | ||||
2106 | |||||
/// NoFree attribute for an argument (the original comment said "call site
/// argument", but this is the argument position; the statistic below tracks
/// arguments).
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};
2115 | |||||
/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    // Bail if there is no matching formal argument to query.
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    // Clamp our state to the nofree state deduced for the callee argument.
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
};
2138 | |||||
/// NoFree attribute for function return value. This position is never
/// legal for nofree: every entry point traps via llvm_unreachable.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
2159 | |||||
2160 | /// NoFree attribute deduction for a call site return value. | ||||
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  // Deduction only: returning UNCHANGED here means no nofree attribute is
  // ever written to the IR for a call site return position.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};
2171 | } // namespace | ||||
2172 | |||||
2173 | /// ------------------------ NonNull Argument Attribute ------------------------ | ||||
2174 | namespace { | ||||
/// Determine what the single use \p U (whose user is \p I) of
/// \p AssociatedValue tells us about non-nullness and dereferenceability.
/// \p IsNonNull is or'ed with whatever non-nullness the use implies; the
/// return value is the number of bytes known dereferenceable at this use
/// (0 when nothing is known). \p TrackUse is set to true when the user is a
/// cast or GEP whose own uses should be followed further.
///
/// NOTE(review): \p I is dereferenced below (isa<CastInst>(I),
/// I->getFunction(), MemoryLocation::getOrNone(I)) without a null check;
/// callers are presumed to always pass the non-null user instruction of
/// \p U — confirm, as a static analyzer reports a possible null pointee
/// on this path (ValueTracking.h "Called C++ object pointer is null").
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  // Non-pointer operands carry no nonnull/dereferenceable information.
  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart to avoid looking through things we do not
  // like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }

  if (isa<GetElementPtrInst>(I)) {
    TrackUse = true;
    return 0;
  }

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  // Without a function context, conservatively assume null is a valid
  // (defined) pointer value, which blocks nonnull deduction below.
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    // Operand bundles (e.g. llvm.assume) may carry retained knowledge about
    // nonnull/dereferenceable directly.
    if (CB->isBundleOperand(U)) {
      if (RetainedKnowledge RK = getKnowledgeFromUse(
              U, {Attribute::NonNull, Attribute::Dereferenceable})) {
        IsNonNull |=
            (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
        return RK.ArgValue;
      }
      return 0;
    }

    // Being called implies the pointer is nonnull where null is undefined,
    // but gives no dereferenceable byte count.
    if (CB->isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    // A call argument: forward the query to the call site argument position.
    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  // Only precisely-sized, non-volatile memory accesses through UseV itself
  // provide dereferenceability facts.
  Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
    return 0;

  int64_t Offset;
  const Value *Base =
      getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
  if (Base && Base == &AssociatedValue) {
    int64_t DerefBytes = Loc->Size.getValue() + Offset;
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  /// Corner case when an offset is 0.
  Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
                                          /*AllowNonInbounds*/ true);
  if (Base && Base == &AssociatedValue && Offset == 0) {
    int64_t DerefBytes = Loc->Size.getValue();
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  return 0;
}
2252 | |||||
/// Common base for all AANonNull deductions; tries to settle the question in
/// initialize() via existing attributes/constants before any fixpoint rounds.
struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP, Attributor &A)
      : AANonNull(IRP, A),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = *getAssociatedValue().stripPointerCasts();
    // Existing nonnull/dereferenceable attributes settle the question — but
    // only where null is not a defined pointer value.
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
                /* IgnoreSubsumingPositions */ false, &A)) {
      indicateOptimisticFixpoint();
      return;
    }

    // A literal null constant can obviously never be nonnull.
    if (isa<ConstantPointerNull>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    AANonNull::initialize(A);

    // Known dereferenceable bytes with CanBeNull == false prove nonnull.
    bool CanBeNull, CanBeFreed;
    if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
                                         CanBeFreed)) {
      if (!CanBeNull) {
        indicateOptimisticFixpoint();
        return;
      }
    }

    // For globals not handled above we give up rather than reason about
    // linkage/address significance here.
    if (isa<GlobalValue>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    // Otherwise harvest information from uses reachable from the context
    // instruction (must-be-executed-context walk).
    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANonNull::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    State.setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};
2315 | |||||
2316 | /// NonNull attribute for a floating value. | ||||
/// NonNull attribute for a floating value: simplify the value, then either
/// query isKnownNonZero on the value itself or clamp against the AANonNull
/// states of the simplified values.
struct AANonNullFloating : public AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP, Attributor &A)
      : AANonNullImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    // Collect the (possibly simplified) values this position may take;
    // fall back to the associated value itself if simplification fails.
    bool Stripped;
    bool UsedAssumedInformation = false;
    SmallVector<AA::ValueAndContext> Values;
    if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
                                      AA::AnyScope, UsedAssumedInformation)) {
      Values.push_back({getAssociatedValue(), getCtxI()});
      Stripped = false;
    } else {
      // "Stripped" means simplification produced something other than the
      // associated value itself, so we must consult other AAs below.
      Stripped = Values.size() != 1 ||
                 Values.front().getValue() != &getAssociatedValue();
    }

    // DT/AC are optional inputs to isKnownNonZero; absent analyses just
    // weaken the query.
    DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (const Function *Fn = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
    }

    AANonNull::StateType T;
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        // Self-query on the unsimplified value: decide via ValueTracking.
        if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS = AA.getState();
        T ^= NS;
      }
      return T.isValidState();
    };

    // Every possible value must be nonnull for the position to be nonnull.
    for (const auto &VAC : Values)
      if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
        return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
2370 | |||||
2371 | /// NonNull attribute for function return value. | ||||
2372 | struct AANonNullReturned final | ||||
2373 | : AAReturnedFromReturnedValues<AANonNull, AANonNull> { | ||||
2374 | AANonNullReturned(const IRPosition &IRP, Attributor &A) | ||||
2375 | : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {} | ||||
2376 | |||||
2377 | /// See AbstractAttribute::getAsStr(). | ||||
2378 | const std::string getAsStr() const override { | ||||
2379 | return getAssumed() ? "nonnull" : "may-null"; | ||||
2380 | } | ||||
2381 | |||||
2382 | /// See AbstractAttribute::trackStatistics() | ||||
2383 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull){ static llvm::Statistic NumIRFunctionReturn_nonnull = {"attributor" , "NumIRFunctionReturn_nonnull", ("Number of " "function returns" " marked '" "nonnull" "'")};; ++(NumIRFunctionReturn_nonnull ); } } | ||||
2384 | }; | ||||
2385 | |||||
2386 | /// NonNull attribute for function argument. | ||||
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  // Deduction itself is inherited: the argument state is clamped against the
  // states of all corresponding call site arguments by the base template.
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};
2395 | |||||
/// NonNull attribute for a call site argument; reuses the floating-value
/// update logic inherited from AANonNullFloating unchanged.
struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
2403 | |||||
2404 | /// NonNull attribute for a call site return position. | ||||
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  // The base template derives the call site return state from the callee's
  // returned position; only statistics are added here.
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
2413 | } // namespace | ||||
2414 | |||||
2415 | /// ------------------------ No-Recurse Attributes ---------------------------- | ||||
2416 | |||||
2417 | namespace { | ||||
2418 | struct AANoRecurseImpl : public AANoRecurse { | ||||
2419 | AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} | ||||
2420 | |||||
2421 | /// See AbstractAttribute::getAsStr() | ||||
2422 | const std::string getAsStr() const override { | ||||
2423 | return getAssumed() ? "norecurse" : "may-recurse"; | ||||
2424 | } | ||||
2425 | }; | ||||
2426 | |||||
struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          DepClassTy::NONE);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool UsedAssumedInformation = false;
    if (A.checkForAllCallSites(CallSitePred, *this, true,
                               UsedAssumedInformation)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    // Call-site reasoning failed; fall back to the reachability analysis:
    // if this function cannot transitively reach itself, it is norecurse.
    const AAFunctionReachability &EdgeReachability =
        A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
                                           DepClassTy::REQUIRED);
    if (EdgeReachability.canReach(A, *getAnchorScope()))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};
2464 | |||||
2465 | /// NoRecurse attribute deduction for a call sites. | ||||
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    // Without a visible callee definition there is nothing to deduce from.
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    // NOTE(review): F is dereferenced unchecked here; presumably updateImpl
    // is never invoked once initialize() reached a pessimistic fixpoint for
    // a null/declared F — confirm against the Attributor driver.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
2493 | } // namespace | ||||
2494 | |||||
2495 | /// -------------------- Undefined-Behavior Attributes ------------------------ | ||||
2496 | |||||
2497 | namespace { | ||||
/// Common implementation of AAUndefinedBehavior: incrementally classifies
/// instructions that access memory through a pointer (and branches, call
/// sites, returns) into "known UB" vs. "assumed not UB" sets until a
/// fixpoint is reached.
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  // Inspects memory accesses through a pointer (and also branches etc.).
  ChangeStatus updateImpl(Attributor &A) override {
    // Remember set sizes so we can report CHANGED iff something was added.
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // Lang ref now states volatile store is not UB, let's skip them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.value();

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond || !*SimplifiedCond)
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If current argument is known to be simplified to null pointer and the
        // corresponding argument position is known to have nonnull attribute,
        // the argument is poison. Furthermore, if the argument is poison and
        // the position is known to have noundef attribute, this callsite is
        // considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        // (1) Not having a value means it is dead. (we can replace the value
        //     with undef)
        // (2) Simplified to undef. The argument violates the noundef attribute.
        // (3) Simplified to null pointer where known to be nonnull.
        //     The argument is a poison value and violates the noundef
        //     attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal =
            A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
                                   UsedAssumedInformation, AA::Interprocedural);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal && !SimplifiedVal.value())
          return true;
        if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.value()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified return value to continue.
      Optional<Value *> SimplifiedRetValue =
          stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
      if (!SimplifiedRetValue || !*SimplifiedRetValue)
        return true;

      // Check if a return instruction always causes UB or not.
      // Note: It is guaranteed that the returned position of the anchor
      //       scope has noundef attribute when this is called.
      // We also ensure the return position is not "assumed dead"
      // because the returned value was then potentially simplified to
      // `undef` in AAReturnedValues without removing the `noundef`
      // attribute yet.

      // When the returned position has noundef attribute, UB occurs in the
      // following cases.
      //   (1) Returned value is known to be undef.
      //   (2) The value is known to be a null pointer and the returned
      //       position has nonnull attribute (because the returned value is
      //       poison).
      if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
        auto &NonNullAA = A.getAAFor<AANonNull>(
            *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }

      return true;
    };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has noundef attribute,
    // check all return instructions.
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllInstructions(InspectReturnInstForUB, *this,
                                    {Instruction::Ret}, UsedAssumedInformation,
                                    /* CheckBBLivenessOnly */ true);
      }
    }

    // CHANGED iff either set grew during this round (sets only ever grow).
    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  // An instruction causes UB (known) exactly when it was placed in the
  // KnownUBInsts set by updateImpl.
  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the assumed to _not_
    // cause UB, then it is assumed UB (that includes those
    // in the KnownUBInsts set). The rest is boilerplate
    // to ensure that it is one of the instructions we test
    // for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto *BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    } break;
    default:
      // Opcodes we do not analyze are never considered UB.
      return false;
    }
    return false;
  }

  // Replace every known-UB instruction with an unreachable terminator.
  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called on updates in which if we're processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV =
        A.getAssumedSimplified(IRPosition::value(*V), *this,
                               UsedAssumedInformation, AA::Interprocedural);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!*SimplifiedV)
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};
2810 | |||||
struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Count all live instructions this AA proved to cause UB (collected in
    // the inherited KnownUBInsts set).
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
2823 | } // namespace | ||||
2824 | |||||
2825 | /// ------------------------ Will-Return Attributes ---------------------------- | ||||
2826 | |||||
2827 | namespace { | ||||
2828 | // Helper function that checks whether a function has any cycle which we don't | ||||
2829 | // know if it is bounded or not. | ||||
2830 | // Loops with maximum trip count are considered bounded, any other cycle not. | ||||
2831 | static bool mayContainUnboundedCycle(Function &F, Attributor &A) { | ||||
2832 | ScalarEvolution *SE = | ||||
2833 | A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); | ||||
2834 | LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); | ||||
2835 | // If either SCEV or LoopInfo is not available for the function then we assume | ||||
2836 | // any cycle to be unbounded cycle. | ||||
2837 | // We use scc_iterator which uses Tarjan algorithm to find all the maximal | ||||
2838 | // SCCs.To detect if there's a cycle, we only need to find the maximal ones. | ||||
2839 | if (!SE || !LI) { | ||||
2840 | for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) | ||||
2841 | if (SCCI.hasCycle()) | ||||
2842 | return true; | ||||
2843 | return false; | ||||
2844 | } | ||||
2845 | |||||
2846 | // If there's irreducible control, the function may contain non-loop cycles. | ||||
2847 | if (mayContainIrreducibleControl(F, LI)) | ||||
2848 | return true; | ||||
2849 | |||||
2850 | // Any loop that does not have a max trip count is considered unbounded cycle. | ||||
2851 | for (auto *L : LI->getLoopsInPreorder()) { | ||||
2852 | if (!SE->getSmallConstantMaxTripCount(L)) | ||||
2853 | return true; | ||||
2854 | } | ||||
2855 | return false; | ||||
2856 | } | ||||
2857 | |||||
2858 | struct AAWillReturnImpl : public AAWillReturn { | ||||
2859 | AAWillReturnImpl(const IRPosition &IRP, Attributor &A) | ||||
2860 | : AAWillReturn(IRP, A) {} | ||||
2861 | |||||
2862 | /// See AbstractAttribute::initialize(...). | ||||
2863 | void initialize(Attributor &A) override { | ||||
2864 | AAWillReturn::initialize(A); | ||||
2865 | |||||
2866 | if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) { | ||||
2867 | indicateOptimisticFixpoint(); | ||||
2868 | return; | ||||
2869 | } | ||||
2870 | } | ||||
2871 | |||||
2872 | /// Check for `mustprogress` and `readonly` as they imply `willreturn`. | ||||
2873 | bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) { | ||||
2874 | // Check for `mustprogress` in the scope and the associated function which | ||||
2875 | // might be different if this is a call site. | ||||
2876 | if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) && | ||||
2877 | (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress())) | ||||
2878 | return false; | ||||
2879 | |||||
2880 | bool IsKnown; | ||||
2881 | if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) | ||||
2882 | return IsKnown || !KnownOnly; | ||||
2883 | return false; | ||||
2884 | } | ||||
2885 | |||||
2886 | /// See AbstractAttribute::updateImpl(...). | ||||
2887 | ChangeStatus updateImpl(Attributor &A) override { | ||||
2888 | if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) | ||||
2889 | return ChangeStatus::UNCHANGED; | ||||
2890 | |||||
2891 | auto CheckForWillReturn = [&](Instruction &I) { | ||||
2892 | IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); | ||||
2893 | const auto &WillReturnAA = | ||||
2894 | A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED); | ||||
2895 | if (WillReturnAA.isKnownWillReturn()) | ||||
2896 | return true; | ||||
2897 | if (!WillReturnAA.isAssumedWillReturn()) | ||||
2898 | return false; | ||||
2899 | const auto &NoRecurseAA = | ||||
2900 | A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED); | ||||
2901 | return NoRecurseAA.isAssumedNoRecurse(); | ||||
2902 | }; | ||||
2903 | |||||
2904 | bool UsedAssumedInformation = false; | ||||
2905 | if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this, | ||||
2906 | UsedAssumedInformation)) | ||||
2907 | return indicatePessimisticFixpoint(); | ||||
2908 | |||||
2909 | return ChangeStatus::UNCHANGED; | ||||
2910 | } | ||||
2911 | |||||
2912 | /// See AbstractAttribute::getAsStr() | ||||
2913 | const std::string getAsStr() const override { | ||||
2914 | return getAssumed() ? "willreturn" : "may-noreturn"; | ||||
2915 | } | ||||
2916 | }; | ||||
2917 | |||||
struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);

    // A function without a visible body, or one that may contain an
    // unbounded cycle, can never be shown to return; give up right away.
    Function *F = getAnchorScope();
    if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};
2934 | |||||
/// WillReturn attribute deduction for a call site.
2936 | struct AAWillReturnCallSite final : AAWillReturnImpl { | ||||
2937 | AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) | ||||
2938 | : AAWillReturnImpl(IRP, A) {} | ||||
2939 | |||||
2940 | /// See AbstractAttribute::initialize(...). | ||||
2941 | void initialize(Attributor &A) override { | ||||
2942 | AAWillReturnImpl::initialize(A); | ||||
2943 | Function *F = getAssociatedFunction(); | ||||
2944 | if (!F || !A.isFunctionIPOAmendable(*F)) | ||||
2945 | indicatePessimisticFixpoint(); | ||||
2946 | } | ||||
2947 | |||||
2948 | /// See AbstractAttribute::updateImpl(...). | ||||
2949 | ChangeStatus updateImpl(Attributor &A) override { | ||||
2950 | if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) | ||||
2951 | return ChangeStatus::UNCHANGED; | ||||
2952 | |||||
2953 | // TODO: Once we have call site specific value information we can provide | ||||
2954 | // call site specific liveness information and then it makes | ||||
2955 | // sense to specialize attributes for call sites arguments instead of | ||||
2956 | // redirecting requests to the callee argument. | ||||
2957 | Function *F = getAssociatedFunction(); | ||||
2958 | const IRPosition &FnPos = IRPosition::function(*F); | ||||
2959 | auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); | ||||
2960 | return clampStateAndIndicateChange(getState(), FnAA.getState()); | ||||
2961 | } | ||||
2962 | |||||
2963 | /// See AbstractAttribute::trackStatistics() | ||||
2964 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn){ static llvm::Statistic NumIRCS_willreturn = {"attributor", "NumIRCS_willreturn" , ("Number of " "call site" " marked '" "willreturn" "'")};; ++ (NumIRCS_willreturn); }; } | ||||
2965 | }; | ||||
2966 | } // namespace | ||||
2967 | |||||
2968 | /// -------------------AAReachability Attribute-------------------------- | ||||
2969 | |||||
2970 | namespace { | ||||
2971 | struct AAReachabilityImpl : AAReachability { | ||||
2972 | AAReachabilityImpl(const IRPosition &IRP, Attributor &A) | ||||
2973 | : AAReachability(IRP, A) {} | ||||
2974 | |||||
2975 | const std::string getAsStr() const override { | ||||
2976 | // TODO: Return the number of reachable queries. | ||||
2977 | return "reachable"; | ||||
2978 | } | ||||
2979 | |||||
2980 | /// See AbstractAttribute::updateImpl(...). | ||||
2981 | ChangeStatus updateImpl(Attributor &A) override { | ||||
2982 | return ChangeStatus::UNCHANGED; | ||||
2983 | } | ||||
2984 | }; | ||||
2985 | |||||
struct AAReachabilityFunction final : public AAReachabilityImpl {
  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAReachabilityImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  // Counts functions for which reachability information was tracked.
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
};
2993 | } // namespace | ||||
2994 | |||||
2995 | /// ------------------------ NoAlias Argument Attribute ------------------------ | ||||
2996 | |||||
2997 | namespace { | ||||
struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
    // `noalias` is only meaningful on pointer-typed positions.
    assert(getAssociatedType()->isPointerTy() &&
           "Noalias is a pointer attribute");
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};
3008 | |||||
3009 | /// NoAlias attribute for a floating value. | ||||
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    // Strip through chains of single-use casts to reach the underlying
    // value; the single-use requirement keeps the noalias transfer sound.
    Value *Val = &getAssociatedValue();
    do {
      CastInst *CI = dyn_cast<CastInst>(Val);
      if (!CI)
        break;
      Value *Base = CI->getOperand(0);
      if (!Base->hasOneUse())
        break;
      Val = Base;
    } while (true);

    if (!Val->getType()->isPointerTy()) {
      indicatePessimisticFixpoint();
      return;
    }

    // Allocas are noalias by construction; so is a null pointer in address
    // spaces where null is not a defined (dereferenceable) location.
    if (isa<AllocaInst>(Val))
      indicateOptimisticFixpoint();
    else if (isa<ConstantPointerNull>(Val) &&
             !NullPointerIsDefined(getAnchorScope(),
                                   Val->getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
    else if (Val != &getAssociatedValue()) {
      // We stripped casts above: inherit known noalias from the base value.
      const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
      if (ValNoAliasAA.isKnownNoAlias())
        indicateOptimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};
3058 | |||||
3059 | /// NoAlias attribute for an argument. | ||||
3060 | struct AANoAliasArgument final | ||||
3061 | : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { | ||||
3062 | using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; | ||||
3063 | AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} | ||||
3064 | |||||
3065 | /// See AbstractAttribute::initialize(...). | ||||
3066 | void initialize(Attributor &A) override { | ||||
3067 | Base::initialize(A); | ||||
3068 | // See callsite argument attribute and callee argument attribute. | ||||
3069 | if (hasAttr({Attribute::ByVal})) | ||||
3070 | indicateOptimisticFixpoint(); | ||||
3071 | } | ||||
3072 | |||||
3073 | /// See AbstractAttribute::update(...). | ||||
3074 | ChangeStatus updateImpl(Attributor &A) override { | ||||
3075 | // We have to make sure no-alias on the argument does not break | ||||
3076 | // synchronization when this is a callback argument, see also [1] below. | ||||
3077 | // If synchronization cannot be affected, we delegate to the base updateImpl | ||||
3078 | // function, otherwise we give up for now. | ||||
3079 | |||||
3080 | // If the function is no-sync, no-alias cannot break synchronization. | ||||
3081 | const auto &NoSyncAA = | ||||
3082 | A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()), | ||||
3083 | DepClassTy::OPTIONAL); | ||||
3084 | if (NoSyncAA.isAssumedNoSync()) | ||||
3085 | return Base::updateImpl(A); | ||||
3086 | |||||
3087 | // If the argument is read-only, no-alias cannot break synchronization. | ||||
3088 | bool IsKnown; | ||||
3089 | if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) | ||||
3090 | return Base::updateImpl(A); | ||||
3091 | |||||
3092 | // If the argument is never passed through callbacks, no-alias cannot break | ||||
3093 | // synchronization. | ||||
3094 | bool UsedAssumedInformation = false; | ||||
3095 | if (A.checkForAllCallSites( | ||||
3096 | [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, | ||||
3097 | true, UsedAssumedInformation)) | ||||
3098 | return Base::updateImpl(A); | ||||
3099 | |||||
3100 | // TODO: add no-alias but make sure it doesn't break synchronization by | ||||
3101 | // introducing fake uses. See: | ||||
3102 | // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, | ||||
3103 | // International Workshop on OpenMP 2018, | ||||
3104 | // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf | ||||
3105 | |||||
3106 | return indicatePessimisticFixpoint(); | ||||
3107 | } | ||||
3108 | |||||
3109 | /// See AbstractAttribute::trackStatistics() | ||||
3110 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias){ static llvm::Statistic NumIRArguments_noalias = {"attributor" , "NumIRArguments_noalias", ("Number of " "arguments" " marked '" "noalias" "'")};; ++(NumIRArguments_noalias); } } | ||||
3111 | }; | ||||
3112 | |||||
struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    const auto &CB = cast<CallBase>(getAnchorValue());
    if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    // A null pointer is trivially noalias in address spaces where null is
    // not a defined location.
    Value &Val = getAssociatedValue();
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p ICS (= the underlying call site).
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            const CallBase &CB, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getCalleeArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = CB.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);

    // If the argument is readnone, there is no read-write aliasing.
    if (CBArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site.
    // NOTE: a missing AAResults (null AAR) conservatively counts as aliasing.
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias \n");

    return IsAliasing;
  }

  bool
  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
                                         const AAMemoryBehavior &MemBehaviorAA,
                                         const AANoAlias &NoAliasAA) {
    // We can deduce "noalias" if the following conditions hold.
    // (i) Associated value is assumed to be noalias in the definition.
    // (ii) Associated value is assumed to be no-capture in all the uses
    //      possibly executed before this callsite.
    // (iii) There is no other pointer argument which could alias with the
    //       value.

    bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
    if (!AssociatedValueIsNoAliasAtDef) {
      LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
                        << " is not no-alias at the definition\n");
      return false;
    }

    // Callback for capture tracking: treat a value as dereferenceable-or-null
    // if any dereferenceable bytes are assumed.
    auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
      const auto &DerefAA = A.getAAFor<AADereferenceable>(
          *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
      return DerefAA.getAssumedDereferenceableBytes();
    };

    A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const Function *ScopeFn = VIRP.getAnchorScope();
    auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at CFG and check only uses possibly executed before this
    // callsite.
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());

      // If UserI is the curr instruction and there is a single potential use of
      // the value in UserI we allow the use.
      // TODO: We should inspect the operands and allow those that cannot alias
      //       with the value.
      if (UserI == getCtxI() && UserI->getNumOperands() == 1)
        return true;

      if (ScopeFn) {
        // A use as a no-capture call site argument is fine.
        if (auto *CB = dyn_cast<CallBase>(UserI)) {
          if (CB->isArgOperand(&U)) {

            unsigned ArgNo = CB->getArgOperandNo(&U);

            const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
                *this, IRPosition::callsite_argument(*CB, ArgNo),
                DepClassTy::OPTIONAL);

            if (NoCaptureAA.isAssumedNoCapture())
              return true;
          }
        }

        // Uses that cannot be reached before this call site (within the
        // scope) cannot interfere with the noalias property here.
        if (!AA::isPotentiallyReachable(
                A, *UserI, *getCtxI(), *this,
                [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; }))
          return true;
      }

      // TODO: We should track the capturing uses in AANoCapture but the problem
      //       is CGSCC runs. For those we would need to "allow" AANoCapture for
      //       a value in the module slice.
      switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
      case UseCaptureKind::NO_CAPTURE:
        return true;
      case UseCaptureKind::MAY_CAPTURE:
        LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
                          << "\n");
        return false;
      case UseCaptureKind::PASSTHROUGH:
        // Follow the uses of the user (e.g. through a phi or cast).
        Follow = true;
        return true;
      }
      llvm_unreachable("unknown UseCaptureKind");
    };

    if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
        LLVM_DEBUG(
            dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
                   << " cannot be noalias as it is potentially captured\n");
        return false;
      }
    }
    A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);

    // Check there is no other pointer argument which could alias with the
    // value passed at this call site.
    // TODO: AbstractCallSite
    const auto &CB = cast<CallBase>(getAnchorValue());
    for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
      if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
        return false;

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // If the argument is readnone we are done as there are no accesses via the
    // argument.
    auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (MemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const auto &NoAliasAA =
        A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);

    AAResults *AAR = nullptr;
    if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
                                               NoAliasAA)) {
      LLVM_DEBUG(
          dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
      return ChangeStatus::UNCHANGED;
    }

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};
3306 | |||||
3307 | /// NoAlias attribute for function return value. | ||||
3308 | struct AANoAliasReturned final : AANoAliasImpl { | ||||
3309 | AANoAliasReturned(const IRPosition &IRP, Attributor &A) | ||||
3310 | : AANoAliasImpl(IRP, A) {} | ||||
3311 | |||||
3312 | /// See AbstractAttribute::initialize(...). | ||||
3313 | void initialize(Attributor &A) override { | ||||
3314 | AANoAliasImpl::initialize(A); | ||||
3315 | Function *F = getAssociatedFunction(); | ||||
3316 | if (!F || F->isDeclaration()) | ||||
3317 | indicatePessimisticFixpoint(); | ||||
3318 | } | ||||
3319 | |||||
3320 | /// See AbstractAttribute::updateImpl(...). | ||||
3321 | ChangeStatus updateImpl(Attributor &A) override { | ||||
3322 | |||||
3323 | auto CheckReturnValue = [&](Value &RV) -> bool { | ||||
3324 | if (Constant *C = dyn_cast<Constant>(&RV)) | ||||
3325 | if (C->isNullValue() || isa<UndefValue>(C)) | ||||
3326 | return true; | ||||
3327 | |||||
3328 | /// For now, we can only deduce noalias if we have call sites. | ||||
3329 | /// FIXME: add more support. | ||||
3330 | if (!isa<CallBase>(&RV)) | ||||
3331 | return false; | ||||
3332 | |||||
3333 | const IRPosition &RVPos = IRPosition::value(RV); | ||||
3334 | const auto &NoAliasAA = | ||||
3335 | A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED); | ||||
3336 | if (!NoAliasAA.isAssumedNoAlias()) | ||||
3337 | return false; | ||||
3338 | |||||
3339 | const auto &NoCaptureAA = | ||||
3340 | A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED); | ||||
3341 | return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); | ||||
3342 | }; | ||||
3343 | |||||
3344 | if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) | ||||
3345 | return indicatePessimisticFixpoint(); | ||||
3346 | |||||
3347 | return ChangeStatus::UNCHANGED; | ||||
3348 | } | ||||
3349 | |||||
3350 | /// See AbstractAttribute::trackStatistics() | ||||
3351 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias){ static llvm::Statistic NumIRFunctionReturn_noalias = {"attributor" , "NumIRFunctionReturn_noalias", ("Number of " "function returns" " marked '" "noalias" "'")};; ++(NumIRFunctionReturn_noalias ); } } | ||||
3352 | }; | ||||
3353 | |||||
3354 | /// NoAlias attribute deduction for a call site return value. | ||||
3355 | struct AANoAliasCallSiteReturned final : AANoAliasImpl { | ||||
3356 | AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
3357 | : AANoAliasImpl(IRP, A) {} | ||||
3358 | |||||
3359 | /// See AbstractAttribute::initialize(...). | ||||
3360 | void initialize(Attributor &A) override { | ||||
3361 | AANoAliasImpl::initialize(A); | ||||
3362 | Function *F = getAssociatedFunction(); | ||||
3363 | if (!F || F->isDeclaration()) | ||||
3364 | indicatePessimisticFixpoint(); | ||||
3365 | } | ||||
3366 | |||||
3367 | /// See AbstractAttribute::updateImpl(...). | ||||
3368 | ChangeStatus updateImpl(Attributor &A) override { | ||||
3369 | // TODO: Once we have call site specific value information we can provide | ||||
3370 | // call site specific liveness information and then it makes | ||||
3371 | // sense to specialize attributes for call sites arguments instead of | ||||
3372 | // redirecting requests to the callee argument. | ||||
3373 | Function *F = getAssociatedFunction(); | ||||
3374 | const IRPosition &FnPos = IRPosition::returned(*F); | ||||
3375 | auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED); | ||||
3376 | return clampStateAndIndicateChange(getState(), FnAA.getState()); | ||||
3377 | } | ||||
3378 | |||||
3379 | /// See AbstractAttribute::trackStatistics() | ||||
3380 | void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias){ static llvm::Statistic NumIRCSReturn_noalias = {"attributor" , "NumIRCSReturn_noalias", ("Number of " "call site returns" " marked '" "noalias" "'")};; ++(NumIRCSReturn_noalias); }; } | ||||
3381 | }; | ||||
3382 | } // namespace | ||||
3383 | |||||
3384 | /// -------------------AAIsDead Function Attribute----------------------- | ||||
3385 | |||||
3386 | namespace { | ||||
struct AAIsDeadValueImpl : public AAIsDead {
  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Give up right away for values in functions the Attributor is not
    // responsible for; we cannot reason about them.
    if (auto *Scope = getAnchorScope())
      if (!A.isRunOn(*Scope))
        indicatePessimisticFixpoint();
  }

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override { return isAssumed(IS_DEAD); }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return isKnown(IS_DEAD); }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  // Value-level liveness never declares whole basic blocks dead.
  bool isAssumedDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    // Only the instruction this attribute is anchored at can be dead here.
    return I == getCtxI() && isAssumedDead();
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return isAssumedDead(I) && isKnownDead();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return isAssumedDead() ? "assumed-dead" : "assumed-live";
  }

  /// Check if all uses are assumed dead.
  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
    // Callers might not check the type, void has no uses.
    if (V.getType()->isVoidTy() || V.use_empty())
      return true;

    // If we replace a value with a constant there are no uses left afterwards.
    if (!isa<Constant>(V)) {
      if (auto *I = dyn_cast<Instruction>(&V))
        if (!A.isRunOn(*I->getFunction()))
          return false;
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(V, *this, UsedAssumedInformation);
      // Either no value is known yet (empty Optional) or V simplifies to an
      // actual (non-null) constant; in both cases the uses are treated as
      // (going to be) dead.
      if (!C || *C)
        return true;
    }

    // Reject every use; checkForAllUses then succeeds only if liveness
    // filtering removed all of them.
    auto UsePred = [&](const Use &U, bool &Follow) { return false; };
    // Explicitly set the dependence class to required because we want a long
    // chain of N dependent instructions to be considered live as soon as one is
    // without going through N update cycles. This is not required for
    // correctness.
    return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
                             DepClassTy::REQUIRED,
                             /* IgnoreDroppableUses */ false);
  }

  /// Determine if \p I is assumed to be side-effect free.
  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
    // A null \p I or a trivially dead instruction has no side effects.
    if (!I || wouldInstructionBeTriviallyDead(I))
      return true;

    // Beyond that, only non-intrinsic call sites can be argued free of side
    // effects here.
    auto *CB = dyn_cast<CallBase>(I);
    if (!CB || isa<IntrinsicInst>(CB))
      return false;

    // The call must not unwind ...
    const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
    const auto &NoUnwindAA =
        A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
    if (!NoUnwindAA.isAssumedNoUnwind())
      return false;
    // ... and while the nounwind fact is only assumed, record an optional
    // dependence manually (the query above used DepClassTy::NONE on purpose).
    if (!NoUnwindAA.isKnownNoUnwind())
      A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);

    // ... and the call must not write to memory.
    bool IsKnown;
    return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
  }
};
3473 | |||||
3474 | struct AAIsDeadFloating : public AAIsDeadValueImpl { | ||||
3475 | AAIsDeadFloating(const IRPosition &IRP, Attributor &A) | ||||
3476 | : AAIsDeadValueImpl(IRP, A) {} | ||||
3477 | |||||
3478 | /// See AbstractAttribute::initialize(...). | ||||
3479 | void initialize(Attributor &A) override { | ||||
3480 | AAIsDeadValueImpl::initialize(A); | ||||
3481 | |||||
3482 | if (isa<UndefValue>(getAssociatedValue())) { | ||||
3483 | indicatePessimisticFixpoint(); | ||||
3484 | return; | ||||
3485 | } | ||||
3486 | |||||
3487 | Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); | ||||
3488 | if (!isAssumedSideEffectFree(A, I)) { | ||||
3489 | if (!isa_and_nonnull<StoreInst>(I)) | ||||
3490 | indicatePessimisticFixpoint(); | ||||
3491 | else | ||||
3492 | removeAssumedBits(HAS_NO_EFFECT); | ||||
3493 | } | ||||
3494 | } | ||||
3495 | |||||
3496 | bool isDeadStore(Attributor &A, StoreInst &SI) { | ||||
3497 | // Lang ref now states volatile store is not UB/dead, let's skip them. | ||||
3498 | if (SI.isVolatile()) | ||||
3499 | return false; | ||||
3500 | |||||
3501 | bool UsedAssumedInformation = false; | ||||
3502 | SmallSetVector<Value *, 4> PotentialCopies; | ||||
3503 | if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this, | ||||
3504 | UsedAssumedInformation)) | ||||
3505 | return false; | ||||
3506 | return llvm::all_of(PotentialCopies, [&](Value *V) { | ||||
3507 | return A.isAssumedDead(IRPosition::value(*V), this, nullptr, | ||||
3508 | UsedAssumedInformation); | ||||
3509 | }); | ||||
3510 | } | ||||
3511 | |||||
3512 | /// See AbstractAttribute::getAsStr(). | ||||
3513 | const std::string getAsStr() const override { | ||||
3514 | Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); | ||||
3515 | if (isa_and_nonnull<StoreInst>(I)) | ||||
3516 | if (isValidState()) | ||||
3517 | return "assumed-dead-store"; | ||||
3518 | return AAIsDeadValueImpl::getAsStr(); | ||||
3519 | } | ||||
3520 | |||||
3521 | /// See AbstractAttribute::updateImpl(...). | ||||
3522 | ChangeStatus updateImpl(Attributor &A) override { | ||||
3523 | Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); | ||||
3524 | if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { | ||||
3525 | if (!isDeadStore(A, *SI)) | ||||
3526 | return indicatePessimisticFixpoint(); | ||||
3527 | } else { | ||||
3528 | if (!isAssumedSideEffectFree(A, I)) | ||||
3529 | return indicatePessimisticFixpoint(); | ||||
3530 | if (!areAllUsesAssumedDead(A, getAssociatedValue())) | ||||
3531 | return indicatePessimisticFixpoint(); | ||||
3532 | } | ||||
3533 | return ChangeStatus::UNCHANGED; | ||||
3534 | } | ||||
3535 | |||||
3536 | bool isRemovableStore() const override { | ||||
3537 | return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue()); | ||||
3538 | } | ||||
3539 | |||||
3540 | /// See AbstractAttribute::manifest(...). | ||||
3541 | ChangeStatus manifest(Attributor &A) override { | ||||
3542 | Value &V = getAssociatedValue(); | ||||
3543 | if (auto *I = dyn_cast<Instruction>(&V)) { | ||||
3544 | // If we get here we basically know the users are all dead. We check if | ||||
3545 | // isAssumedSideEffectFree returns true here again because it might not be | ||||
3546 | // the case and only the users are dead but the instruction (=call) is | ||||
3547 | // still needed. | ||||
3548 | if (isa<StoreInst>(I) || | ||||
3549 | (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) { | ||||
3550 | A.deleteAfterManifest(*I); | ||||
3551 | return ChangeStatus::CHANGED; | ||||
3552 | } | ||||
3553 | } | ||||
3554 | return ChangeStatus::UNCHANGED; | ||||
3555 | } | ||||
3556 | |||||
3557 | /// See AbstractAttribute::trackStatistics() | ||||
3558 | void trackStatistics() const override { | ||||
3559 | STATS_DECLTRACK_FLOATING_ATTR(IsDead){ static llvm::Statistic NumIRFloating_IsDead = {"attributor" , "NumIRFloating_IsDead", ("Number of floating values known to be '" "IsDead" "'")};; ++(NumIRFloating_IsDead); } | ||||
3560 | } | ||||
3561 | }; | ||||
3562 | |||||
3563 | struct AAIsDeadArgument : public AAIsDeadFloating { | ||||
3564 | AAIsDeadArgument(const IRPosition &IRP, Attributor &A) | ||||
3565 | : AAIsDeadFloating(IRP, A) {} | ||||
3566 | |||||
3567 | /// See AbstractAttribute::initialize(...). | ||||
3568 | void initialize(Attributor &A) override { | ||||
3569 | AAIsDeadFloating::initialize(A); | ||||
3570 | if (!A.isFunctionIPOAmendable(*getAnchorScope())) | ||||
3571 | indicatePessimisticFixpoint(); | ||||
3572 | } | ||||
3573 | |||||
3574 | /// See AbstractAttribute::manifest(...). | ||||
3575 | ChangeStatus manifest(Attributor &A) override { | ||||
3576 | Argument &Arg = *getAssociatedArgument(); | ||||
3577 | if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) | ||||
3578 | if (A.registerFunctionSignatureRewrite( | ||||
3579 | Arg, /* ReplacementTypes */ {}, | ||||
3580 | Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, | ||||
3581 | Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { | ||||
3582 | return ChangeStatus::CHANGED; | ||||
3583 | } | ||||
3584 | return ChangeStatus::UNCHANGED; | ||||
3585 | } | ||||
3586 | |||||
3587 | /// See AbstractAttribute::trackStatistics() | ||||
3588 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead){ static llvm::Statistic NumIRArguments_IsDead = {"attributor" , "NumIRArguments_IsDead", ("Number of " "arguments" " marked '" "IsDead" "'")};; ++(NumIRArguments_IsDead); } } | ||||
3589 | }; | ||||
3590 | |||||
3591 | struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { | ||||
3592 | AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
3593 | : AAIsDeadValueImpl(IRP, A) {} | ||||
3594 | |||||
3595 | /// See AbstractAttribute::initialize(...). | ||||
3596 | void initialize(Attributor &A) override { | ||||
3597 | AAIsDeadValueImpl::initialize(A); | ||||
3598 | if (isa<UndefValue>(getAssociatedValue())) | ||||
3599 | indicatePessimisticFixpoint(); | ||||
3600 | } | ||||
3601 | |||||
3602 | /// See AbstractAttribute::updateImpl(...). | ||||
3603 | ChangeStatus updateImpl(Attributor &A) override { | ||||
3604 | // TODO: Once we have call site specific value information we can provide | ||||
3605 | // call site specific liveness information and then it makes | ||||
3606 | // sense to specialize attributes for call sites arguments instead of | ||||
3607 | // redirecting requests to the callee argument. | ||||
3608 | Argument *Arg = getAssociatedArgument(); | ||||
3609 | if (!Arg) | ||||
3610 | return indicatePessimisticFixpoint(); | ||||
3611 | const IRPosition &ArgPos = IRPosition::argument(*Arg); | ||||
3612 | auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED); | ||||
3613 | return clampStateAndIndicateChange(getState(), ArgAA.getState()); | ||||
3614 | } | ||||
3615 | |||||
3616 | /// See AbstractAttribute::manifest(...). | ||||
3617 | ChangeStatus manifest(Attributor &A) override { | ||||
3618 | CallBase &CB = cast<CallBase>(getAnchorValue()); | ||||
3619 | Use &U = CB.getArgOperandUse(getCallSiteArgNo()); | ||||
3620 | assert(!isa<UndefValue>(U.get()) &&(static_cast <bool> (!isa<UndefValue>(U.get()) && "Expected undef values to be filtered out!") ? void (0) : __assert_fail ("!isa<UndefValue>(U.get()) && \"Expected undef values to be filtered out!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 3621, __extension__ __PRETTY_FUNCTION__)) | ||||
3621 | "Expected undef values to be filtered out!")(static_cast <bool> (!isa<UndefValue>(U.get()) && "Expected undef values to be filtered out!") ? void (0) : __assert_fail ("!isa<UndefValue>(U.get()) && \"Expected undef values to be filtered out!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 3621, __extension__ __PRETTY_FUNCTION__)); | ||||
3622 | UndefValue &UV = *UndefValue::get(U->getType()); | ||||
3623 | if (A.changeUseAfterManifest(U, UV)) | ||||
3624 | return ChangeStatus::CHANGED; | ||||
3625 | return ChangeStatus::UNCHANGED; | ||||
3626 | } | ||||
3627 | |||||
3628 | /// See AbstractAttribute::trackStatistics() | ||||
3629 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead){ static llvm::Statistic NumIRCSArguments_IsDead = {"attributor" , "NumIRCSArguments_IsDead", ("Number of " "call site arguments" " marked '" "IsDead" "'")};; ++(NumIRCSArguments_IsDead); } } | ||||
3630 | }; | ||||
3631 | |||||
3632 | struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { | ||||
3633 | AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
3634 | : AAIsDeadFloating(IRP, A) {} | ||||
3635 | |||||
3636 | /// See AAIsDead::isAssumedDead(). | ||||
3637 | bool isAssumedDead() const override { | ||||
3638 | return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; | ||||
3639 | } | ||||
3640 | |||||
3641 | /// See AbstractAttribute::initialize(...). | ||||
3642 | void initialize(Attributor &A) override { | ||||
3643 | AAIsDeadFloating::initialize(A); | ||||
3644 | if (isa<UndefValue>(getAssociatedValue())) { | ||||
3645 | indicatePessimisticFixpoint(); | ||||
3646 | return; | ||||
3647 | } | ||||
3648 | |||||
3649 | // We track this separately as a secondary state. | ||||
3650 | IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); | ||||
3651 | } | ||||
3652 | |||||
3653 | /// See AbstractAttribute::updateImpl(...). | ||||
3654 | ChangeStatus updateImpl(Attributor &A) override { | ||||
3655 | ChangeStatus Changed = ChangeStatus::UNCHANGED; | ||||
3656 | if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { | ||||
3657 | IsAssumedSideEffectFree = false; | ||||
3658 | Changed = ChangeStatus::CHANGED; | ||||
3659 | } | ||||
3660 | if (!areAllUsesAssumedDead(A, getAssociatedValue())) | ||||
3661 | return indicatePessimisticFixpoint(); | ||||
3662 | return Changed; | ||||
3663 | } | ||||
3664 | |||||
3665 | /// See AbstractAttribute::trackStatistics() | ||||
3666 | void trackStatistics() const override { | ||||
3667 | if (IsAssumedSideEffectFree) | ||||
3668 | STATS_DECLTRACK_CSRET_ATTR(IsDead){ static llvm::Statistic NumIRCSReturn_IsDead = {"attributor" , "NumIRCSReturn_IsDead", ("Number of " "call site returns" " marked '" "IsDead" "'")};; ++(NumIRCSReturn_IsDead); } | ||||
3669 | else | ||||
3670 | STATS_DECLTRACK_CSRET_ATTR(UnusedResult){ static llvm::Statistic NumIRCSReturn_UnusedResult = {"attributor" , "NumIRCSReturn_UnusedResult", ("Number of " "call site returns" " marked '" "UnusedResult" "'")};; ++(NumIRCSReturn_UnusedResult ); } | ||||
3671 | } | ||||
3672 | |||||
3673 | /// See AbstractAttribute::getAsStr(). | ||||
3674 | const std::string getAsStr() const override { | ||||
3675 | return isAssumedDead() | ||||
3676 | ? "assumed-dead" | ||||
3677 | : (getAssumed() ? "assumed-dead-users" : "assumed-live"); | ||||
3678 | } | ||||
3679 | |||||
3680 | private: | ||||
3681 | bool IsAssumedSideEffectFree = true; | ||||
3682 | }; | ||||
3683 | |||||
3684 | struct AAIsDeadReturned : public AAIsDeadValueImpl { | ||||
3685 | AAIsDeadReturned(const IRPosition &IRP, Attributor &A) | ||||
3686 | : AAIsDeadValueImpl(IRP, A) {} | ||||
3687 | |||||
3688 | /// See AbstractAttribute::updateImpl(...). | ||||
3689 | ChangeStatus updateImpl(Attributor &A) override { | ||||
3690 | |||||
3691 | bool UsedAssumedInformation = false; | ||||
3692 | A.checkForAllInstructions([](Instruction &) { return true; }, *this, | ||||
3693 | {Instruction::Ret}, UsedAssumedInformation); | ||||
3694 | |||||
3695 | auto PredForCallSite = [&](AbstractCallSite ACS) { | ||||
3696 | if (ACS.isCallbackCall() || !ACS.getInstruction()) | ||||
3697 | return false; | ||||
3698 | return areAllUsesAssumedDead(A, *ACS.getInstruction()); | ||||
3699 | }; | ||||
3700 | |||||
3701 | if (!A.checkForAllCallSites(PredForCallSite, *this, true, | ||||
3702 | UsedAssumedInformation)) | ||||
3703 | return indicatePessimisticFixpoint(); | ||||
3704 | |||||
3705 | return ChangeStatus::UNCHANGED; | ||||
3706 | } | ||||
3707 | |||||
3708 | /// See AbstractAttribute::manifest(...). | ||||
3709 | ChangeStatus manifest(Attributor &A) override { | ||||
3710 | // TODO: Rewrite the signature to return void? | ||||
3711 | bool AnyChange = false; | ||||
3712 | UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); | ||||
3713 | auto RetInstPred = [&](Instruction &I) { | ||||
3714 | ReturnInst &RI = cast<ReturnInst>(I); | ||||
3715 | if (!isa<UndefValue>(RI.getReturnValue())) | ||||
3716 | AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); | ||||
3717 | return true; | ||||
3718 | }; | ||||
3719 | bool UsedAssumedInformation = false; | ||||
3720 | A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}, | ||||
3721 | UsedAssumedInformation); | ||||
3722 | return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; | ||||
3723 | } | ||||
3724 | |||||
3725 | /// See AbstractAttribute::trackStatistics() | ||||
3726 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead){ static llvm::Statistic NumIRFunctionReturn_IsDead = {"attributor" , "NumIRFunctionReturn_IsDead", ("Number of " "function returns" " marked '" "IsDead" "'")};; ++(NumIRFunctionReturn_IsDead); } } | ||||
3727 | }; | ||||
3728 | |||||
struct AAIsDeadFunction : public AAIsDead {
  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Function *F = getAnchorScope();
    if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
      indicatePessimisticFixpoint();
      return;
    }
    // Start liveness exploration at the first instruction of the entry block,
    // which is live by definition.
    ToBeExploredFrom.insert(&F->getEntryBlock().front());
    assumeLive(A, F->getEntryBlock());
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAnchorScope()->size()) + "][#TBEP " +
           std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
           std::to_string(KnownDeadEnds.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAnchorScope();

    // If no block was ever assumed live, the whole function is dead.
    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows to catch asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    KnownDeadEnds.set_union(ToBeExploredFrom);
    for (const Instruction *DeadEndI : KnownDeadEnds) {
      auto *CB = dyn_cast<CallBase>(DeadEndI);
      if (!CB)
        continue;
      const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
      bool MayReturn = !NoReturnAA.isAssumedNoReturn();
      if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
        continue;

      // Either mark the invoke's normal destination dead, or cut the code
      // path right after the no-return call.
      if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
        A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
      else
        A.changeToUnreachableAfterManifest(
            const_cast<Instruction *>(DeadEndI->getNextNode()));
      HasChanged = ChangeStatus::CHANGED;
    }

    STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB)) {
        A.deleteAfterManifest(BB);
        ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
        HasChanged = ChangeStatus::CHANGED;
      }

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...). Defined out-of-line below.
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AAIsDead::isEdgeDead(...). An edge is dead unless it was recorded
  /// as an assumed live edge during exploration.
  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
    assert(From->getParent() == getAnchorScope() &&
           To->getParent() == getAnchorScope() &&
           "Used AAIsDead of the wrong function");
    return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// Returns true if the function is assumed dead.
  // The function itself is never reported dead through this interface; dead
  // functions are handled via the (empty) AssumedLiveBlocks set in manifest.
  bool isAssumedDead() const override { return false; }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return false; }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAnchorScope() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumed(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAnchorScope() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it for sure dead.
    // Otherwise, it can still be after noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a liveness barrier it is live.
    const Instruction *PrevI = I->getPrevNode();
    while (PrevI) {
      if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
        return true;
      PrevI = PrevI->getPrevNode();
    }
    return false;
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal function called from \p BB should now be looked at.
  /// Returns true if \p BB was not assumed live before.
  bool assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return false;

    // We assume that all of BB is (probably) live now and if there are calls to
    // internal functions we will assume that those are now live as well. This
    // is a performance optimization for blocks with calls to a lot of internal
    // functions. It can however cause dead functions to be treated as live.
    for (const Instruction &I : BB)
      if (const auto *CB = dyn_cast<CallBase>(&I))
        if (const Function *F = CB->getCalledFunction())
          if (F->hasLocalLinkage())
            A.markLiveInternalFunction(*F);
    return true;
  }

  /// Collection of instructions that need to be explored again, e.g., we
  /// did assume they do not transfer control to (one of their) successors.
  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;

  /// Collection of instructions that are known to not transfer control.
  SmallSetVector<const Instruction *, 8> KnownDeadEnds;

  /// Collection of all assumed live edges
  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;
};
3892 | |||||
3893 | static bool | ||||
3894 | identifyAliveSuccessors(Attributor &A, const CallBase &CB, | ||||
3895 | AbstractAttribute &AA, | ||||
3896 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { | ||||
3897 | const IRPosition &IPos = IRPosition::callsite_function(CB); | ||||
3898 | |||||
3899 | const auto &NoReturnAA = | ||||
3900 | A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL); | ||||
3901 | if (NoReturnAA.isAssumedNoReturn()) | ||||
3902 | return !NoReturnAA.isKnownNoReturn(); | ||||
3903 | if (CB.isTerminator()) | ||||
3904 | AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); | ||||
3905 | else | ||||
3906 | AliveSuccessors.push_back(CB.getNextNode()); | ||||
3907 | return false; | ||||
3908 | } | ||||
3909 | |||||
3910 | static bool | ||||
3911 | identifyAliveSuccessors(Attributor &A, const InvokeInst &II, | ||||
3912 | AbstractAttribute &AA, | ||||
3913 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { | ||||
3914 | bool UsedAssumedInformation = | ||||
3915 | identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); | ||||
3916 | |||||
3917 | // First, determine if we can change an invoke to a call assuming the | ||||
3918 | // callee is nounwind. This is not possible if the personality of the | ||||
3919 | // function allows to catch asynchronous exceptions. | ||||
3920 | if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { | ||||
3921 | AliveSuccessors.push_back(&II.getUnwindDest()->front()); | ||||
3922 | } else { | ||||
3923 | const IRPosition &IPos = IRPosition::callsite_function(II); | ||||
3924 | const auto &AANoUnw = | ||||
3925 | A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL); | ||||
3926 | if (AANoUnw.isAssumedNoUnwind()) { | ||||
3927 | UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); | ||||
3928 | } else { | ||||
3929 | AliveSuccessors.push_back(&II.getUnwindDest()->front()); | ||||
3930 | } | ||||
3931 | } | ||||
3932 | return UsedAssumedInformation; | ||||
3933 | } | ||||
3934 | |||||
3935 | static bool | ||||
3936 | identifyAliveSuccessors(Attributor &A, const BranchInst &BI, | ||||
3937 | AbstractAttribute &AA, | ||||
3938 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { | ||||
3939 | bool UsedAssumedInformation = false; | ||||
3940 | if (BI.getNumSuccessors() == 1) { | ||||
3941 | AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); | ||||
3942 | } else { | ||||
3943 | Optional<Constant *> C = | ||||
3944 | A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation); | ||||
3945 | if (!C || isa_and_nonnull<UndefValue>(*C)) { | ||||
3946 | // No value yet, assume both edges are dead. | ||||
3947 | } else if (isa_and_nonnull<ConstantInt>(*C)) { | ||||
3948 | const BasicBlock *SuccBB = | ||||
3949 | BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue()); | ||||
3950 | AliveSuccessors.push_back(&SuccBB->front()); | ||||
3951 | } else { | ||||
3952 | AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); | ||||
3953 | AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); | ||||
3954 | UsedAssumedInformation = false; | ||||
3955 | } | ||||
3956 | } | ||||
3957 | return UsedAssumedInformation; | ||||
3958 | } | ||||
3959 | |||||
3960 | static bool | ||||
3961 | identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, | ||||
3962 | AbstractAttribute &AA, | ||||
3963 | SmallVectorImpl<const Instruction *> &AliveSuccessors) { | ||||
3964 | bool UsedAssumedInformation = false; | ||||
3965 | Optional<Constant *> C = | ||||
3966 | A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation); | ||||
3967 | if (!C || isa_and_nonnull<UndefValue>(C.value())) { | ||||
3968 | // No value yet, assume all edges are dead. | ||||
3969 | } else if (isa_and_nonnull<ConstantInt>(C.value())) { | ||||
3970 | for (const auto &CaseIt : SI.cases()) { | ||||
3971 | if (CaseIt.getCaseValue() == C.value()) { | ||||
3972 | AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); | ||||
3973 | return UsedAssumedInformation; | ||||
3974 | } | ||||
3975 | } | ||||
3976 | AliveSuccessors.push_back(&SI.getDefaultDest()->front()); | ||||
3977 | return UsedAssumedInformation; | ||||
3978 | } else { | ||||
3979 | for (const BasicBlock *SuccBB : successors(SI.getParent())) | ||||
3980 | AliveSuccessors.push_back(&SuccBB->front()); | ||||
3981 | } | ||||
3982 | return UsedAssumedInformation; | ||||
3983 | } | ||||
3984 | |||||
/// Explore the function from the current exploration points and propagate
/// liveness forward. Blocks/edges reached become (assumed) live; instructions
/// whose successor set rests on assumed information are queued for the next
/// update. Returns CHANGED if new live edges, dead ends, or exploration
/// points were discovered.
ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAnchorScope()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    // Fast forward for uninteresting instructions. We could look for UB here
    // though.
    while (!I->isTerminator() && !isa<CallBase>(I))
      I = I->getNextNode();

    AliveSuccessors.clear();

    // Dispatch to the per-instruction-kind helper to determine which
    // successors have to be considered live. Calls/invokes may end up with
    // no live successor at all (e.g., if assumed noreturn).
    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
    default:
      assert(I->isTerminator() &&
             "Expected non-terminators to be handled already!");
      for (const BasicBlock *SuccBB : successors(I->getParent()))
        AliveSuccessors.push_back(&SuccBB->front());
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      // The successor set relies on optimistic assumptions; re-explore from
      // this instruction in the next update in case assumptions change.
      NewToBeExploredFrom.insert(I);
    } else if (AliveSuccessors.empty() ||
               (I->isTerminator() &&
                AliveSuccessors.size() < I->getNumSuccessors())) {
      // A definitive dead end: not all (or no) successors are live and no
      // assumption was needed to establish that.
      if (KnownDeadEnds.insert(I))
        Change = ChangeStatus::CHANGED;
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        // Intra-block successor (a call's fall-through): keep exploring.
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // record the assumed live edge
        auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
        if (AssumedLiveEdges.insert(Edge).second)
          Change = ChangeStatus::CHANGED;
        // Only explore a block the first time it becomes (assumed) live.
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  // Check if the content of ToBeExploredFrom changed, ignore the order.
  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
      llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
        return !ToBeExploredFrom.count(I);
      })) {
    Change = ChangeStatus::CHANGED;
    ToBeExploredFrom = std::move(NewToBeExploredFrom);
  }

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) finished the exploration and (3) not
  // discovered any non-trivial dead end and (2) not ruled unreachable code
  // dead.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}
4090 | |||||
4091 | /// Liveness information for a call sites. | ||||
4092 | struct AAIsDeadCallSite final : AAIsDeadFunction { | ||||
4093 | AAIsDeadCallSite(const IRPosition &IRP, Attributor &A) | ||||
4094 | : AAIsDeadFunction(IRP, A) {} | ||||
4095 | |||||
4096 | /// See AbstractAttribute::initialize(...). | ||||
4097 | void initialize(Attributor &A) override { | ||||
4098 | // TODO: Once we have call site specific value information we can provide | ||||
4099 | // call site specific liveness information and then it makes | ||||
4100 | // sense to specialize attributes for call sites instead of | ||||
4101 | // redirecting requests to the callee. | ||||
4102 | llvm_unreachable("Abstract attributes for liveness are not "::llvm::llvm_unreachable_internal("Abstract attributes for liveness are not " "supported for call sites yet!", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4103) | ||||
4103 | "supported for call sites yet!")::llvm::llvm_unreachable_internal("Abstract attributes for liveness are not " "supported for call sites yet!", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 4103); | ||||
4104 | } | ||||
4105 | |||||
4106 | /// See AbstractAttribute::updateImpl(...). | ||||
4107 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4108 | return indicatePessimisticFixpoint(); | ||||
4109 | } | ||||
4110 | |||||
4111 | /// See AbstractAttribute::trackStatistics() | ||||
4112 | void trackStatistics() const override {} | ||||
4113 | }; | ||||
4114 | } // namespace | ||||
4115 | |||||
4116 | /// -------------------- Dereferenceable Argument Attribute -------------------- | ||||
4117 | |||||
4118 | namespace { | ||||
/// Common base for all dereferenceable deduction positions. Seeds the state
/// from IR attributes and harvests accessed bytes from uses in the
/// must-be-executed context.
struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
      : AADereferenceable(IRP, A) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Seed the known dereferenceable bytes from existing IR attributes,
    // both the plain and the _or_null variant.
    Value &V = *getAssociatedValue().stripPointerCasts();
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs, /* IgnoreSubsumingPositions */ false, &A);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    // Cache the nonnull AA without recording a dependence (DepClassTy::NONE);
    // presumably it backs isAssumedNonNull() used below — confirm in the
    // AADereferenceable declaration.
    const IRPosition &IRP = this->getIRPosition();
    NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);

    // Also take whatever the IR can prove about the cast-stripped value.
    bool CanBeNull, CanBeFreed;
    takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
        A.getDataLayout(), CanBeNull, CanBeFreed));

    // For function-interface positions, only reason if IPO on the function
    // is allowed; otherwise give up right away.
    bool IsFnInterface = IRP.isFnInterfaceKind();
    Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    // Harvest information from uses in the must-be-executed context.
    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// Helper function for collecting accessed bytes in must-be-executed-context
  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
                              DerefState &State) {
    const Value *UseV = U->get();
    if (!UseV->getType()->isPointerTy())
      return;

    // Only precise, non-volatile accesses through exactly this pointer
    // contribute accessed bytes.
    Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
    if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
      return;

    // Record the byte range [Offset, Offset + Size) if the access is based
    // on the associated value (with a constant offset).
    int64_t Offset;
    const Value *Base = GetPointerBaseWithConstantOffset(
        Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
    if (Base && Base == &getAssociatedValue())
      State.addAccessedBytes(Offset, Loc->Size.getValue());
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AADereferenceable::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
        A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
    LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
                      << " for instruction " << *I << "\n");

    addAccessedBytesForUse(A, U, I, State);
    State.takeKnownDerefBytesMaximum(DerefBytes);
    return TrackUse;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    // If the value is assumed non-null, plain dereferenceable subsumes
    // dereferenceable_or_null; drop the weaker attribute.
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  /// Emit dereferenceable or dereferenceable_or_null depending on whether
  /// the value is assumed non-null.
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
4222 | |||||
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
      : AADereferenceableImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // Collect the (assumed) simplified values for this position; if
    // simplification is not possible, fall back to the associated value.
    bool Stripped;
    bool UsedAssumedInformation = false;
    SmallVector<AA::ValueAndContext> Values;
    if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
                                      AA::AnyScope, UsedAssumedInformation)) {
      Values.push_back({getAssociatedValue(), getCtxI()});
      Stripped = false;
    } else {
      // "Stripped" means simplification looked through the associated value.
      Stripped = Values.size() != 1 ||
                 Values.front().getValue() != &getAssociatedValue();
    }

    const DataLayout &DL = A.getDataLayout();
    DerefState T;

    auto VisitValueCB = [&](const Value &V) -> bool {
      // Strip casts/GEPs and accumulate the constant offset so the base's
      // dereferenceable bytes can be adjusted by it below.
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base = stripAndAccumulateOffsets(
          A, *this, &V, DL, Offset, /* GetMinOffset */ false,
          /* AllowNonInbounds */ true);

      const auto &AA = A.getAAFor<AADereferenceable>(
          *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull, CanBeFreed;
        DerefBytes =
            Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        // Otherwise combine the state deduced for the base value.
        const DerefState &DS = AA.getState();
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // for overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // for the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    // Visit every simplified value; bail pessimistically if any of them
    // invalidates the temporary state.
    for (const auto &VAC : Values)
      if (!VisitValueCB(*VAC.getValue()))
        return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
4311 | |||||
4312 | /// Dereferenceable attribute for a return value. | ||||
4313 | struct AADereferenceableReturned final | ||||
4314 | : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { | ||||
4315 | AADereferenceableReturned(const IRPosition &IRP, Attributor &A) | ||||
4316 | : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( | ||||
4317 | IRP, A) {} | ||||
4318 | |||||
4319 | /// See AbstractAttribute::trackStatistics() | ||||
4320 | void trackStatistics() const override { | ||||
4321 | STATS_DECLTRACK_FNRET_ATTR(dereferenceable){ static llvm::Statistic NumIRFunctionReturn_dereferenceable = {"attributor", "NumIRFunctionReturn_dereferenceable", ("Number of " "function returns" " marked '" "dereferenceable" "'")};; ++( NumIRFunctionReturn_dereferenceable); } | ||||
4322 | } | ||||
4323 | }; | ||||
4324 | |||||
4325 | /// Dereferenceable attribute for an argument | ||||
4326 | struct AADereferenceableArgument final | ||||
4327 | : AAArgumentFromCallSiteArguments<AADereferenceable, | ||||
4328 | AADereferenceableImpl> { | ||||
4329 | using Base = | ||||
4330 | AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; | ||||
4331 | AADereferenceableArgument(const IRPosition &IRP, Attributor &A) | ||||
4332 | : Base(IRP, A) {} | ||||
4333 | |||||
4334 | /// See AbstractAttribute::trackStatistics() | ||||
4335 | void trackStatistics() const override { | ||||
4336 | STATS_DECLTRACK_ARG_ATTR(dereferenceable){ static llvm::Statistic NumIRArguments_dereferenceable = {"attributor" , "NumIRArguments_dereferenceable", ("Number of " "arguments" " marked '" "dereferenceable" "'")};; ++(NumIRArguments_dereferenceable ); } | ||||
4337 | } | ||||
4338 | }; | ||||
4339 | |||||
4340 | /// Dereferenceable attribute for a call site argument. | ||||
4341 | struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { | ||||
4342 | AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
4343 | : AADereferenceableFloating(IRP, A) {} | ||||
4344 | |||||
4345 | /// See AbstractAttribute::trackStatistics() | ||||
4346 | void trackStatistics() const override { | ||||
4347 | STATS_DECLTRACK_CSARG_ATTR(dereferenceable){ static llvm::Statistic NumIRCSArguments_dereferenceable = { "attributor", "NumIRCSArguments_dereferenceable", ("Number of " "call site arguments" " marked '" "dereferenceable" "'")};; ++ (NumIRCSArguments_dereferenceable); } | ||||
4348 | } | ||||
4349 | }; | ||||
4350 | |||||
4351 | /// Dereferenceable attribute deduction for a call site return value. | ||||
4352 | struct AADereferenceableCallSiteReturned final | ||||
4353 | : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { | ||||
4354 | using Base = | ||||
4355 | AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; | ||||
4356 | AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
4357 | : Base(IRP, A) {} | ||||
4358 | |||||
4359 | /// See AbstractAttribute::trackStatistics() | ||||
4360 | void trackStatistics() const override { | ||||
4361 | STATS_DECLTRACK_CS_ATTR(dereferenceable){ static llvm::Statistic NumIRCS_dereferenceable = {"attributor" , "NumIRCS_dereferenceable", ("Number of " "call site" " marked '" "dereferenceable" "'")};; ++(NumIRCS_dereferenceable); }; | ||||
4362 | } | ||||
4363 | }; | ||||
4364 | } // namespace | ||||
4365 | |||||
4366 | // ------------------------ Align Argument Attribute ------------------------ | ||||
4367 | |||||
4368 | namespace { | ||||
4369 | static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, | ||||
4370 | Value &AssociatedValue, const Use *U, | ||||
4371 | const Instruction *I, bool &TrackUse) { | ||||
4372 | // We need to follow common pointer manipulation uses to the accesses they | ||||
4373 | // feed into. | ||||
4374 | if (isa<CastInst>(I)) { | ||||
| |||||
4375 | // Follow all but ptr2int casts. | ||||
4376 | TrackUse = !isa<PtrToIntInst>(I); | ||||
4377 | return 0; | ||||
4378 | } | ||||
4379 | if (auto *GEP
| ||||
4380 | if (GEP->hasAllConstantIndices()) | ||||
4381 | TrackUse = true; | ||||
4382 | return 0; | ||||
4383 | } | ||||
4384 | |||||
4385 | MaybeAlign MA; | ||||
4386 | if (const auto *CB
| ||||
4387 | if (CB->isBundleOperand(U) || CB->isCallee(U)) | ||||
4388 | return 0; | ||||
4389 | |||||
4390 | unsigned ArgNo = CB->getArgOperandNo(U); | ||||
4391 | IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); | ||||
4392 | // As long as we only use known information there is no need to track | ||||
4393 | // dependences here. | ||||
4394 | auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE); | ||||
4395 | MA = MaybeAlign(AlignAA.getKnownAlign()); | ||||
4396 | } | ||||
4397 | |||||
4398 | const DataLayout &DL = A.getDataLayout(); | ||||
4399 | const Value *UseV = U->get(); | ||||
4400 | if (auto *SI
| ||||
4401 | if (SI->getPointerOperand() == UseV) | ||||
4402 | MA = SI->getAlign(); | ||||
4403 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { | ||||
4404 | if (LI->getPointerOperand() == UseV) | ||||
4405 | MA = LI->getAlign(); | ||||
4406 | } | ||||
4407 | |||||
4408 | if (!MA || *MA <= QueryingAA.getKnownAlign()) | ||||
4409 | return 0; | ||||
4410 | |||||
4411 | unsigned Alignment = MA->value(); | ||||
4412 | int64_t Offset; | ||||
4413 | |||||
4414 | if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { | ||||
4415 | if (Base == &AssociatedValue) { | ||||
4416 | // BasePointerAddr + Offset = Alignment * Q for some integer Q. | ||||
4417 | // So we can say that the maximum power of two which is a divisor of | ||||
4418 | // gcd(Offset, Alignment) is an alignment. | ||||
4419 | |||||
4420 | uint32_t gcd = std::gcd(uint32_t(abs((int32_t)Offset)), Alignment); | ||||
4421 | Alignment = llvm::PowerOf2Floor(gcd); | ||||
4422 | } | ||||
4423 | } | ||||
4424 | |||||
4425 | return Alignment; | ||||
4426 | } | ||||
4427 | |||||
/// Common base for all alignment deduction positions. Seeds the state from
/// IR attributes and uses, and annotates loads/stores when manifesting.
struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Seed the known alignment from existing `align` attributes.
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());

    // Also take what the IR can prove about the cast-stripped value.
    Value &V = *getAssociatedValue().stripPointerCasts();
    takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());

    // For function-interface positions, only reason if IPO on the function
    // is allowed; otherwise give up right away.
    if (getIRPosition().isFnInterfaceKind() &&
        (!getAnchorScope() ||
         !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
      indicatePessimisticFixpoint();
      return;
    }

    // Harvest alignment information from uses in the must-be-executed
    // context.
    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AssociatedValue = getAssociatedValue();
    for (const Use &U : AssociatedValue.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        // Raise the store's alignment if we deduced a stronger one for the
        // pointer operand.
        if (SI->getPointerOperand() == &AssociatedValue)
          if (SI->getAlign() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(getAssumedAlign());
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        // Same for loads through the pointer operand.
        if (LI->getPointerOperand() == &AssociatedValue)
          if (LI->getAlign() < getAssumedAlign()) {
            LI->setAlignment(getAssumedAlign());
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      }
    }

    ChangeStatus Changed = AAAlign::manifest(A);

    // If the alignment the IR already implies is at least as strong as what
    // we deduced, only report the load/store annotation changes.
    Align InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign >= getAssumedAlign())
      return LoadStoreChanged;
    return Changed | LoadStoreChanged;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check in
  //       the existing manifest method and a new one for AAAlignImpl that value
  //       to avoid making the alignment explicit if it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // An alignment of 1 is trivial; do not emit an attribute for it.
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(
          Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AAAlign::StateType &State) {
    bool TrackUse = false;

    unsigned int KnownAlign =
        getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
    State.takeKnownMaximum(KnownAlign);

    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "align<" + std::to_string(getKnownAlign().value()) + "-" +
           std::to_string(getAssumedAlign().value()) + ">";
  }
};
4517 | |||||
4518 | /// Align attribute for a floating value. | ||||
4519 | struct AAAlignFloating : AAAlignImpl { | ||||
4520 | AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} | ||||
4521 | |||||
4522 | /// See AbstractAttribute::updateImpl(...). | ||||
4523 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4524 | const DataLayout &DL = A.getDataLayout(); | ||||
4525 | |||||
4526 | bool Stripped; | ||||
4527 | bool UsedAssumedInformation = false; | ||||
4528 | SmallVector<AA::ValueAndContext> Values; | ||||
4529 | if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values, | ||||
4530 | AA::AnyScope, UsedAssumedInformation)) { | ||||
4531 | Values.push_back({getAssociatedValue(), getCtxI()}); | ||||
4532 | Stripped = false; | ||||
4533 | } else { | ||||
4534 | Stripped = Values.size() != 1 || | ||||
4535 | Values.front().getValue() != &getAssociatedValue(); | ||||
4536 | } | ||||
4537 | |||||
4538 | StateType T; | ||||
4539 | auto VisitValueCB = [&](Value &V) -> bool { | ||||
4540 | if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V)) | ||||
4541 | return true; | ||||
4542 | const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V), | ||||
4543 | DepClassTy::REQUIRED); | ||||
4544 | if (!Stripped && this == &AA) { | ||||
4545 | int64_t Offset; | ||||
4546 | unsigned Alignment = 1; | ||||
4547 | if (const Value *Base = | ||||
4548 | GetPointerBaseWithConstantOffset(&V, Offset, DL)) { | ||||
4549 | // TODO: Use AAAlign for the base too. | ||||
4550 | Align PA = Base->getPointerAlignment(DL); | ||||
4551 | // BasePointerAddr + Offset = Alignment * Q for some integer Q. | ||||
4552 | // So we can say that the maximum power of two which is a divisor of | ||||
4553 | // gcd(Offset, Alignment) is an alignment. | ||||
4554 | |||||
4555 | uint32_t gcd = | ||||
4556 | std::gcd(uint32_t(abs((int32_t)Offset)), uint32_t(PA.value())); | ||||
4557 | Alignment = llvm::PowerOf2Floor(gcd); | ||||
4558 | } else { | ||||
4559 | Alignment = V.getPointerAlignment(DL).value(); | ||||
4560 | } | ||||
4561 | // Use only IR information if we did not strip anything. | ||||
4562 | T.takeKnownMaximum(Alignment); | ||||
4563 | T.indicatePessimisticFixpoint(); | ||||
4564 | } else { | ||||
4565 | // Use abstract attribute information. | ||||
4566 | const AAAlign::StateType &DS = AA.getState(); | ||||
4567 | T ^= DS; | ||||
4568 | } | ||||
4569 | return T.isValidState(); | ||||
4570 | }; | ||||
4571 | |||||
4572 | for (const auto &VAC : Values) { | ||||
4573 | if (!VisitValueCB(*VAC.getValue())) | ||||
4574 | return indicatePessimisticFixpoint(); | ||||
4575 | } | ||||
4576 | |||||
4577 | // TODO: If we know we visited all incoming values, thus no are assumed | ||||
4578 | // dead, we can take the known information from the state T. | ||||
4579 | return clampStateAndIndicateChange(getState(), T); | ||||
4580 | } | ||||
4581 | |||||
4582 | /// See AbstractAttribute::trackStatistics() | ||||
4583 | void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align){ static llvm::Statistic NumIRFloating_align = {"attributor", "NumIRFloating_align", ("Number of floating values known to be '" "align" "'")};; ++(NumIRFloating_align); } } | ||||
4584 | }; | ||||
4585 | |||||
4586 | /// Align attribute for function return value. | ||||
4587 | struct AAAlignReturned final | ||||
4588 | : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { | ||||
4589 | using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>; | ||||
4590 | AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} | ||||
4591 | |||||
4592 | /// See AbstractAttribute::initialize(...). | ||||
4593 | void initialize(Attributor &A) override { | ||||
4594 | Base::initialize(A); | ||||
4595 | Function *F = getAssociatedFunction(); | ||||
4596 | if (!F || F->isDeclaration()) | ||||
4597 | indicatePessimisticFixpoint(); | ||||
4598 | } | ||||
4599 | |||||
4600 | /// See AbstractAttribute::trackStatistics() | ||||
4601 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned){ static llvm::Statistic NumIRFunctionReturn_aligned = {"attributor" , "NumIRFunctionReturn_aligned", ("Number of " "function returns" " marked '" "aligned" "'")};; ++(NumIRFunctionReturn_aligned ); } } | ||||
4602 | }; | ||||
4603 | |||||
4604 | /// Align attribute for function argument. | ||||
4605 | struct AAAlignArgument final | ||||
4606 | : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { | ||||
4607 | using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; | ||||
4608 | AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} | ||||
4609 | |||||
4610 | /// See AbstractAttribute::manifest(...). | ||||
4611 | ChangeStatus manifest(Attributor &A) override { | ||||
4612 | // If the associated argument is involved in a must-tail call we give up | ||||
4613 | // because we would need to keep the argument alignments of caller and | ||||
4614 | // callee in-sync. Just does not seem worth the trouble right now. | ||||
4615 | if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) | ||||
4616 | return ChangeStatus::UNCHANGED; | ||||
4617 | return Base::manifest(A); | ||||
4618 | } | ||||
4619 | |||||
4620 | /// See AbstractAttribute::trackStatistics() | ||||
4621 | void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned){ static llvm::Statistic NumIRArguments_aligned = {"attributor" , "NumIRArguments_aligned", ("Number of " "arguments" " marked '" "aligned" "'")};; ++(NumIRArguments_aligned); } } | ||||
4622 | }; | ||||
4623 | |||||
4624 | struct AAAlignCallSiteArgument final : AAAlignFloating { | ||||
4625 | AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
4626 | : AAAlignFloating(IRP, A) {} | ||||
4627 | |||||
4628 | /// See AbstractAttribute::manifest(...). | ||||
4629 | ChangeStatus manifest(Attributor &A) override { | ||||
4630 | // If the associated argument is involved in a must-tail call we give up | ||||
4631 | // because we would need to keep the argument alignments of caller and | ||||
4632 | // callee in-sync. Just does not seem worth the trouble right now. | ||||
4633 | if (Argument *Arg = getAssociatedArgument()) | ||||
4634 | if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) | ||||
4635 | return ChangeStatus::UNCHANGED; | ||||
4636 | ChangeStatus Changed = AAAlignImpl::manifest(A); | ||||
4637 | Align InheritAlign = | ||||
4638 | getAssociatedValue().getPointerAlignment(A.getDataLayout()); | ||||
4639 | if (InheritAlign >= getAssumedAlign()) | ||||
4640 | Changed = ChangeStatus::UNCHANGED; | ||||
4641 | return Changed; | ||||
4642 | } | ||||
4643 | |||||
4644 | /// See AbstractAttribute::updateImpl(Attributor &A). | ||||
4645 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4646 | ChangeStatus Changed = AAAlignFloating::updateImpl(A); | ||||
4647 | if (Argument *Arg = getAssociatedArgument()) { | ||||
4648 | // We only take known information from the argument | ||||
4649 | // so we do not need to track a dependence. | ||||
4650 | const auto &ArgAlignAA = A.getAAFor<AAAlign>( | ||||
4651 | *this, IRPosition::argument(*Arg), DepClassTy::NONE); | ||||
4652 | takeKnownMaximum(ArgAlignAA.getKnownAlign().value()); | ||||
4653 | } | ||||
4654 | return Changed; | ||||
4655 | } | ||||
4656 | |||||
4657 | /// See AbstractAttribute::trackStatistics() | ||||
4658 | void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned){ static llvm::Statistic NumIRCSArguments_aligned = {"attributor" , "NumIRCSArguments_aligned", ("Number of " "call site arguments" " marked '" "aligned" "'")};; ++(NumIRCSArguments_aligned); } } | ||||
4659 | }; | ||||
4660 | |||||
4661 | /// Align attribute deduction for a call site return value. | ||||
4662 | struct AAAlignCallSiteReturned final | ||||
4663 | : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { | ||||
4664 | using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; | ||||
4665 | AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
4666 | : Base(IRP, A) {} | ||||
4667 | |||||
4668 | /// See AbstractAttribute::initialize(...). | ||||
4669 | void initialize(Attributor &A) override { | ||||
4670 | Base::initialize(A); | ||||
4671 | Function *F = getAssociatedFunction(); | ||||
4672 | if (!F || F->isDeclaration()) | ||||
4673 | indicatePessimisticFixpoint(); | ||||
4674 | } | ||||
4675 | |||||
4676 | /// See AbstractAttribute::trackStatistics() | ||||
4677 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align){ static llvm::Statistic NumIRCS_align = {"attributor", "NumIRCS_align" , ("Number of " "call site" " marked '" "align" "'")};; ++(NumIRCS_align ); }; } | ||||
4678 | }; | ||||
4679 | } // namespace | ||||
4680 | |||||
4681 | /// ------------------ Function No-Return Attribute ---------------------------- | ||||
4682 | namespace { | ||||
4683 | struct AANoReturnImpl : public AANoReturn { | ||||
4684 | AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} | ||||
4685 | |||||
4686 | /// See AbstractAttribute::initialize(...). | ||||
4687 | void initialize(Attributor &A) override { | ||||
4688 | AANoReturn::initialize(A); | ||||
4689 | Function *F = getAssociatedFunction(); | ||||
4690 | if (!F || F->isDeclaration()) | ||||
4691 | indicatePessimisticFixpoint(); | ||||
4692 | } | ||||
4693 | |||||
4694 | /// See AbstractAttribute::getAsStr(). | ||||
4695 | const std::string getAsStr() const override { | ||||
4696 | return getAssumed() ? "noreturn" : "may-return"; | ||||
4697 | } | ||||
4698 | |||||
4699 | /// See AbstractAttribute::updateImpl(Attributor &A). | ||||
4700 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4701 | auto CheckForNoReturn = [](Instruction &) { return false; }; | ||||
4702 | bool UsedAssumedInformation = false; | ||||
4703 | if (!A.checkForAllInstructions(CheckForNoReturn, *this, | ||||
4704 | {(unsigned)Instruction::Ret}, | ||||
4705 | UsedAssumedInformation)) | ||||
4706 | return indicatePessimisticFixpoint(); | ||||
4707 | return ChangeStatus::UNCHANGED; | ||||
4708 | } | ||||
4709 | }; | ||||
4710 | |||||
4711 | struct AANoReturnFunction final : AANoReturnImpl { | ||||
4712 | AANoReturnFunction(const IRPosition &IRP, Attributor &A) | ||||
4713 | : AANoReturnImpl(IRP, A) {} | ||||
4714 | |||||
4715 | /// See AbstractAttribute::trackStatistics() | ||||
4716 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn){ static llvm::Statistic NumIRFunction_noreturn = {"attributor" , "NumIRFunction_noreturn", ("Number of " "functions" " marked '" "noreturn" "'")};; ++(NumIRFunction_noreturn); } } | ||||
4717 | }; | ||||
4718 | |||||
4719 | /// NoReturn attribute deduction for a call sites. | ||||
4720 | struct AANoReturnCallSite final : AANoReturnImpl { | ||||
4721 | AANoReturnCallSite(const IRPosition &IRP, Attributor &A) | ||||
4722 | : AANoReturnImpl(IRP, A) {} | ||||
4723 | |||||
4724 | /// See AbstractAttribute::initialize(...). | ||||
4725 | void initialize(Attributor &A) override { | ||||
4726 | AANoReturnImpl::initialize(A); | ||||
4727 | if (Function *F = getAssociatedFunction()) { | ||||
4728 | const IRPosition &FnPos = IRPosition::function(*F); | ||||
4729 | auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED); | ||||
4730 | if (!FnAA.isAssumedNoReturn()) | ||||
4731 | indicatePessimisticFixpoint(); | ||||
4732 | } | ||||
4733 | } | ||||
4734 | |||||
4735 | /// See AbstractAttribute::updateImpl(...). | ||||
4736 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4737 | // TODO: Once we have call site specific value information we can provide | ||||
4738 | // call site specific liveness information and then it makes | ||||
4739 | // sense to specialize attributes for call sites arguments instead of | ||||
4740 | // redirecting requests to the callee argument. | ||||
4741 | Function *F = getAssociatedFunction(); | ||||
4742 | const IRPosition &FnPos = IRPosition::function(*F); | ||||
4743 | auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED); | ||||
4744 | return clampStateAndIndicateChange(getState(), FnAA.getState()); | ||||
4745 | } | ||||
4746 | |||||
4747 | /// See AbstractAttribute::trackStatistics() | ||||
4748 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn){ static llvm::Statistic NumIRCS_noreturn = {"attributor", "NumIRCS_noreturn" , ("Number of " "call site" " marked '" "noreturn" "'")};; ++ (NumIRCS_noreturn); }; } | ||||
4749 | }; | ||||
4750 | } // namespace | ||||
4751 | |||||
4752 | /// ----------------------- Instance Info --------------------------------- | ||||
4753 | |||||
4754 | namespace { | ||||
4755 | /// A class to hold the state of for no-capture attributes. | ||||
4756 | struct AAInstanceInfoImpl : public AAInstanceInfo { | ||||
4757 | AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A) | ||||
4758 | : AAInstanceInfo(IRP, A) {} | ||||
4759 | |||||
4760 | /// See AbstractAttribute::initialize(...). | ||||
4761 | void initialize(Attributor &A) override { | ||||
4762 | Value &V = getAssociatedValue(); | ||||
4763 | if (auto *C = dyn_cast<Constant>(&V)) { | ||||
4764 | if (C->isThreadDependent()) | ||||
4765 | indicatePessimisticFixpoint(); | ||||
4766 | else | ||||
4767 | indicateOptimisticFixpoint(); | ||||
4768 | return; | ||||
4769 | } | ||||
4770 | if (auto *CB = dyn_cast<CallBase>(&V)) | ||||
4771 | if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() && | ||||
4772 | !CB->mayReadFromMemory()) { | ||||
4773 | indicateOptimisticFixpoint(); | ||||
4774 | return; | ||||
4775 | } | ||||
4776 | } | ||||
4777 | |||||
4778 | /// See AbstractAttribute::updateImpl(...). | ||||
4779 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4780 | ChangeStatus Changed = ChangeStatus::UNCHANGED; | ||||
4781 | |||||
4782 | Value &V = getAssociatedValue(); | ||||
4783 | const Function *Scope = nullptr; | ||||
4784 | if (auto *I = dyn_cast<Instruction>(&V)) | ||||
4785 | Scope = I->getFunction(); | ||||
4786 | if (auto *A = dyn_cast<Argument>(&V)) { | ||||
4787 | Scope = A->getParent(); | ||||
4788 | if (!Scope->hasLocalLinkage()) | ||||
4789 | return Changed; | ||||
4790 | } | ||||
4791 | if (!Scope) | ||||
4792 | return indicateOptimisticFixpoint(); | ||||
4793 | |||||
4794 | auto &NoRecurseAA = A.getAAFor<AANoRecurse>( | ||||
4795 | *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL); | ||||
4796 | if (NoRecurseAA.isAssumedNoRecurse()) | ||||
4797 | return Changed; | ||||
4798 | |||||
4799 | auto UsePred = [&](const Use &U, bool &Follow) { | ||||
4800 | const Instruction *UserI = dyn_cast<Instruction>(U.getUser()); | ||||
4801 | if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) || | ||||
4802 | isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { | ||||
4803 | Follow = true; | ||||
4804 | return true; | ||||
4805 | } | ||||
4806 | if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) || | ||||
4807 | (isa<StoreInst>(UserI) && | ||||
4808 | cast<StoreInst>(UserI)->getValueOperand() != U.get())) | ||||
4809 | return true; | ||||
4810 | if (auto *CB = dyn_cast<CallBase>(UserI)) { | ||||
4811 | // This check is not guaranteeing uniqueness but for now that we cannot | ||||
4812 | // end up with two versions of \p U thinking it was one. | ||||
4813 | if (!CB->getCalledFunction() || | ||||
4814 | !CB->getCalledFunction()->hasLocalLinkage()) | ||||
4815 | return true; | ||||
4816 | if (!CB->isArgOperand(&U)) | ||||
4817 | return false; | ||||
4818 | const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>( | ||||
4819 | *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)), | ||||
4820 | DepClassTy::OPTIONAL); | ||||
4821 | if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis()) | ||||
4822 | return false; | ||||
4823 | // If this call base might reach the scope again we might forward the | ||||
4824 | // argument back here. This is very conservative. | ||||
4825 | if (AA::isPotentiallyReachable( | ||||
4826 | A, *CB, *Scope, *this, | ||||
4827 | [Scope](const Function &Fn) { return &Fn != Scope; })) | ||||
4828 | return false; | ||||
4829 | return true; | ||||
4830 | } | ||||
4831 | return false; | ||||
4832 | }; | ||||
4833 | |||||
4834 | auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) { | ||||
4835 | if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) { | ||||
4836 | auto *Ptr = SI->getPointerOperand()->stripPointerCasts(); | ||||
4837 | if ((isa<AllocaInst>(Ptr) || isNoAliasCall(Ptr)) && | ||||
4838 | AA::isDynamicallyUnique(A, *this, *Ptr)) | ||||
4839 | return true; | ||||
4840 | } | ||||
4841 | return false; | ||||
4842 | }; | ||||
4843 | |||||
4844 | if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true, | ||||
4845 | DepClassTy::OPTIONAL, | ||||
4846 | /* IgnoreDroppableUses */ true, EquivalentUseCB)) | ||||
4847 | return indicatePessimisticFixpoint(); | ||||
4848 | |||||
4849 | return Changed; | ||||
4850 | } | ||||
4851 | |||||
4852 | /// See AbstractState::getAsStr(). | ||||
4853 | const std::string getAsStr() const override { | ||||
4854 | return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>"; | ||||
4855 | } | ||||
4856 | |||||
4857 | /// See AbstractAttribute::trackStatistics() | ||||
4858 | void trackStatistics() const override {} | ||||
4859 | }; | ||||
4860 | |||||
/// InstanceInfo attribute for floating values. All deduction logic is
/// inherited from AAInstanceInfoImpl.
struct AAInstanceInfoFloating : AAInstanceInfoImpl {
  AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAInstanceInfoImpl(IRP, A) {}
};
4866 | |||||
/// InstanceInfo attribute for function arguments. (The old comment wrongly
/// said "NoCapture"; this is the instance-info deduction.)
struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
  AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAInstanceInfoFloating(IRP, A) {}
};
4872 | |||||
4873 | /// InstanceInfo attribute for call site arguments. | ||||
4874 | struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl { | ||||
4875 | AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
4876 | : AAInstanceInfoImpl(IRP, A) {} | ||||
4877 | |||||
4878 | /// See AbstractAttribute::updateImpl(...). | ||||
4879 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4880 | // TODO: Once we have call site specific value information we can provide | ||||
4881 | // call site specific liveness information and then it makes | ||||
4882 | // sense to specialize attributes for call sites arguments instead of | ||||
4883 | // redirecting requests to the callee argument. | ||||
4884 | Argument *Arg = getAssociatedArgument(); | ||||
4885 | if (!Arg) | ||||
4886 | return indicatePessimisticFixpoint(); | ||||
4887 | const IRPosition &ArgPos = IRPosition::argument(*Arg); | ||||
4888 | auto &ArgAA = | ||||
4889 | A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED); | ||||
4890 | return clampStateAndIndicateChange(getState(), ArgAA.getState()); | ||||
4891 | } | ||||
4892 | }; | ||||
4893 | |||||
4894 | /// InstanceInfo attribute for function return value. | ||||
4895 | struct AAInstanceInfoReturned final : AAInstanceInfoImpl { | ||||
4896 | AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A) | ||||
4897 | : AAInstanceInfoImpl(IRP, A) { | ||||
4898 | llvm_unreachable("InstanceInfo is not applicable to function returns!")::llvm::llvm_unreachable_internal("InstanceInfo is not applicable to function returns!" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 4898); | ||||
4899 | } | ||||
4900 | |||||
4901 | /// See AbstractAttribute::initialize(...). | ||||
4902 | void initialize(Attributor &A) override { | ||||
4903 | llvm_unreachable("InstanceInfo is not applicable to function returns!")::llvm::llvm_unreachable_internal("InstanceInfo is not applicable to function returns!" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 4903); | ||||
4904 | } | ||||
4905 | |||||
4906 | /// See AbstractAttribute::updateImpl(...). | ||||
4907 | ChangeStatus updateImpl(Attributor &A) override { | ||||
4908 | llvm_unreachable("InstanceInfo is not applicable to function returns!")::llvm::llvm_unreachable_internal("InstanceInfo is not applicable to function returns!" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 4908); | ||||
4909 | } | ||||
4910 | }; | ||||
4911 | |||||
/// InstanceInfo attribute deduction for a call site return value. All
/// deduction logic is inherited from AAInstanceInfoFloating.
struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
  AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAInstanceInfoFloating(IRP, A) {}
};
4917 | } // namespace | ||||
4918 | |||||
4919 | /// ----------------------- Variable Capturing --------------------------------- | ||||
4920 | |||||
4921 | namespace { | ||||
/// A class to hold the state of no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // An existing attribute makes no-capture a known fact already.
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    // Interface positions can only be annotated if the function is amendable
    // through IPO.
    Function *AnchorScope = getAnchorScope();
    if (isFnInterfaceKind() &&
        (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    const Function *F =
        isArgumentPosition() ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (isArgumentPosition()) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care about
    // ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getCalleeArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          // Only the position's own argument can escape through the return;
          // any other "returned" argument pins the return value instead.
          if (u == unsigned(ArgNo))
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }

  /// Check the use \p U and update \p State accordingly. Return true if we
  /// should continue to update the state.
  bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
                bool &Follow) {
    Instruction *UInst = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
                      << *UInst << "\n");

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For stores we already checked if we can follow them, if they make it
    // here we give up.
    if (isa<StoreInst>(UInst))
      return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
                          /* Return */ false);

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst)) {
      // Returns in the anchor scope only capture through the return value;
      // returns in other functions are treated as a full capture.
      if (UInst->getFunction() == getAnchorScope())
        return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
                            /* Return */ true);
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB || !CB->isArgOperand(&U))
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CB->getArgOperandNo(&U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have a abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA =
        A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      // The callee may return the pointer; keep tracking the call's uses.
      Follow = true;
      return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed so we don't.
    return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return true if we should continue updating the
  /// state.
  static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
                           bool CapturedInInt, bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }
};
5102 | |||||
/// Fixpoint-iteration step for nocapture deduction.
///
/// Builds a fresh lattice state \p T from function-level facts (readonly,
/// nounwind, returned values) and then intersects it with the use-based
/// capture analysis over all (transitive) uses of the associated value.
ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  // For argument positions reason about the callee-side Argument, otherwise
  // about the IR value itself.
  Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
                                  : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const IRPosition &FnPos = IRPosition::function(*F);

  // Scratch state; merged into our own state at the end via
  // intersectAssumedBits so a single update cannot over-promise.
  AANoCapture::StateType T;

  // Readonly means we cannot capture through memory.
  bool IsKnown;
  if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
    T.addKnownBits(NOT_CAPTURED_IN_MEM);
    if (IsKnown)
      addKnownBits(NOT_CAPTURED_IN_MEM);
  }

  // Make sure all returned values are different than the underlying value.
  // TODO: we could do this in a more sophisticated way inside
  //       AAReturnedValues, e.g., track all values that escape through returns
  //       directly somehow.
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    if (!RVAA.getState().isValidState())
      return false;
    bool SeenConstant = false;
    for (const auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        // More than one returned constant could alias the associated value
        // after simplification; accept at most one.
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        return false;
    }
    return true;
  };

  const auto &NoUnwindAA =
      A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    // Unwinding is one way a value can escape through the "return" path; only
    // if the function does not unwind can we reason about returned values.
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      // Promote to *known* only if the supporting facts are themselves known
      // (nounwind known, returned values at a fixpoint).
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  // Pointers that are assumed dereferenceable (or null) need not be treated as
  // capturing when compared/branched on; DetermineUseCaptureKind uses this.
  auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  };

  auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
    switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
    case UseCaptureKind::NO_CAPTURE:
      return true;
    case UseCaptureKind::MAY_CAPTURE:
      return checkUse(A, T, U, Follow);
    case UseCaptureKind::PASSTHROUGH:
      // The user forwards the pointer (e.g. a cast/GEP); track its uses too.
      Follow = true;
      return true;
    }
    llvm_unreachable("Unexpected use capture kind!");
  };

  if (!A.checkForAllUses(UseCheck, *this, *V))
    return indicatePessimisticFixpoint();

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
5197 | |||||
/// NoCapture attribute for function arguments. All deduction logic lives in
/// AANoCaptureImpl; this class only contributes statistics tracking.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};
5206 | |||||
5207 | /// NoCapture attribute for call site arguments. | ||||
5208 | struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { | ||||
5209 | AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
5210 | : AANoCaptureImpl(IRP, A) {} | ||||
5211 | |||||
5212 | /// See AbstractAttribute::initialize(...). | ||||
5213 | void initialize(Attributor &A) override { | ||||
5214 | if (Argument *Arg = getAssociatedArgument()) | ||||
5215 | if (Arg->hasByValAttr()) | ||||
5216 | indicateOptimisticFixpoint(); | ||||
5217 | AANoCaptureImpl::initialize(A); | ||||
5218 | } | ||||
5219 | |||||
5220 | /// See AbstractAttribute::updateImpl(...). | ||||
5221 | ChangeStatus updateImpl(Attributor &A) override { | ||||
5222 | // TODO: Once we have call site specific value information we can provide | ||||
5223 | // call site specific liveness information and then it makes | ||||
5224 | // sense to specialize attributes for call sites arguments instead of | ||||
5225 | // redirecting requests to the callee argument. | ||||
5226 | Argument *Arg = getAssociatedArgument(); | ||||
5227 | if (!Arg) | ||||
5228 | return indicatePessimisticFixpoint(); | ||||
5229 | const IRPosition &ArgPos = IRPosition::argument(*Arg); | ||||
5230 | auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED); | ||||
5231 | return clampStateAndIndicateChange(getState(), ArgAA.getState()); | ||||
5232 | } | ||||
5233 | |||||
5234 | /// See AbstractAttribute::trackStatistics() | ||||
5235 | void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture){ static llvm::Statistic NumIRCSArguments_nocapture = {"attributor" , "NumIRCSArguments_nocapture", ("Number of " "call site arguments" " marked '" "nocapture" "'")};; ++(NumIRCSArguments_nocapture ); }}; | ||||
5236 | }; | ||||
5237 | |||||
/// NoCapture attribute for floating values. All deduction logic lives in
/// AANoCaptureImpl; this class only contributes statistics tracking.
struct AANoCaptureFloating final : AANoCaptureImpl {
  AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
  }
};
5248 | |||||
/// NoCapture attribute for function return value.
///
/// The nocapture attribute does not exist for return positions; presumably
/// this class exists only so the generic AA creation machinery has a type for
/// the position kind — every entry point, including the constructor, aborts.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
5269 | |||||
5270 | /// NoCapture attribute deduction for a call site return value. | ||||
5271 | struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { | ||||
5272 | AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
5273 | : AANoCaptureImpl(IRP, A) {} | ||||
5274 | |||||
5275 | /// See AbstractAttribute::initialize(...). | ||||
5276 | void initialize(Attributor &A) override { | ||||
5277 | const Function *F = getAnchorScope(); | ||||
5278 | // Check what state the associated function can actually capture. | ||||
5279 | determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); | ||||
5280 | } | ||||
5281 | |||||
5282 | /// See AbstractAttribute::trackStatistics() | ||||
5283 | void trackStatistics() const override { | ||||
5284 | STATS_DECLTRACK_CSRET_ATTR(nocapture){ static llvm::Statistic NumIRCSReturn_nocapture = {"attributor" , "NumIRCSReturn_nocapture", ("Number of " "call site returns" " marked '" "nocapture" "'")};; ++(NumIRCSReturn_nocapture); } | ||||
5285 | } | ||||
5286 | }; | ||||
5287 | } // namespace | ||||
5288 | |||||
5289 | /// ------------------ Value Simplify Attribute ---------------------------- | ||||
5290 | |||||
/// Merge \p Other into the tracked simplified value lattice.
///
/// Returns false when the merge collapsed the state to "no simplification"
/// (the nullptr lattice element), true otherwise.
bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
  // FIXME: Add a typecast support.
  SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
      SimplifiedAssociatedValue, Other, Ty);
  // A contained nullptr means the lattice bottom: no common simplification.
  if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
    return false;

  LLVM_DEBUG({
    if (SimplifiedAssociatedValue)
      dbgs() << "[ValueSimplify] is assumed to be "
             << **SimplifiedAssociatedValue << "\n";
    else
      dbgs() << "[ValueSimplify] is assumed to be <none>\n";
  });
  return true;
}
5307 | |||||
namespace {
/// Common base for AAValueSimplify implementations.
///
/// Tracks a single "simplified associated value" lattice
/// (SimplifiedAssociatedValue): llvm::None means "not determined yet",
/// a contained nullptr means "no simplification", otherwise it holds the
/// replacement value. Also provides the machinery to re-materialize that
/// value (and, transitively, the instructions it depends on) at a use site.
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
      : AAValueSimplify(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Void values cannot be simplified to anything.
    if (getAssociatedValue().getType()->isVoidTy())
      indicatePessimisticFixpoint();
    // A user-provided simplification callback owns this position; do not
    // compete with it.
    if (A.hasSimplificationCallback(getIRPosition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    LLVM_DEBUG({
      dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
      if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
        dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
    });
    return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
                          : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    return SimplifiedAssociatedValue;
  }

  /// Ensure the return value is \p V with type \p Ty, if not possible return
  /// nullptr. If \p Check is true we will only verify such an operation would
  /// succeed and return a non-nullptr value if that is the case. No IR is
  /// generated or modified.
  static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
                           bool Check) {
    if (auto *TypedV = AA::getWithType(V, Ty))
      return TypedV;
    if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
      return Check ? &V
                   : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
                                                                      "", CtxI);
    return nullptr;
  }

  /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
  /// If \p Check is true we will only verify such an operation would succeed
  /// and return a non-nullptr value if that is the case. No IR is generated or
  /// modified.
  static Value *reproduceInst(Attributor &A,
                              const AbstractAttribute &QueryingAA,
                              Instruction &I, Type &Ty, Instruction *CtxI,
                              bool Check, ValueToValueMapTy &VMap) {
    assert(CtxI && "Cannot reproduce an instruction without context!");
    // Only speculate instructions that neither read memory nor have other
    // effects; everything else is unsafe to re-execute at CtxI.
    if (Check && (I.mayReadFromMemory() ||
                  !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
                                                /* TLI */ nullptr)))
      return nullptr;
    // First reproduce (or, in check mode, validate) all operands.
    for (Value *Op : I.operands()) {
      Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
      if (!NewOp) {
        assert(Check && "Manifest of new value unexpectedly failed!");
        return nullptr;
      }
      if (!Check)
        VMap[Op] = NewOp;
    }
    if (Check)
      return &I;

    Instruction *CloneI = I.clone();
    // TODO: Try to salvage debug information here.
    CloneI->setDebugLoc(DebugLoc());
    VMap[&I] = CloneI;
    CloneI->insertBefore(CtxI);
    // Rewrite the clone's operands through VMap to the reproduced values.
    RemapInstruction(CloneI, VMap);
    return CloneI;
  }

  /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
  /// If \p Check is true we will only verify such an operation would succeed
  /// and return a non-nullptr value if that is the case. No IR is generated or
  /// modified.
  static Value *reproduceValue(Attributor &A,
                               const AbstractAttribute &QueryingAA, Value &V,
                               Type &Ty, Instruction *CtxI, bool Check,
                               ValueToValueMapTy &VMap) {
    // Already reproduced earlier in this walk?
    if (const auto &NewV = VMap.lookup(&V))
      return NewV;
    bool UsedAssumedInformation = false;
    Optional<Value *> SimpleV = A.getAssumedSimplified(
        V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
    // No simplified value determined at all: any value works, use poison.
    if (!SimpleV.has_value())
      return PoisonValue::get(&Ty);
    Value *EffectiveV = &V;
    if (SimpleV.value())
      EffectiveV = SimpleV.value();
    if (auto *C = dyn_cast<Constant>(EffectiveV))
      return C;
    if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
                                      A.getInfoCache()))
      return ensureType(A, *EffectiveV, Ty, CtxI, Check);
    // The value does not dominate CtxI; try to recreate its defining
    // instruction (and operands) right before CtxI.
    if (auto *I = dyn_cast<Instruction>(EffectiveV))
      if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
        return ensureType(A, *NewV, Ty, CtxI, Check);
    return nullptr;
  }

  /// Return a value we can use as replacement for the associated one, or
  /// nullptr if we don't have one that makes sense.
  Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
    Value *NewV = SimplifiedAssociatedValue
                      ? SimplifiedAssociatedValue.value()
                      : UndefValue::get(getAssociatedType());
    if (NewV && NewV != &getAssociatedValue()) {
      ValueToValueMapTy VMap;
      // First verify we can reproduce the value with the required type at the
      // context location before we actually start modifying the IR.
      if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
                         /* CheckOnly */ true, VMap))
        return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
                              /* CheckOnly */ false, VMap);
    }
    return nullptr;
  }

  /// Helper function for querying AAValueSimplify and updating candidate.
  /// \param IRP The value position we are trying to unify with SimplifiedValue
  bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                      const IRPosition &IRP, bool Simplify = true) {
    bool UsedAssumedInformation = false;
    Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
    if (Simplify)
      QueryingValueSimplified = A.getAssumedSimplified(
          IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
    return unionAssumed(QueryingValueSimplified);
  }

  /// Returns a candidate is found or not
  template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
    // The fallback AAs queried below only reason about integers.
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    // This will also pass the call base context.
    const auto &AA =
        A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);

    Optional<Constant *> COpt = AA.getAssumedConstant(A);

    if (!COpt) {
      SimplifiedAssociatedValue = llvm::None;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    if (auto *C = *COpt) {
      SimplifiedAssociatedValue = C;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }

  /// Ask other constant-deducing AAs for a simplification candidate.
  bool askSimplifiedValueForOtherAAs(Attributor &A) {
    if (askSimplifiedValueFor<AAValueConstantRange>(A))
      return true;
    if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
      return true;
    return false;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &U : getAssociatedValue().uses()) {
      // Check if we need to adjust the insertion point to make sure the IR is
      // valid.
      Instruction *IP = dyn_cast<Instruction>(U.getUser());
      if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
        IP = PHI->getIncomingBlock(U)->getTerminator();
      if (auto *NewV = manifestReplacementValue(A, IP)) {
        LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
                          << " -> " << *NewV << " :: " << *this << "\n");
        if (A.changeUseAfterManifest(U, *NewV))
          Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    // Pessimistic result: the value simplifies only to itself.
    SimplifiedAssociatedValue = &getAssociatedValue();
    return AAValueSimplify::indicatePessimisticFixpoint();
  }
};
5506 | |||||
5507 | struct AAValueSimplifyArgument final : AAValueSimplifyImpl { | ||||
5508 | AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A) | ||||
5509 | : AAValueSimplifyImpl(IRP, A) {} | ||||
5510 | |||||
5511 | void initialize(Attributor &A) override { | ||||
5512 | AAValueSimplifyImpl::initialize(A); | ||||
5513 | if (!getAnchorScope() || getAnchorScope()->isDeclaration()) | ||||
5514 | indicatePessimisticFixpoint(); | ||||
5515 | if (hasAttr({Attribute::InAlloca, Attribute::Preallocated, | ||||
5516 | Attribute::StructRet, Attribute::Nest, Attribute::ByVal}, | ||||
5517 | /* IgnoreSubsumingPositions */ true)) | ||||
5518 | indicatePessimisticFixpoint(); | ||||
5519 | } | ||||
5520 | |||||
5521 | /// See AbstractAttribute::updateImpl(...). | ||||
5522 | ChangeStatus updateImpl(Attributor &A) override { | ||||
5523 | // Byval is only replacable if it is readonly otherwise we would write into | ||||
5524 | // the replaced value and not the copy that byval creates implicitly. | ||||
5525 | Argument *Arg = getAssociatedArgument(); | ||||
5526 | if (Arg->hasByValAttr()) { | ||||
5527 | // TODO: We probably need to verify synchronization is not an issue, e.g., | ||||
5528 | // there is no race by not copying a constant byval. | ||||
5529 | bool IsKnown; | ||||
5530 | if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) | ||||
5531 | return indicatePessimisticFixpoint(); | ||||
5532 | } | ||||
5533 | |||||
5534 | auto Before = SimplifiedAssociatedValue; | ||||
5535 | |||||
5536 | auto PredForCallSite = [&](AbstractCallSite ACS) { | ||||
5537 | const IRPosition &ACSArgPos = | ||||
5538 | IRPosition::callsite_argument(ACS, getCallSiteArgNo()); | ||||
5539 | // Check if a coresponding argument was found or if it is on not | ||||
5540 | // associated (which can happen for callback calls). | ||||
5541 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) | ||||
5542 | return false; | ||||
5543 | |||||
5544 | // Simplify the argument operand explicitly and check if the result is | ||||
5545 | // valid in the current scope. This avoids refering to simplified values | ||||
5546 | // in other functions, e.g., we don't want to say a an argument in a | ||||
5547 | // static function is actually an argument in a different function. | ||||
5548 | bool UsedAssumedInformation = false; | ||||
5549 | Optional<Constant *> SimpleArgOp = | ||||
5550 | A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation); | ||||
5551 | if (!SimpleArgOp) | ||||
5552 | return true; | ||||
5553 | if (!SimpleArgOp.value()) | ||||
5554 | return false; | ||||
5555 | if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp)) | ||||
5556 | return false; | ||||
5557 | return unionAssumed(*SimpleArgOp); | ||||
5558 | }; | ||||
5559 | |||||
5560 | // Generate a answer specific to a call site context. | ||||
5561 | bool Success; | ||||
5562 | bool UsedAssumedInformation = false; | ||||
5563 | if (hasCallBaseContext() && | ||||
5564 | getCallBaseContext()->getCalledFunction() == Arg->getParent()) | ||||
5565 | Success = PredForCallSite( | ||||
5566 | AbstractCallSite(&getCallBaseContext()->getCalledOperandUse())); | ||||
5567 | else | ||||
5568 | Success = A.checkForAllCallSites(PredForCallSite, *this, true, | ||||
5569 | UsedAssumedInformation); | ||||
5570 | |||||
5571 | if (!Success) | ||||
5572 | if (!askSimplifiedValueForOtherAAs(A)) | ||||
5573 | return indicatePessimisticFixpoint(); | ||||
5574 | |||||
5575 | // If a candidate was found in this update, return CHANGED. | ||||
5576 | return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED | ||||
5577 | : ChangeStatus ::CHANGED; | ||||
5578 | } | ||||
5579 | |||||
5580 | /// See AbstractAttribute::trackStatistics() | ||||
5581 | void trackStatistics() const override { | ||||
5582 | STATS_DECLTRACK_ARG_ATTR(value_simplify){ static llvm::Statistic NumIRArguments_value_simplify = {"attributor" , "NumIRArguments_value_simplify", ("Number of " "arguments" " marked '" "value_simplify" "'")};; ++(NumIRArguments_value_simplify); } | ||||
5583 | } | ||||
5584 | }; | ||||
5585 | |||||
/// Value simplification for the function return position: unify the
/// simplified values of all returned operands.
struct AAValueSimplifyReturned : AAValueSimplifyImpl {
  AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    // An invalid state must not advertise a replacement value.
    if (!isValidState())
      return nullptr;
    return SimplifiedAssociatedValue;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto ReturnInstCB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      // NOTE(review): getReturnValue() is dereferenced unconditionally;
      // presumably void-typed positions never reach here because
      // AAValueSimplifyImpl::initialize fixpoints them — confirm.
      return checkAndUpdate(
          A, *this,
          IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                   UsedAssumedInformation))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus ::CHANGED;
  }

  ChangeStatus manifest(Attributor &A) override {
    // We queried AAValueSimplify for the returned values so they will be
    // replaced if a simplified form was found. Nothing to do here.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_simplify)
  }
};
5630 | |||||
5631 | struct AAValueSimplifyFloating : AAValueSimplifyImpl { | ||||
5632 | AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) | ||||
5633 | : AAValueSimplifyImpl(IRP, A) {} | ||||
5634 | |||||
5635 | /// See AbstractAttribute::initialize(...). | ||||
5636 | void initialize(Attributor &A) override { | ||||
5637 | AAValueSimplifyImpl::initialize(A); | ||||
5638 | Value &V = getAnchorValue(); | ||||
5639 | |||||
5640 | // TODO: add other stuffs | ||||
5641 | if (isa<Constant>(V)) | ||||
5642 | indicatePessimisticFixpoint(); | ||||
5643 | } | ||||
5644 | |||||
5645 | /// See AbstractAttribute::updateImpl(...). | ||||
5646 | ChangeStatus updateImpl(Attributor &A) override { | ||||
5647 | auto Before = SimplifiedAssociatedValue; | ||||
5648 | if (!askSimplifiedValueForOtherAAs(A)) | ||||
5649 | return indicatePessimisticFixpoint(); | ||||
5650 | |||||
5651 | // If a candidate was found in this update, return CHANGED. | ||||
5652 | return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED | ||||
5653 | : ChangeStatus ::CHANGED; | ||||
5654 | } | ||||
5655 | |||||
5656 | /// See AbstractAttribute::trackStatistics() | ||||
5657 | void trackStatistics() const override { | ||||
5658 | STATS_DECLTRACK_FLOATING_ATTR(value_simplify){ static llvm::Statistic NumIRFloating_value_simplify = {"attributor" , "NumIRFloating_value_simplify", ("Number of floating values known to be '" "value_simplify" "'")};; ++(NumIRFloating_value_simplify); } | ||||
5659 | } | ||||
5660 | }; | ||||
5661 | |||||
/// Value simplification for function positions: there is no value to
/// simplify, so the state is fixed immediately in initialize.
struct AAValueSimplifyFunction : AAValueSimplifyImpl {
  AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // "No simplification" plus an optimistic fixpoint ensures updateImpl is
    // never invoked for this position.
    SimplifiedAssociatedValue = nullptr;
    indicateOptimisticFixpoint();
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable(
        "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(value_simplify)
  }
};
5681 | |||||
/// AAValueSimplify attribute for call-site positions.
///
/// Behaves exactly like the function variant (fixed in initialize, update is
/// unreachable); only the statistics bucket differs.
struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFunction(IRP, A) {}
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};
5690 | |||||
/// AAValueSimplify attribute for the value returned at a call site.
struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    Function *Fn = getAssociatedFunction();
    if (!Fn) {
      // Indirect or unknown callee; nothing we can reason about.
      indicatePessimisticFixpoint();
      return;
    }
    // If the callee forwards an argument via the `returned` attribute, the
    // call result simplifies to that call-site argument. At most one argument
    // can carry `returned`, hence the unconditional return inside the loop.
    for (Argument &Arg : Fn->args()) {
      if (Arg.hasReturnedAttr()) {
        auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
                                                 Arg.getArgNo());
        // The callee may have more (varargs) or fewer matching operands than
        // the call site; only use the position if it maps cleanly.
        if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
            checkAndUpdate(A, *this, IRP))
          indicateOptimisticFixpoint();
        else
          indicatePessimisticFixpoint();
        return;
      }
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;
    auto &RetAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);
    // Meet every (translated) returned value of the callee into our lattice;
    // bail out of the traversal as soon as the lattice collapses to bottom
    // (represented as Optional<Value *>(nullptr)).
    auto PredForReturned =
        [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
          bool UsedAssumedInformation = false;
          // Returned values are in the callee's context; rephrase them in
          // terms of the call site (e.g. map arguments to actual operands).
          Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
              &RetVal, *cast<CallBase>(getCtxI()), *this,
              UsedAssumedInformation);
          SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
              SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
          return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
        };
    if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};
5743 | |||||
/// AAValueSimplify attribute for call-site arguments.
struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFloating(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    // TODO: We should avoid simplification duplication to begin with.
    // If a floating-value AA for the same underlying value exists and is
    // valid, it will perform the replacement; doing it here as well would
    // duplicate the rewrite.
    auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
        IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
    if (FloatAA && FloatAA->getState().isValidState())
      return Changed;

    // Replace just this argument operand's use, not every use of the value.
    if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
      Use &U = cast<CallBase>(&getAnchorValue())
                   ->getArgOperandUse(getCallSiteArgNo());
      if (A.changeUseAfterManifest(U, *NewV))
        Changed = ChangeStatus::CHANGED;
    }

    // Combine with whatever the base class manifests (e.g. attributes).
    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};
5771 | } // namespace | ||||
5772 | |||||
5773 | /// ----------------------- Heap-To-Stack Conversion --------------------------- | ||||
5774 | namespace { | ||||
5775 | struct AAHeapToStackFunction final : public AAHeapToStack { | ||||
5776 | |||||
5777 | struct AllocationInfo { | ||||
5778 | /// The call that allocates the memory. | ||||
5779 | CallBase *const CB; | ||||
5780 | |||||
5781 | /// The library function id for the allocation. | ||||
5782 | LibFunc LibraryFunctionId = NotLibFunc; | ||||
5783 | |||||
5784 | /// The status wrt. a rewrite. | ||||
5785 | enum { | ||||
5786 | STACK_DUE_TO_USE, | ||||
5787 | STACK_DUE_TO_FREE, | ||||
5788 | INVALID, | ||||
5789 | } Status = STACK_DUE_TO_USE; | ||||
5790 | |||||
5791 | /// Flag to indicate if we encountered a use that might free this allocation | ||||
5792 | /// but which is not in the deallocation infos. | ||||
5793 | bool HasPotentiallyFreeingUnknownUses = false; | ||||
5794 | |||||
5795 | /// Flag to indicate that we should place the new alloca in the function | ||||
5796 | /// entry block rather than where the call site (CB) is. | ||||
5797 | bool MoveAllocaIntoEntry = true; | ||||
5798 | |||||
5799 | /// The set of free calls that use this allocation. | ||||
5800 | SmallSetVector<CallBase *, 1> PotentialFreeCalls{}; | ||||
5801 | }; | ||||
5802 | |||||
5803 | struct DeallocationInfo { | ||||
5804 | /// The call that deallocates the memory. | ||||
5805 | CallBase *const CB; | ||||
5806 | /// The value freed by the call. | ||||
5807 | Value *FreedOp; | ||||
5808 | |||||
5809 | /// Flag to indicate if we don't know all objects this deallocation might | ||||
5810 | /// free. | ||||
5811 | bool MightFreeUnknownObjects = false; | ||||
5812 | |||||
5813 | /// The set of allocation calls that are potentially freed. | ||||
5814 | SmallSetVector<CallBase *, 1> PotentialAllocationCalls{}; | ||||
5815 | }; | ||||
5816 | |||||
5817 | AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) | ||||
5818 | : AAHeapToStack(IRP, A) {} | ||||
5819 | |||||
5820 | ~AAHeapToStackFunction() { | ||||
5821 | // Ensure we call the destructor so we release any memory allocated in the | ||||
5822 | // sets. | ||||
5823 | for (auto &It : AllocationInfos) | ||||
5824 | It.second->~AllocationInfo(); | ||||
5825 | for (auto &It : DeallocationInfos) | ||||
5826 | It.second->~DeallocationInfo(); | ||||
5827 | } | ||||
5828 | |||||
5829 | void initialize(Attributor &A) override { | ||||
5830 | AAHeapToStack::initialize(A); | ||||
5831 | |||||
5832 | const Function *F = getAnchorScope(); | ||||
5833 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); | ||||
5834 | |||||
5835 | auto AllocationIdentifierCB = [&](Instruction &I) { | ||||
5836 | CallBase *CB = dyn_cast<CallBase>(&I); | ||||
5837 | if (!CB) | ||||
5838 | return true; | ||||
5839 | if (Value *FreedOp = getFreedOperand(CB, TLI)) { | ||||
5840 | DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB, FreedOp}; | ||||
5841 | return true; | ||||
5842 | } | ||||
5843 | // To do heap to stack, we need to know that the allocation itself is | ||||
5844 | // removable once uses are rewritten, and that we can initialize the | ||||
5845 | // alloca to the same pattern as the original allocation result. | ||||
5846 | if (isRemovableAlloc(CB, TLI)) { | ||||
5847 | auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext()); | ||||
5848 | if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) { | ||||
5849 | AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB}; | ||||
5850 | AllocationInfos[CB] = AI; | ||||
5851 | if (TLI) | ||||
5852 | TLI->getLibFunc(*CB, AI->LibraryFunctionId); | ||||
5853 | } | ||||
5854 | } | ||||
5855 | return true; | ||||
5856 | }; | ||||
5857 | |||||
5858 | bool UsedAssumedInformation = false; | ||||
5859 | bool Success = A.checkForAllCallLikeInstructions( | ||||
5860 | AllocationIdentifierCB, *this, UsedAssumedInformation, | ||||
5861 | /* CheckBBLivenessOnly */ false, | ||||
5862 | /* CheckPotentiallyDead */ true); | ||||
5863 | (void)Success; | ||||
5864 | assert(Success && "Did not expect the call base visit callback to fail!")(static_cast <bool> (Success && "Did not expect the call base visit callback to fail!" ) ? void (0) : __assert_fail ("Success && \"Did not expect the call base visit callback to fail!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5864, __extension__ __PRETTY_FUNCTION__)); | ||||
5865 | |||||
5866 | Attributor::SimplifictionCallbackTy SCB = | ||||
5867 | [](const IRPosition &, const AbstractAttribute *, | ||||
5868 | bool &) -> Optional<Value *> { return nullptr; }; | ||||
5869 | for (const auto &It : AllocationInfos) | ||||
5870 | A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), | ||||
5871 | SCB); | ||||
5872 | for (const auto &It : DeallocationInfos) | ||||
5873 | A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), | ||||
5874 | SCB); | ||||
5875 | } | ||||
5876 | |||||
5877 | const std::string getAsStr() const override { | ||||
5878 | unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; | ||||
5879 | for (const auto &It : AllocationInfos) { | ||||
5880 | if (It.second->Status == AllocationInfo::INVALID) | ||||
5881 | ++NumInvalidMallocs; | ||||
5882 | else | ||||
5883 | ++NumH2SMallocs; | ||||
5884 | } | ||||
5885 | return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" + | ||||
5886 | std::to_string(NumInvalidMallocs); | ||||
5887 | } | ||||
5888 | |||||
5889 | /// See AbstractAttribute::trackStatistics(). | ||||
5890 | void trackStatistics() const override { | ||||
5891 | STATS_DECL(static llvm::Statistic NumIRFunction_MallocCalls = {"attributor" , "NumIRFunction_MallocCalls", "Number of malloc/calloc/aligned_alloc calls converted to allocas" };; | ||||
5892 | MallocCalls, Function,static llvm::Statistic NumIRFunction_MallocCalls = {"attributor" , "NumIRFunction_MallocCalls", "Number of malloc/calloc/aligned_alloc calls converted to allocas" };; | ||||
5893 | "Number of malloc/calloc/aligned_alloc calls converted to allocas")static llvm::Statistic NumIRFunction_MallocCalls = {"attributor" , "NumIRFunction_MallocCalls", "Number of malloc/calloc/aligned_alloc calls converted to allocas" };;; | ||||
5894 | for (const auto &It : AllocationInfos) | ||||
5895 | if (It.second->Status != AllocationInfo::INVALID) | ||||
5896 | ++BUILD_STAT_NAME(MallocCalls, Function)NumIRFunction_MallocCalls; | ||||
5897 | } | ||||
5898 | |||||
5899 | bool isAssumedHeapToStack(const CallBase &CB) const override { | ||||
5900 | if (isValidState()) | ||||
5901 | if (AllocationInfo *AI = | ||||
5902 | AllocationInfos.lookup(const_cast<CallBase *>(&CB))) | ||||
5903 | return AI->Status != AllocationInfo::INVALID; | ||||
5904 | return false; | ||||
5905 | } | ||||
5906 | |||||
5907 | bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { | ||||
5908 | if (!isValidState()) | ||||
5909 | return false; | ||||
5910 | |||||
5911 | for (const auto &It : AllocationInfos) { | ||||
5912 | AllocationInfo &AI = *It.second; | ||||
5913 | if (AI.Status == AllocationInfo::INVALID) | ||||
5914 | continue; | ||||
5915 | |||||
5916 | if (AI.PotentialFreeCalls.count(&CB)) | ||||
5917 | return true; | ||||
5918 | } | ||||
5919 | |||||
5920 | return false; | ||||
5921 | } | ||||
5922 | |||||
5923 | ChangeStatus manifest(Attributor &A) override { | ||||
5924 | assert(getState().isValidState() &&(static_cast <bool> (getState().isValidState() && "Attempted to manifest an invalid state!") ? void (0) : __assert_fail ("getState().isValidState() && \"Attempted to manifest an invalid state!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5925, __extension__ __PRETTY_FUNCTION__)) | ||||
5925 | "Attempted to manifest an invalid state!")(static_cast <bool> (getState().isValidState() && "Attempted to manifest an invalid state!") ? void (0) : __assert_fail ("getState().isValidState() && \"Attempted to manifest an invalid state!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5925, __extension__ __PRETTY_FUNCTION__)); | ||||
5926 | |||||
5927 | ChangeStatus HasChanged = ChangeStatus::UNCHANGED; | ||||
5928 | Function *F = getAnchorScope(); | ||||
5929 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); | ||||
5930 | |||||
5931 | for (auto &It : AllocationInfos) { | ||||
5932 | AllocationInfo &AI = *It.second; | ||||
5933 | if (AI.Status == AllocationInfo::INVALID) | ||||
5934 | continue; | ||||
5935 | |||||
5936 | for (CallBase *FreeCall : AI.PotentialFreeCalls) { | ||||
5937 | LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"; } } while (false); | ||||
5938 | A.deleteAfterManifest(*FreeCall); | ||||
5939 | HasChanged = ChangeStatus::CHANGED; | ||||
5940 | } | ||||
5941 | |||||
5942 | LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CBdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "H2S: Removing malloc-like call: " << *AI.CB << "\n"; } } while (false) | ||||
5943 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "H2S: Removing malloc-like call: " << *AI.CB << "\n"; } } while (false); | ||||
5944 | |||||
5945 | auto Remark = [&](OptimizationRemark OR) { | ||||
5946 | LibFunc IsAllocShared; | ||||
5947 | if (TLI->getLibFunc(*AI.CB, IsAllocShared)) | ||||
5948 | if (IsAllocShared == LibFunc___kmpc_alloc_shared) | ||||
5949 | return OR << "Moving globalized variable to the stack."; | ||||
5950 | return OR << "Moving memory allocation from the heap to the stack."; | ||||
5951 | }; | ||||
5952 | if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) | ||||
5953 | A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark); | ||||
5954 | else | ||||
5955 | A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark); | ||||
5956 | |||||
5957 | const DataLayout &DL = A.getInfoCache().getDL(); | ||||
5958 | Value *Size; | ||||
5959 | Optional<APInt> SizeAPI = getSize(A, *this, AI); | ||||
5960 | if (SizeAPI) { | ||||
5961 | Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI); | ||||
5962 | } else { | ||||
5963 | LLVMContext &Ctx = AI.CB->getContext(); | ||||
5964 | ObjectSizeOpts Opts; | ||||
5965 | ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts); | ||||
5966 | SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB); | ||||
5967 | assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&(static_cast <bool> (SizeOffsetPair != ObjectSizeOffsetEvaluator ::unknown() && cast<ConstantInt>(SizeOffsetPair .second)->isZero()) ? void (0) : __assert_fail ("SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() && cast<ConstantInt>(SizeOffsetPair.second)->isZero()" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5968, __extension__ __PRETTY_FUNCTION__)) | ||||
5968 | cast<ConstantInt>(SizeOffsetPair.second)->isZero())(static_cast <bool> (SizeOffsetPair != ObjectSizeOffsetEvaluator ::unknown() && cast<ConstantInt>(SizeOffsetPair .second)->isZero()) ? void (0) : __assert_fail ("SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() && cast<ConstantInt>(SizeOffsetPair.second)->isZero()" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5968, __extension__ __PRETTY_FUNCTION__)); | ||||
5969 | Size = SizeOffsetPair.first; | ||||
5970 | } | ||||
5971 | |||||
5972 | Instruction *IP = | ||||
5973 | AI.MoveAllocaIntoEntry ? &F->getEntryBlock().front() : AI.CB; | ||||
5974 | |||||
5975 | Align Alignment(1); | ||||
5976 | if (MaybeAlign RetAlign = AI.CB->getRetAlign()) | ||||
5977 | Alignment = std::max(Alignment, *RetAlign); | ||||
5978 | if (Value *Align = getAllocAlignment(AI.CB, TLI)) { | ||||
5979 | Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align); | ||||
5980 | assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 &&(static_cast <bool> (AlignmentAPI && AlignmentAPI .value().getZExtValue() > 0 && "Expected an alignment during manifest!" ) ? void (0) : __assert_fail ("AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 && \"Expected an alignment during manifest!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5981, __extension__ __PRETTY_FUNCTION__)) | ||||
5981 | "Expected an alignment during manifest!")(static_cast <bool> (AlignmentAPI && AlignmentAPI .value().getZExtValue() > 0 && "Expected an alignment during manifest!" ) ? void (0) : __assert_fail ("AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 && \"Expected an alignment during manifest!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5981, __extension__ __PRETTY_FUNCTION__)); | ||||
5982 | Alignment = std::max( | ||||
5983 | Alignment, assumeAligned(AlignmentAPI.value().getZExtValue())); | ||||
5984 | } | ||||
5985 | |||||
5986 | // TODO: Hoist the alloca towards the function entry. | ||||
5987 | unsigned AS = DL.getAllocaAddrSpace(); | ||||
5988 | Instruction *Alloca = | ||||
5989 | new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, | ||||
5990 | AI.CB->getName() + ".h2s", IP); | ||||
5991 | |||||
5992 | if (Alloca->getType() != AI.CB->getType()) | ||||
5993 | Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( | ||||
5994 | Alloca, AI.CB->getType(), "malloc_cast", AI.CB); | ||||
5995 | |||||
5996 | auto *I8Ty = Type::getInt8Ty(F->getContext()); | ||||
5997 | auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty); | ||||
5998 | assert(InitVal &&(static_cast <bool> (InitVal && "Must be able to materialize initial memory state of allocation" ) ? void (0) : __assert_fail ("InitVal && \"Must be able to materialize initial memory state of allocation\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5999, __extension__ __PRETTY_FUNCTION__)) | ||||
5999 | "Must be able to materialize initial memory state of allocation")(static_cast <bool> (InitVal && "Must be able to materialize initial memory state of allocation" ) ? void (0) : __assert_fail ("InitVal && \"Must be able to materialize initial memory state of allocation\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 5999, __extension__ __PRETTY_FUNCTION__)); | ||||
6000 | |||||
6001 | A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca); | ||||
6002 | |||||
6003 | if (auto *II = dyn_cast<InvokeInst>(AI.CB)) { | ||||
6004 | auto *NBB = II->getNormalDest(); | ||||
6005 | BranchInst::Create(NBB, AI.CB->getParent()); | ||||
6006 | A.deleteAfterManifest(*AI.CB); | ||||
6007 | } else { | ||||
6008 | A.deleteAfterManifest(*AI.CB); | ||||
6009 | } | ||||
6010 | |||||
6011 | // Initialize the alloca with the same value as used by the allocation | ||||
6012 | // function. We can skip undef as the initial value of an alloc is | ||||
6013 | // undef, and the memset would simply end up being DSEd. | ||||
6014 | if (!isa<UndefValue>(InitVal)) { | ||||
6015 | IRBuilder<> Builder(Alloca->getNextNode()); | ||||
6016 | // TODO: Use alignment above if align!=1 | ||||
6017 | Builder.CreateMemSet(Alloca, InitVal, Size, None); | ||||
6018 | } | ||||
6019 | HasChanged = ChangeStatus::CHANGED; | ||||
6020 | } | ||||
6021 | |||||
6022 | return HasChanged; | ||||
6023 | } | ||||
6024 | |||||
6025 | Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, | ||||
6026 | Value &V) { | ||||
6027 | bool UsedAssumedInformation = false; | ||||
6028 | Optional<Constant *> SimpleV = | ||||
6029 | A.getAssumedConstant(V, AA, UsedAssumedInformation); | ||||
6030 | if (!SimpleV) | ||||
6031 | return APInt(64, 0); | ||||
6032 | if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value())) | ||||
6033 | return CI->getValue(); | ||||
6034 | return llvm::None; | ||||
6035 | } | ||||
6036 | |||||
6037 | Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, | ||||
6038 | AllocationInfo &AI) { | ||||
6039 | auto Mapper = [&](const Value *V) -> const Value * { | ||||
6040 | bool UsedAssumedInformation = false; | ||||
6041 | if (Optional<Constant *> SimpleV = | ||||
6042 | A.getAssumedConstant(*V, AA, UsedAssumedInformation)) | ||||
6043 | if (*SimpleV) | ||||
6044 | return *SimpleV; | ||||
6045 | return V; | ||||
6046 | }; | ||||
6047 | |||||
6048 | const Function *F = getAnchorScope(); | ||||
6049 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); | ||||
6050 | return getAllocSize(AI.CB, TLI, Mapper); | ||||
6051 | } | ||||
6052 | |||||
6053 | /// Collection of all malloc-like calls in a function with associated | ||||
6054 | /// information. | ||||
6055 | MapVector<CallBase *, AllocationInfo *> AllocationInfos; | ||||
6056 | |||||
6057 | /// Collection of all free-like calls in a function with associated | ||||
6058 | /// information. | ||||
6059 | MapVector<CallBase *, DeallocationInfo *> DeallocationInfos; | ||||
6060 | |||||
6061 | ChangeStatus updateImpl(Attributor &A) override; | ||||
6062 | }; | ||||
6063 | |||||
6064 | ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { | ||||
6065 | ChangeStatus Changed = ChangeStatus::UNCHANGED; | ||||
6066 | const Function *F = getAnchorScope(); | ||||
6067 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); | ||||
6068 | |||||
6069 | const auto &LivenessAA = | ||||
6070 | A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE); | ||||
6071 | |||||
6072 | MustBeExecutedContextExplorer &Explorer = | ||||
6073 | A.getInfoCache().getMustBeExecutedContextExplorer(); | ||||
6074 | |||||
6075 | bool StackIsAccessibleByOtherThreads = | ||||
6076 | A.getInfoCache().stackIsAccessibleByOtherThreads(); | ||||
6077 | |||||
6078 | LoopInfo *LI = | ||||
6079 | A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F); | ||||
6080 | Optional<bool> MayContainIrreducibleControl; | ||||
6081 | auto IsInLoop = [&](BasicBlock &BB) { | ||||
6082 | if (&F->getEntryBlock() == &BB) | ||||
6083 | return false; | ||||
6084 | if (!MayContainIrreducibleControl.has_value()) | ||||
6085 | MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI); | ||||
6086 | if (MayContainIrreducibleControl.value()) | ||||
6087 | return true; | ||||
6088 | if (!LI) | ||||
6089 | return true; | ||||
6090 | return LI->getLoopFor(&BB) != nullptr; | ||||
6091 | }; | ||||
6092 | |||||
6093 | // Flag to ensure we update our deallocation information at most once per | ||||
6094 | // updateImpl call and only if we use the free check reasoning. | ||||
6095 | bool HasUpdatedFrees = false; | ||||
6096 | |||||
6097 | auto UpdateFrees = [&]() { | ||||
6098 | HasUpdatedFrees = true; | ||||
6099 | |||||
6100 | for (auto &It : DeallocationInfos) { | ||||
6101 | DeallocationInfo &DI = *It.second; | ||||
6102 | // For now we cannot use deallocations that have unknown inputs, skip | ||||
6103 | // them. | ||||
6104 | if (DI.MightFreeUnknownObjects) | ||||
6105 | continue; | ||||
6106 | |||||
6107 | // No need to analyze dead calls, ignore them instead. | ||||
6108 | bool UsedAssumedInformation = false; | ||||
6109 | if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation, | ||||
6110 | /* CheckBBLivenessOnly */ true)) | ||||
6111 | continue; | ||||
6112 | |||||
6113 | // Use the non-optimistic version to get the freed object. | ||||
6114 | Value *Obj = getUnderlyingObject(DI.FreedOp); | ||||
6115 | if (!Obj) { | ||||
6116 | LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Unknown underlying object for free!\n" ; } } while (false); | ||||
6117 | DI.MightFreeUnknownObjects = true; | ||||
6118 | continue; | ||||
6119 | } | ||||
6120 | |||||
6121 | // Free of null and undef can be ignored as no-ops (or UB in the latter | ||||
6122 | // case). | ||||
6123 | if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj)) | ||||
6124 | continue; | ||||
6125 | |||||
6126 | CallBase *ObjCB = dyn_cast<CallBase>(Obj); | ||||
6127 | if (!ObjCB) { | ||||
6128 | LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Objdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Free of a non-call object: " << *Obj << "\n"; } } while (false) | ||||
6129 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Free of a non-call object: " << *Obj << "\n"; } } while (false); | ||||
6130 | DI.MightFreeUnknownObjects = true; | ||||
6131 | continue; | ||||
6132 | } | ||||
6133 | |||||
6134 | AllocationInfo *AI = AllocationInfos.lookup(ObjCB); | ||||
6135 | if (!AI) { | ||||
6136 | LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Objdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Free of a non-allocation object: " << *Obj << "\n"; } } while (false) | ||||
6137 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Free of a non-allocation object: " << *Obj << "\n"; } } while (false); | ||||
6138 | DI.MightFreeUnknownObjects = true; | ||||
6139 | continue; | ||||
6140 | } | ||||
6141 | |||||
6142 | DI.PotentialAllocationCalls.insert(ObjCB); | ||||
6143 | } | ||||
6144 | }; | ||||
6145 | |||||
6146 | auto FreeCheck = [&](AllocationInfo &AI) { | ||||
6147 | // If the stack is not accessible by other threads, the "must-free" logic | ||||
6148 | // doesn't apply as the pointer could be shared and needs to be places in | ||||
6149 | // "shareable" memory. | ||||
6150 | if (!StackIsAccessibleByOtherThreads) { | ||||
6151 | auto &NoSyncAA = | ||||
6152 | A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL); | ||||
6153 | if (!NoSyncAA.isAssumedNoSync()) { | ||||
6154 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] found an escaping use, stack is not accessible by " "other threads and function is not nosync:\n"; } } while (false ) | ||||
6155 | dbgs() << "[H2S] found an escaping use, stack is not accessible by "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] found an escaping use, stack is not accessible by " "other threads and function is not nosync:\n"; } } while (false ) | ||||
6156 | "other threads and function is not nosync:\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] found an escaping use, stack is not accessible by " "other threads and function is not nosync:\n"; } } while (false ); | ||||
6157 | return false; | ||||
6158 | } | ||||
6159 | } | ||||
6160 | if (!HasUpdatedFrees) | ||||
6161 | UpdateFrees(); | ||||
6162 | |||||
6163 | // TODO: Allow multi exit functions that have different free calls. | ||||
6164 | if (AI.PotentialFreeCalls.size() != 1) { | ||||
6165 | LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] did not find one free call but " << AI.PotentialFreeCalls.size() << "\n"; } } while (false) | ||||
6166 | << AI.PotentialFreeCalls.size() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] did not find one free call but " << AI.PotentialFreeCalls.size() << "\n"; } } while (false); | ||||
6167 | return false; | ||||
6168 | } | ||||
6169 | CallBase *UniqueFree = *AI.PotentialFreeCalls.begin(); | ||||
6170 | DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree); | ||||
6171 | if (!DI) { | ||||
6172 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call was not known as deallocation call " << *UniqueFree << "\n"; } } while (false) | ||||
6173 | dbgs() << "[H2S] unique free call was not known as deallocation call "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call was not known as deallocation call " << *UniqueFree << "\n"; } } while (false) | ||||
6174 | << *UniqueFree << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call was not known as deallocation call " << *UniqueFree << "\n"; } } while (false); | ||||
6175 | return false; | ||||
6176 | } | ||||
6177 | if (DI->MightFreeUnknownObjects) { | ||||
6178 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might free unknown allocations\n" ; } } while (false) | ||||
6179 | dbgs() << "[H2S] unique free call might free unknown allocations\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might free unknown allocations\n" ; } } while (false); | ||||
6180 | return false; | ||||
6181 | } | ||||
6182 | if (DI->PotentialAllocationCalls.empty()) | ||||
6183 | return true; | ||||
6184 | if (DI->PotentialAllocationCalls.size() > 1) { | ||||
6185 | LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might free " << DI->PotentialAllocationCalls.size() << " different allocations\n" ; } } while (false) | ||||
6186 | << DI->PotentialAllocationCalls.size()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might free " << DI->PotentialAllocationCalls.size() << " different allocations\n" ; } } while (false) | ||||
6187 | << " different allocations\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might free " << DI->PotentialAllocationCalls.size() << " different allocations\n" ; } } while (false); | ||||
6188 | return false; | ||||
6189 | } | ||||
6190 | if (*DI->PotentialAllocationCalls.begin() != AI.CB) { | ||||
6191 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call not known to free this allocation but " << **DI->PotentialAllocationCalls.begin() << "\n" ; } } while (false) | ||||
6192 | dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call not known to free this allocation but " << **DI->PotentialAllocationCalls.begin() << "\n" ; } } while (false) | ||||
6193 | << "[H2S] unique free call not known to free this allocation but "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call not known to free this allocation but " << **DI->PotentialAllocationCalls.begin() << "\n" ; } } while (false) | ||||
6194 | << **DI->PotentialAllocationCalls.begin() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call not known to free this allocation but " << **DI->PotentialAllocationCalls.begin() << "\n" ; } } while (false); | ||||
6195 | return false; | ||||
6196 | } | ||||
6197 | Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode(); | ||||
6198 | if (!Explorer.findInContextOf(UniqueFree, CtxI)) { | ||||
6199 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might not be executed with the allocation " << *UniqueFree << "\n"; } } while (false) | ||||
6200 | dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might not be executed with the allocation " << *UniqueFree << "\n"; } } while (false) | ||||
6201 | << "[H2S] unique free call might not be executed with the allocation "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might not be executed with the allocation " << *UniqueFree << "\n"; } } while (false) | ||||
6202 | << *UniqueFree << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] unique free call might not be executed with the allocation " << *UniqueFree << "\n"; } } while (false); | ||||
6203 | return false; | ||||
6204 | } | ||||
6205 | return true; | ||||
6206 | }; | ||||
6207 | |||||
6208 | auto UsesCheck = [&](AllocationInfo &AI) { | ||||
6209 | bool ValidUsesOnly = true; | ||||
6210 | |||||
6211 | auto Pred = [&](const Use &U, bool &Follow) -> bool { | ||||
6212 | Instruction *UserI = cast<Instruction>(U.getUser()); | ||||
6213 | if (isa<LoadInst>(UserI)) | ||||
6214 | return true; | ||||
6215 | if (auto *SI = dyn_cast<StoreInst>(UserI)) { | ||||
6216 | if (SI->getValueOperand() == U.get()) { | ||||
6217 | LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] escaping store to memory: " << *UserI << "\n"; } } while (false) | ||||
6218 | << "[H2S] escaping store to memory: " << *UserI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] escaping store to memory: " << *UserI << "\n"; } } while (false); | ||||
6219 | ValidUsesOnly = false; | ||||
6220 | } else { | ||||
6221 | // A store into the malloc'ed memory is fine. | ||||
6222 | } | ||||
6223 | return true; | ||||
6224 | } | ||||
6225 | if (auto *CB = dyn_cast<CallBase>(UserI)) { | ||||
6226 | if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) | ||||
6227 | return true; | ||||
6228 | if (DeallocationInfos.count(CB)) { | ||||
6229 | AI.PotentialFreeCalls.insert(CB); | ||||
6230 | return true; | ||||
6231 | } | ||||
6232 | |||||
6233 | unsigned ArgNo = CB->getArgOperandNo(&U); | ||||
6234 | |||||
6235 | const auto &NoCaptureAA = A.getAAFor<AANoCapture>( | ||||
6236 | *this, IRPosition::callsite_argument(*CB, ArgNo), | ||||
6237 | DepClassTy::OPTIONAL); | ||||
6238 | |||||
6239 | // If a call site argument use is nofree, we are fine. | ||||
6240 | const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( | ||||
6241 | *this, IRPosition::callsite_argument(*CB, ArgNo), | ||||
6242 | DepClassTy::OPTIONAL); | ||||
6243 | |||||
6244 | bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture(); | ||||
6245 | bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree(); | ||||
6246 | if (MaybeCaptured || | ||||
6247 | (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && | ||||
6248 | MaybeFreed)) { | ||||
6249 | AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed; | ||||
6250 | |||||
6251 | // Emit a missed remark if this is missed OpenMP globalization. | ||||
6252 | auto Remark = [&](OptimizationRemarkMissed ORM) { | ||||
6253 | return ORM | ||||
6254 | << "Could not move globalized variable to the stack. " | ||||
6255 | "Variable is potentially captured in call. Mark " | ||||
6256 | "parameter as `__attribute__((noescape))` to override."; | ||||
6257 | }; | ||||
6258 | |||||
6259 | if (ValidUsesOnly && | ||||
6260 | AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) | ||||
6261 | A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark); | ||||
6262 | |||||
6263 | LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Bad user: " << *UserI << "\n"; } } while (false); | ||||
6264 | ValidUsesOnly = false; | ||||
6265 | } | ||||
6266 | return true; | ||||
6267 | } | ||||
6268 | |||||
6269 | if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || | ||||
6270 | isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { | ||||
6271 | Follow = true; | ||||
6272 | return true; | ||||
6273 | } | ||||
6274 | // Unknown user for which we can not track uses further (in a way that | ||||
6275 | // makes sense). | ||||
6276 | LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Unknown user: " << *UserI << "\n"; } } while (false); | ||||
6277 | ValidUsesOnly = false; | ||||
6278 | return true; | ||||
6279 | }; | ||||
6280 | if (!A.checkForAllUses(Pred, *this, *AI.CB)) | ||||
6281 | return false; | ||||
6282 | return ValidUsesOnly; | ||||
6283 | }; | ||||
6284 | |||||
6285 | // The actual update starts here. We look at all allocations and depending on | ||||
6286 | // their status perform the appropriate check(s). | ||||
6287 | for (auto &It : AllocationInfos) { | ||||
6288 | AllocationInfo &AI = *It.second; | ||||
6289 | if (AI.Status == AllocationInfo::INVALID) | ||||
6290 | continue; | ||||
6291 | |||||
6292 | if (Value *Align = getAllocAlignment(AI.CB, TLI)) { | ||||
6293 | Optional<APInt> APAlign = getAPInt(A, *this, *Align); | ||||
6294 | if (!APAlign) { | ||||
6295 | // Can't generate an alloca which respects the required alignment | ||||
6296 | // on the allocation. | ||||
6297 | LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CBdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB << "\n"; } } while (false) | ||||
6298 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB << "\n"; } } while (false); | ||||
6299 | AI.Status = AllocationInfo::INVALID; | ||||
6300 | Changed = ChangeStatus::CHANGED; | ||||
6301 | continue; | ||||
6302 | } | ||||
6303 | if (APAlign->ugt(llvm::Value::MaximumAlignment) || | ||||
6304 | !APAlign->isPowerOf2()) { | ||||
6305 | LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAligndo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Invalid allocation alignment: " << APAlign << "\n"; } } while (false) | ||||
6306 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[H2S] Invalid allocation alignment: " << APAlign << "\n"; } } while (false); | ||||
6307 | AI.Status = AllocationInfo::INVALID; | ||||
6308 | Changed = ChangeStatus::CHANGED; | ||||
6309 | continue; | ||||
6310 | } | ||||
6311 | } | ||||
6312 | |||||
6313 | Optional<APInt> Size = getSize(A, *this, AI); | ||||
6314 | if (MaxHeapToStackSize != -1) { | ||||
6315 | if (!Size || Size.value().ugt(MaxHeapToStackSize)) { | ||||
6316 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; else dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " << MaxHeapToStackSize << "\n"; }; } } while (false) | ||||
6317 | if (!Size)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; else dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " << MaxHeapToStackSize << "\n"; }; } } while (false) | ||||
6318 | dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; else dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " << MaxHeapToStackSize << "\n"; }; } } while (false) | ||||
6319 | elsedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; else dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " << MaxHeapToStackSize << "\n"; }; } } while (false) | ||||
6320 | dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; else dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " << MaxHeapToStackSize << "\n"; }; } } while (false) | ||||
6321 | << MaxHeapToStackSize << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; else dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " << MaxHeapToStackSize << "\n"; }; } } while (false) | ||||
6322 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { if (!Size) dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; else dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " << MaxHeapToStackSize << "\n"; }; } } while (false); | ||||
6323 | |||||
6324 | AI.Status = AllocationInfo::INVALID; | ||||
6325 | Changed = ChangeStatus::CHANGED; | ||||
6326 | continue; | ||||
6327 | } | ||||
6328 | } | ||||
6329 | |||||
6330 | switch (AI.Status) { | ||||
6331 | case AllocationInfo::STACK_DUE_TO_USE: | ||||
6332 | if (UsesCheck(AI)) | ||||
6333 | break; | ||||
6334 | AI.Status = AllocationInfo::STACK_DUE_TO_FREE; | ||||
6335 | [[fallthrough]]; | ||||
6336 | case AllocationInfo::STACK_DUE_TO_FREE: | ||||
6337 | if (FreeCheck(AI)) | ||||
6338 | break; | ||||
6339 | AI.Status = AllocationInfo::INVALID; | ||||
6340 | Changed = ChangeStatus::CHANGED; | ||||
6341 | break; | ||||
6342 | case AllocationInfo::INVALID: | ||||
6343 | llvm_unreachable("Invalid allocations should never reach this point!")::llvm::llvm_unreachable_internal("Invalid allocations should never reach this point!" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6343); | ||||
6344 | }; | ||||
6345 | |||||
6346 | // Check if we still think we can move it into the entry block. | ||||
6347 | if (AI.MoveAllocaIntoEntry && | ||||
6348 | (!Size.has_value() || IsInLoop(*AI.CB->getParent()))) | ||||
6349 | AI.MoveAllocaIntoEntry = false; | ||||
6350 | } | ||||
6351 | |||||
6352 | return Changed; | ||||
6353 | } | ||||
6354 | } // namespace | ||||
6355 | |||||
6356 | /// ----------------------- Privatizable Pointers ------------------------------ | ||||
6357 | namespace { | ||||
6358 | struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { | ||||
6359 | AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) | ||||
6360 | : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} | ||||
6361 | |||||
6362 | ChangeStatus indicatePessimisticFixpoint() override { | ||||
6363 | AAPrivatizablePtr::indicatePessimisticFixpoint(); | ||||
6364 | PrivatizableType = nullptr; | ||||
6365 | return ChangeStatus::CHANGED; | ||||
6366 | } | ||||
6367 | |||||
6368 | /// Identify the type we can chose for a private copy of the underlying | ||||
6369 | /// argument. None means it is not clear yet, nullptr means there is none. | ||||
6370 | virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; | ||||
6371 | |||||
6372 | /// Return a privatizable type that encloses both T0 and T1. | ||||
6373 | /// TODO: This is merely a stub for now as we should manage a mapping as well. | ||||
6374 | Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { | ||||
6375 | if (!T0) | ||||
6376 | return T1; | ||||
6377 | if (!T1) | ||||
6378 | return T0; | ||||
6379 | if (T0 == T1) | ||||
6380 | return T0; | ||||
6381 | return nullptr; | ||||
6382 | } | ||||
6383 | |||||
6384 | Optional<Type *> getPrivatizableType() const override { | ||||
6385 | return PrivatizableType; | ||||
6386 | } | ||||
6387 | |||||
6388 | const std::string getAsStr() const override { | ||||
6389 | return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; | ||||
6390 | } | ||||
6391 | |||||
6392 | protected: | ||||
6393 | Optional<Type *> PrivatizableType; | ||||
6394 | }; | ||||
6395 | |||||
6396 | // TODO: Do this for call site arguments (probably also other values) as well. | ||||
6397 | |||||
6398 | struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { | ||||
  /// Forward construction to the AAPrivatizablePtrImpl base.
  AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}
6401 | |||||
6402 | /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) | ||||
6403 | Optional<Type *> identifyPrivatizableType(Attributor &A) override { | ||||
6404 | // If this is a byval argument and we know all the call sites (so we can | ||||
6405 | // rewrite them), there is no need to check them explicitly. | ||||
6406 | bool UsedAssumedInformation = false; | ||||
6407 | SmallVector<Attribute, 1> Attrs; | ||||
6408 | getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true); | ||||
6409 | if (!Attrs.empty() && | ||||
6410 | A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, | ||||
6411 | true, UsedAssumedInformation)) | ||||
6412 | return Attrs[0].getValueAsType(); | ||||
6413 | |||||
6414 | Optional<Type *> Ty; | ||||
6415 | unsigned ArgNo = getIRPosition().getCallSiteArgNo(); | ||||
6416 | |||||
6417 | // Make sure the associated call site argument has the same type at all call | ||||
6418 | // sites and it is an allocation we know is safe to privatize, for now that | ||||
6419 | // means we only allow alloca instructions. | ||||
6420 | // TODO: We can additionally analyze the accesses in the callee to create | ||||
6421 | // the type from that information instead. That is a little more | ||||
6422 | // involved and will be done in a follow up patch. | ||||
6423 | auto CallSiteCheck = [&](AbstractCallSite ACS) { | ||||
6424 | IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); | ||||
6425 | // Check if a coresponding argument was found or if it is one not | ||||
6426 | // associated (which can happen for callback calls). | ||||
6427 | if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) | ||||
6428 | return false; | ||||
6429 | |||||
6430 | // Check that all call sites agree on a type. | ||||
6431 | auto &PrivCSArgAA = | ||||
6432 | A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED); | ||||
6433 | Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); | ||||
6434 | |||||
6435 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6436 | dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6437 | if (CSTy && CSTy.value())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6438 | CSTy.value()->print(dbgs());do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6439 | else if (CSTy)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6440 | dbgs() << "<nullptr>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6441 | elsedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6442 | dbgs() << "<none>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false) | ||||
6443 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; if (CSTy && CSTy .value()) CSTy.value()->print(dbgs()); else if (CSTy) dbgs () << "<nullptr>"; else dbgs() << "<none>" ; }; } } while (false); | ||||
6444 | |||||
6445 | Ty = combineTypes(Ty, CSTy); | ||||
6446 | |||||
6447 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6448 | dbgs() << " : New Type: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6449 | if (Ty && Ty.value())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6450 | Ty.value()->print(dbgs());do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6451 | else if (Ty)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6452 | dbgs() << "<nullptr>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6453 | elsedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6454 | dbgs() << "<none>";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6455 | dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false) | ||||
6456 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << " : New Type: "; if (Ty && Ty.value()) Ty.value()->print(dbgs()); else if (Ty) dbgs( ) << "<nullptr>"; else dbgs() << "<none>" ; dbgs() << "\n"; }; } } while (false); | ||||
6457 | |||||
6458 | return !Ty || Ty.value(); | ||||
6459 | }; | ||||
6460 | |||||
6461 | if (!A.checkForAllCallSites(CallSiteCheck, *this, true, | ||||
6462 | UsedAssumedInformation)) | ||||
6463 | return nullptr; | ||||
6464 | return Ty; | ||||
6465 | } | ||||
6466 | |||||
6467 | /// See AbstractAttribute::updateImpl(...). | ||||
6468 | ChangeStatus updateImpl(Attributor &A) override { | ||||
6469 | PrivatizableType = identifyPrivatizableType(A); | ||||
6470 | if (!PrivatizableType) | ||||
6471 | return ChangeStatus::UNCHANGED; | ||||
6472 | if (!PrivatizableType.value()) | ||||
6473 | return indicatePessimisticFixpoint(); | ||||
6474 | |||||
6475 | // The dependence is optional so we don't give up once we give up on the | ||||
6476 | // alignment. | ||||
6477 | A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), | ||||
6478 | DepClassTy::OPTIONAL); | ||||
6479 | |||||
6480 | // Avoid arguments with padding for now. | ||||
6481 | if (!getIRPosition().hasAttr(Attribute::ByVal) && | ||||
6482 | !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) { | ||||
6483 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Padding detected\n" ; } } while (false); | ||||
6484 | return indicatePessimisticFixpoint(); | ||||
6485 | } | ||||
6486 | |||||
6487 | // Collect the types that will replace the privatizable type in the function | ||||
6488 | // signature. | ||||
6489 | SmallVector<Type *, 16> ReplacementTypes; | ||||
6490 | identifyReplacementTypes(*PrivatizableType, ReplacementTypes); | ||||
6491 | |||||
6492 | // Verify callee and caller agree on how the promoted argument would be | ||||
6493 | // passed. | ||||
6494 | Function &Fn = *getIRPosition().getAnchorScope(); | ||||
6495 | const auto *TTI = | ||||
6496 | A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); | ||||
6497 | if (!TTI) { | ||||
6498 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Missing TTI for function " << Fn.getName() << "\n"; } } while (false) | ||||
6499 | << Fn.getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Missing TTI for function " << Fn.getName() << "\n"; } } while (false); | ||||
6500 | return indicatePessimisticFixpoint(); | ||||
6501 | } | ||||
6502 | |||||
6503 | auto CallSiteCheck = [&](AbstractCallSite ACS) { | ||||
6504 | CallBase *CB = ACS.getInstruction(); | ||||
6505 | return TTI->areTypesABICompatible( | ||||
6506 | CB->getCaller(), CB->getCalledFunction(), ReplacementTypes); | ||||
6507 | }; | ||||
6508 | bool UsedAssumedInformation = false; | ||||
6509 | if (!A.checkForAllCallSites(CallSiteCheck, *this, true, | ||||
6510 | UsedAssumedInformation)) { | ||||
6511 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " << Fn.getName() << "\n"; } } while (false) | ||||
6512 | dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " << Fn.getName() << "\n"; } } while (false) | ||||
6513 | << Fn.getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " << Fn.getName() << "\n"; } } while (false); | ||||
6514 | return indicatePessimisticFixpoint(); | ||||
6515 | } | ||||
6516 | |||||
6517 | // Register a rewrite of the argument. | ||||
6518 | Argument *Arg = getAssociatedArgument(); | ||||
6519 | if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { | ||||
6520 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n" ; } } while (false); | ||||
6521 | return indicatePessimisticFixpoint(); | ||||
6522 | } | ||||
6523 | |||||
6524 | unsigned ArgNo = Arg->getArgNo(); | ||||
6525 | |||||
6526 | // Helper to check if for the given call site the associated argument is | ||||
6527 | // passed to a callback where the privatization would be different. | ||||
6528 | auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { | ||||
6529 | SmallVector<const Use *, 4> CallbackUses; | ||||
6530 | AbstractCallSite::getCallbackUses(CB, CallbackUses); | ||||
6531 | for (const Use *U : CallbackUses) { | ||||
6532 | AbstractCallSite CBACS(U); | ||||
6533 | assert(CBACS && CBACS.isCallbackCall())(static_cast <bool> (CBACS && CBACS.isCallbackCall ()) ? void (0) : __assert_fail ("CBACS && CBACS.isCallbackCall()" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6533, __extension__ __PRETTY_FUNCTION__)); | ||||
6534 | for (Argument &CBArg : CBACS.getCalledFunction()->args()) { | ||||
6535 | int CBArgNo = CBACS.getCallArgOperandNo(CBArg); | ||||
6536 | |||||
6537 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6538 | dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6539 | << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6540 | << "check if can be privatized in the context of its parent ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6541 | << Arg->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6542 | << ")\n[AAPrivatizablePtr] because it is an argument in a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6543 | "callback ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6544 | << CBArgNo << "@" << CBACS.getCalledFunction()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6545 | << ")\n[AAPrivatizablePtr] " << CBArg << " : "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6546 | << CBACS.getCallArgOperand(CBArg) << " vs "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6547 | << CB.getArgOperand(ArgNo) << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6548 | << "[AAPrivatizablePtr] " << CBArg << " : "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6549 | << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false) | ||||
6550 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << "check if can be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ")\n[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperand(CBArg) << " vs " << CB.getArgOperand(ArgNo) << "\n" << "[AAPrivatizablePtr] " << CBArg << " : " << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; }; } } while (false); | ||||
6551 | |||||
6552 | if (CBArgNo != int(ArgNo)) | ||||
6553 | continue; | ||||
6554 | const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>( | ||||
6555 | *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED); | ||||
6556 | if (CBArgPrivAA.isValidState()) { | ||||
6557 | auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); | ||||
6558 | if (!CBArgPrivTy) | ||||
6559 | continue; | ||||
6560 | if (CBArgPrivTy.value() == PrivatizableType) | ||||
6561 | continue; | ||||
6562 | } | ||||
6563 | |||||
6564 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6565 | dbgs() << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6566 | << " cannot be privatized in the context of its parent ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6567 | << Arg->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6568 | << ")\n[AAPrivatizablePtr] because it is an argument in a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6569 | "callback ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6570 | << CBArgNo << "@" << CBACS.getCalledFunction()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6571 | << ").\n[AAPrivatizablePtr] for which the argument "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6572 | "privatization is not compatible.\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6573 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "callback (" << CBArgNo << "@" << CBACS.getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false); | ||||
6574 | return false; | ||||
6575 | } | ||||
6576 | } | ||||
6577 | return true; | ||||
6578 | }; | ||||
6579 | |||||
6580 | // Helper to check if for the given call site the associated argument is | ||||
6581 | // passed to a direct call where the privatization would be different. | ||||
6582 | auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) { | ||||
6583 | CallBase *DC = cast<CallBase>(ACS.getInstruction()); | ||||
6584 | int DCArgNo = ACS.getCallArgOperandNo(ArgNo); | ||||
6585 | assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&(static_cast <bool> (DCArgNo >= 0 && unsigned (DCArgNo) < DC->arg_size() && "Expected a direct call operand for callback call operand" ) ? void (0) : __assert_fail ("DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() && \"Expected a direct call operand for callback call operand\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6586, __extension__ __PRETTY_FUNCTION__)) | ||||
6586 | "Expected a direct call operand for callback call operand")(static_cast <bool> (DCArgNo >= 0 && unsigned (DCArgNo) < DC->arg_size() && "Expected a direct call operand for callback call operand" ) ? void (0) : __assert_fail ("DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() && \"Expected a direct call operand for callback call operand\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6586, __extension__ __PRETTY_FUNCTION__)); | ||||
6587 | |||||
6588 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6589 | dbgs() << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6590 | << " check if be privatized in the context of its parent ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6591 | << Arg->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6592 | << ")\n[AAPrivatizablePtr] because it is an argument in a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6593 | "direct call of ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6594 | << DCArgNo << "@" << DC->getCalledFunction()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6595 | << ").\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false) | ||||
6596 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " check if be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << DCArgNo << "@" << DC ->getCalledFunction()->getName() << ").\n"; }; } } while (false); | ||||
6597 | |||||
6598 | Function *DCCallee = DC->getCalledFunction(); | ||||
6599 | if (unsigned(DCArgNo) < DCCallee->arg_size()) { | ||||
6600 | const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>( | ||||
6601 | *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)), | ||||
6602 | DepClassTy::REQUIRED); | ||||
6603 | if (DCArgPrivAA.isValidState()) { | ||||
6604 | auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType(); | ||||
6605 | if (!DCArgPrivTy) | ||||
6606 | return true; | ||||
6607 | if (DCArgPrivTy.value() == PrivatizableType) | ||||
6608 | return true; | ||||
6609 | } | ||||
6610 | } | ||||
6611 | |||||
6612 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6613 | dbgs() << "[AAPrivatizablePtr] Argument " << *Argdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6614 | << " cannot be privatized in the context of its parent ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6615 | << Arg->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6616 | << ")\n[AAPrivatizablePtr] because it is an argument in a "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6617 | "direct call of ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6618 | << ACS.getInstruction()->getCalledFunction()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6619 | << ").\n[AAPrivatizablePtr] for which the argument "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6620 | "privatization is not compatible.\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false) | ||||
6621 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { { dbgs() << "[AAPrivatizablePtr] Argument " << *Arg << " cannot be privatized in the context of its parent (" << Arg->getParent()->getName() << ")\n[AAPrivatizablePtr] because it is an argument in a " "direct call of (" << ACS.getInstruction()->getCalledFunction ()->getName() << ").\n[AAPrivatizablePtr] for which the argument " "privatization is not compatible.\n"; }; } } while (false); | ||||
6622 | return false; | ||||
6623 | }; | ||||
6624 | |||||
6625 | // Helper to check if the associated argument is used at the given abstract | ||||
6626 | // call site in a way that is incompatible with the privatization assumed | ||||
6627 | // here. | ||||
6628 | auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) { | ||||
6629 | if (ACS.isDirectCall()) | ||||
6630 | return IsCompatiblePrivArgOfCallback(*ACS.getInstruction()); | ||||
6631 | if (ACS.isCallbackCall()) | ||||
6632 | return IsCompatiblePrivArgOfDirectCS(ACS); | ||||
6633 | return false; | ||||
6634 | }; | ||||
6635 | |||||
6636 | if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true, | ||||
6637 | UsedAssumedInformation)) | ||||
6638 | return indicatePessimisticFixpoint(); | ||||
6639 | |||||
6640 | return ChangeStatus::UNCHANGED; | ||||
6641 | } | ||||
6642 | |||||
6643 | /// Given a type to private \p PrivType, collect the constituates (which are | ||||
6644 | /// used) in \p ReplacementTypes. | ||||
6645 | static void | ||||
6646 | identifyReplacementTypes(Type *PrivType, | ||||
6647 | SmallVectorImpl<Type *> &ReplacementTypes) { | ||||
6648 | // TODO: For now we expand the privatization type to the fullest which can | ||||
6649 | // lead to dead arguments that need to be removed later. | ||||
6650 | assert(PrivType && "Expected privatizable type!")(static_cast <bool> (PrivType && "Expected privatizable type!" ) ? void (0) : __assert_fail ("PrivType && \"Expected privatizable type!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6650, __extension__ __PRETTY_FUNCTION__)); | ||||
6651 | |||||
6652 | // Traverse the type, extract constituate types on the outermost level. | ||||
6653 | if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { | ||||
6654 | for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) | ||||
6655 | ReplacementTypes.push_back(PrivStructType->getElementType(u)); | ||||
6656 | } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { | ||||
6657 | ReplacementTypes.append(PrivArrayType->getNumElements(), | ||||
6658 | PrivArrayType->getElementType()); | ||||
6659 | } else { | ||||
6660 | ReplacementTypes.push_back(PrivType); | ||||
6661 | } | ||||
6662 | } | ||||
6663 | |||||
6664 | /// Initialize \p Base according to the type \p PrivType at position \p IP. | ||||
6665 | /// The values needed are taken from the arguments of \p F starting at | ||||
6666 | /// position \p ArgNo. | ||||
6667 | static void createInitialization(Type *PrivType, Value &Base, Function &F, | ||||
6668 | unsigned ArgNo, Instruction &IP) { | ||||
6669 | assert(PrivType && "Expected privatizable type!")(static_cast <bool> (PrivType && "Expected privatizable type!" ) ? void (0) : __assert_fail ("PrivType && \"Expected privatizable type!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6669, __extension__ __PRETTY_FUNCTION__)); | ||||
6670 | |||||
6671 | IRBuilder<NoFolder> IRB(&IP); | ||||
6672 | const DataLayout &DL = F.getParent()->getDataLayout(); | ||||
6673 | |||||
6674 | // Traverse the type, build GEPs and stores. | ||||
6675 | if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { | ||||
6676 | const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); | ||||
6677 | for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { | ||||
6678 | Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); | ||||
6679 | Value *Ptr = | ||||
6680 | constructPointer(PointeeTy, PrivType, &Base, | ||||
6681 | PrivStructLayout->getElementOffset(u), IRB, DL); | ||||
6682 | new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); | ||||
6683 | } | ||||
6684 | } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { | ||||
6685 | Type *PointeeTy = PrivArrayType->getElementType(); | ||||
6686 | Type *PointeePtrTy = PointeeTy->getPointerTo(); | ||||
6687 | uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); | ||||
6688 | for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { | ||||
6689 | Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, | ||||
6690 | u * PointeeTySize, IRB, DL); | ||||
6691 | new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); | ||||
6692 | } | ||||
6693 | } else { | ||||
6694 | new StoreInst(F.getArg(ArgNo), &Base, &IP); | ||||
6695 | } | ||||
6696 | } | ||||
6697 | |||||
6698 | /// Extract values from \p Base according to the type \p PrivType at the | ||||
6699 | /// call position \p ACS. The values are appended to \p ReplacementValues. | ||||
6700 | void createReplacementValues(Align Alignment, Type *PrivType, | ||||
6701 | AbstractCallSite ACS, Value *Base, | ||||
6702 | SmallVectorImpl<Value *> &ReplacementValues) { | ||||
6703 | assert(Base && "Expected base value!")(static_cast <bool> (Base && "Expected base value!" ) ? void (0) : __assert_fail ("Base && \"Expected base value!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6703, __extension__ __PRETTY_FUNCTION__)); | ||||
6704 | assert(PrivType && "Expected privatizable type!")(static_cast <bool> (PrivType && "Expected privatizable type!" ) ? void (0) : __assert_fail ("PrivType && \"Expected privatizable type!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6704, __extension__ __PRETTY_FUNCTION__)); | ||||
6705 | Instruction *IP = ACS.getInstruction(); | ||||
6706 | |||||
6707 | IRBuilder<NoFolder> IRB(IP); | ||||
6708 | const DataLayout &DL = IP->getModule()->getDataLayout(); | ||||
6709 | |||||
6710 | Type *PrivPtrType = PrivType->getPointerTo(); | ||||
6711 | if (Base->getType() != PrivPtrType) | ||||
6712 | Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( | ||||
6713 | Base, PrivPtrType, "", ACS.getInstruction()); | ||||
6714 | |||||
6715 | // Traverse the type, build GEPs and loads. | ||||
6716 | if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { | ||||
6717 | const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); | ||||
6718 | for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { | ||||
6719 | Type *PointeeTy = PrivStructType->getElementType(u); | ||||
6720 | Value *Ptr = | ||||
6721 | constructPointer(PointeeTy->getPointerTo(), PrivType, Base, | ||||
6722 | PrivStructLayout->getElementOffset(u), IRB, DL); | ||||
6723 | LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); | ||||
6724 | L->setAlignment(Alignment); | ||||
6725 | ReplacementValues.push_back(L); | ||||
6726 | } | ||||
6727 | } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { | ||||
6728 | Type *PointeeTy = PrivArrayType->getElementType(); | ||||
6729 | uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); | ||||
6730 | Type *PointeePtrTy = PointeeTy->getPointerTo(); | ||||
6731 | for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { | ||||
6732 | Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, | ||||
6733 | u * PointeeTySize, IRB, DL); | ||||
6734 | LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); | ||||
6735 | L->setAlignment(Alignment); | ||||
6736 | ReplacementValues.push_back(L); | ||||
6737 | } | ||||
6738 | } else { | ||||
6739 | LoadInst *L = new LoadInst(PrivType, Base, "", IP); | ||||
6740 | L->setAlignment(Alignment); | ||||
6741 | ReplacementValues.push_back(L); | ||||
6742 | } | ||||
6743 | } | ||||
6744 | |||||
6745 | /// See AbstractAttribute::manifest(...) | ||||
6746 | ChangeStatus manifest(Attributor &A) override { | ||||
6747 | if (!PrivatizableType) | ||||
6748 | return ChangeStatus::UNCHANGED; | ||||
6749 | assert(PrivatizableType.value() && "Expected privatizable type!")(static_cast <bool> (PrivatizableType.value() && "Expected privatizable type!") ? void (0) : __assert_fail ("PrivatizableType.value() && \"Expected privatizable type!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 6749, __extension__ __PRETTY_FUNCTION__)); | ||||
6750 | |||||
6751 | // Collect all tail calls in the function as we cannot allow new allocas to | ||||
6752 | // escape into tail recursion. | ||||
6753 | // TODO: Be smarter about new allocas escaping into tail calls. | ||||
6754 | SmallVector<CallInst *, 16> TailCalls; | ||||
6755 | bool UsedAssumedInformation = false; | ||||
6756 | if (!A.checkForAllInstructions( | ||||
6757 | [&](Instruction &I) { | ||||
6758 | CallInst &CI = cast<CallInst>(I); | ||||
6759 | if (CI.isTailCall()) | ||||
6760 | TailCalls.push_back(&CI); | ||||
6761 | return true; | ||||
6762 | }, | ||||
6763 | *this, {Instruction::Call}, UsedAssumedInformation)) | ||||
6764 | return ChangeStatus::UNCHANGED; | ||||
6765 | |||||
6766 | Argument *Arg = getAssociatedArgument(); | ||||
6767 | // Query AAAlign attribute for alignment of associated argument to | ||||
6768 | // determine the best alignment of loads. | ||||
6769 | const auto &AlignAA = | ||||
6770 | A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); | ||||
6771 | |||||
6772 | // Callback to repair the associated function. A new alloca is placed at the | ||||
6773 | // beginning and initialized with the values passed through arguments. The | ||||
6774 | // new alloca replaces the use of the old pointer argument. | ||||
6775 | Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = | ||||
6776 | [=](const Attributor::ArgumentReplacementInfo &ARI, | ||||
6777 | Function &ReplacementFn, Function::arg_iterator ArgIt) { | ||||
6778 | BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); | ||||
6779 | Instruction *IP = &*EntryBB.getFirstInsertionPt(); | ||||
6780 | const DataLayout &DL = IP->getModule()->getDataLayout(); | ||||
6781 | unsigned AS = DL.getAllocaAddrSpace(); | ||||
6782 | Instruction *AI = new AllocaInst(PrivatizableType.value(), AS, | ||||
6783 | Arg->getName() + ".priv", IP); | ||||
6784 | createInitialization(PrivatizableType.value(), *AI, ReplacementFn, | ||||
6785 | ArgIt->getArgNo(), *IP); | ||||
6786 | |||||
6787 | if (AI->getType() != Arg->getType()) | ||||
6788 | AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( | ||||
6789 | AI, Arg->getType(), "", IP); | ||||
6790 | Arg->replaceAllUsesWith(AI); | ||||
6791 | |||||
6792 | for (CallInst *CI : TailCalls) | ||||
6793 | CI->setTailCall(false); | ||||
6794 | }; | ||||
6795 | |||||
6796 | // Callback to repair a call site of the associated function. The elements | ||||
6797 | // of the privatizable type are loaded prior to the call and passed to the | ||||
6798 | // new function version. | ||||
6799 | Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = | ||||
6800 | [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, | ||||
6801 | AbstractCallSite ACS, | ||||
6802 | SmallVectorImpl<Value *> &NewArgOperands) { | ||||
6803 | // When no alignment is specified for the load instruction, | ||||
6804 | // natural alignment is assumed. | ||||
6805 | createReplacementValues( | ||||
6806 | AlignAA.getAssumedAlign(), *PrivatizableType, ACS, | ||||
6807 | ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), | ||||
6808 | NewArgOperands); | ||||
6809 | }; | ||||
6810 | |||||
6811 | // Collect the types that will replace the privatizable type in the function | ||||
6812 | // signature. | ||||
6813 | SmallVector<Type *, 16> ReplacementTypes; | ||||
6814 | identifyReplacementTypes(*PrivatizableType, ReplacementTypes); | ||||
6815 | |||||
6816 | // Register a rewrite of the argument. | ||||
6817 | if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, | ||||
6818 | std::move(FnRepairCB), | ||||
6819 | std::move(ACSRepairCB))) | ||||
6820 | return ChangeStatus::CHANGED; | ||||
6821 | return ChangeStatus::UNCHANGED; | ||||
6822 | } | ||||
6823 | |||||
6824 | /// See AbstractAttribute::trackStatistics() | ||||
6825 | void trackStatistics() const override { | ||||
6826 | STATS_DECLTRACK_ARG_ATTR(privatizable_ptr){ static llvm::Statistic NumIRArguments_privatizable_ptr = {"attributor" , "NumIRArguments_privatizable_ptr", ("Number of " "arguments" " marked '" "privatizable_ptr" "'")};; ++(NumIRArguments_privatizable_ptr ); }; | ||||
6827 | } | ||||
6828 | }; | ||||
6829 | |||||
6830 | struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { | ||||
6831 | AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) | ||||
6832 | : AAPrivatizablePtrImpl(IRP, A) {} | ||||
6833 | |||||
6834 | /// See AbstractAttribute::initialize(...). | ||||
6835 | void initialize(Attributor &A) override { | ||||
6836 | // TODO: We can privatize more than arguments. | ||||
6837 | indicatePessimisticFixpoint(); | ||||
6838 | } | ||||
6839 | |||||
6840 | ChangeStatus updateImpl(Attributor &A) override { | ||||
6841 | llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"::llvm::llvm_unreachable_internal("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" "updateImpl will not be called", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 6842) | ||||
6842 | "updateImpl will not be called")::llvm::llvm_unreachable_internal("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" "updateImpl will not be called", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 6842); | ||||
6843 | } | ||||
6844 | |||||
6845 | /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) | ||||
6846 | Optional<Type *> identifyPrivatizableType(Attributor &A) override { | ||||
6847 | Value *Obj = getUnderlyingObject(&getAssociatedValue()); | ||||
6848 | if (!Obj) { | ||||
6849 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] No underlying object found!\n" ; } } while (false); | ||||
6850 | return nullptr; | ||||
6851 | } | ||||
6852 | |||||
6853 | if (auto *AI = dyn_cast<AllocaInst>(Obj)) | ||||
6854 | if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) | ||||
6855 | if (CI->isOne()) | ||||
6856 | return AI->getAllocatedType(); | ||||
6857 | if (auto *Arg = dyn_cast<Argument>(Obj)) { | ||||
6858 | auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( | ||||
6859 | *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); | ||||
6860 | if (PrivArgAA.isAssumedPrivatizablePtr()) | ||||
6861 | return PrivArgAA.getPrivatizableType(); | ||||
6862 | } | ||||
6863 | |||||
6864 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " "alloca nor privatizable argument: " << *Obj << "!\n" ; } } while (false) | ||||
6865 | "alloca nor privatizable argument: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " "alloca nor privatizable argument: " << *Obj << "!\n" ; } } while (false) | ||||
6866 | << *Obj << "!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " "alloca nor privatizable argument: " << *Obj << "!\n" ; } } while (false); | ||||
6867 | return nullptr; | ||||
6868 | } | ||||
6869 | |||||
6870 | /// See AbstractAttribute::trackStatistics() | ||||
6871 | void trackStatistics() const override { | ||||
6872 | STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr){ static llvm::Statistic NumIRFloating_privatizable_ptr = {"attributor" , "NumIRFloating_privatizable_ptr", ("Number of floating values known to be '" "privatizable_ptr" "'")};; ++(NumIRFloating_privatizable_ptr ); }; | ||||
6873 | } | ||||
6874 | }; | ||||
6875 | |||||
6876 | struct AAPrivatizablePtrCallSiteArgument final | ||||
6877 | : public AAPrivatizablePtrFloating { | ||||
6878 | AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
6879 | : AAPrivatizablePtrFloating(IRP, A) {} | ||||
6880 | |||||
6881 | /// See AbstractAttribute::initialize(...). | ||||
6882 | void initialize(Attributor &A) override { | ||||
6883 | if (getIRPosition().hasAttr(Attribute::ByVal)) | ||||
6884 | indicateOptimisticFixpoint(); | ||||
6885 | } | ||||
6886 | |||||
6887 | /// See AbstractAttribute::updateImpl(...). | ||||
6888 | ChangeStatus updateImpl(Attributor &A) override { | ||||
6889 | PrivatizableType = identifyPrivatizableType(A); | ||||
6890 | if (!PrivatizableType) | ||||
6891 | return ChangeStatus::UNCHANGED; | ||||
6892 | if (!PrivatizableType.value()) | ||||
6893 | return indicatePessimisticFixpoint(); | ||||
6894 | |||||
6895 | const IRPosition &IRP = getIRPosition(); | ||||
6896 | auto &NoCaptureAA = | ||||
6897 | A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); | ||||
6898 | if (!NoCaptureAA.isAssumedNoCapture()) { | ||||
6899 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n" ; } } while (false); | ||||
6900 | return indicatePessimisticFixpoint(); | ||||
6901 | } | ||||
6902 | |||||
6903 | auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); | ||||
6904 | if (!NoAliasAA.isAssumedNoAlias()) { | ||||
6905 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] pointer might alias!\n" ; } } while (false); | ||||
6906 | return indicatePessimisticFixpoint(); | ||||
6907 | } | ||||
6908 | |||||
6909 | bool IsKnown; | ||||
6910 | if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) { | ||||
6911 | LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPrivatizablePtr] pointer is written!\n" ; } } while (false); | ||||
6912 | return indicatePessimisticFixpoint(); | ||||
6913 | } | ||||
6914 | |||||
6915 | return ChangeStatus::UNCHANGED; | ||||
6916 | } | ||||
6917 | |||||
6918 | /// See AbstractAttribute::trackStatistics() | ||||
6919 | void trackStatistics() const override { | ||||
6920 | STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr){ static llvm::Statistic NumIRCSArguments_privatizable_ptr = { "attributor", "NumIRCSArguments_privatizable_ptr", ("Number of " "call site arguments" " marked '" "privatizable_ptr" "'")};; ++(NumIRCSArguments_privatizable_ptr); }; | ||||
6921 | } | ||||
6922 | }; | ||||
6923 | |||||
/// Privatizability for a call site return position: unsupported, so this
/// immediately gives up in initialize().
struct AAPrivatizablePtrCallSiteReturned final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
  }
};
6940 | |||||
/// Privatizability for a function return position: unsupported, so this
/// immediately gives up in initialize().
struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
  }
};
6956 | } // namespace | ||||
6957 | |||||
6958 | /// -------------------- Memory Behavior Attributes ---------------------------- | ||||
6959 | /// Includes read-none, read-only, and write-only. | ||||
6960 | /// ---------------------------------------------------------------------------- | ||||
6961 | namespace { | ||||
/// Shared implementation for all memory-behavior (readnone/readonly/writeonly)
/// positions: seeds the state from IR attributes and manifests the single
/// strongest deduced attribute.
struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
  AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehavior(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Start optimistic, then fold in whatever the IR already states.
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(getIRPosition(), getState());
    AAMemoryBehavior::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    SmallVector<Attribute, 2> Attrs;
    IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_ACCESSES);
        break;
      case Attribute::ReadOnly:
        State.addKnownBits(NO_WRITES);
        break;
      case Attribute::WriteOnly:
        State.addKnownBits(NO_READS);
        break;
      default:
        // getAttrs() was asked for exactly the three kinds above.
        llvm_unreachable("Unexpected attribute!");
      }
    }

    // For instruction anchors, the instruction's own may-read/may-write
    // queries provide additional known bits.
    if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
      if (!I->mayReadFromMemory())
        State.addKnownBits(NO_READS);
      if (!I->mayWriteToMemory())
        State.addKnownBits(NO_WRITES);
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    // Emit only the strongest applicable attribute (readnone subsumes the
    // other two), hence the else-if chain and the <= 1 postcondition.
    if (isAssumedReadNone())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    else if (isAssumedReadOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
    else if (isAssumedWriteOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Nothing can improve on an existing readnone at this exact position.
    if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
      return ChangeStatus::UNCHANGED;

    const IRPosition &IRP = getIRPosition();

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return IRP.hasAttr(Attr.getKindAsEnum(),
                             /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    IRP.removeAttrs(AttrKinds);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isAssumedReadNone())
      return "readnone";
    if (isAssumedReadOnly())
      return "readonly";
    if (isAssumedWriteOnly())
      return "writeonly";
    return "may-read/write";
  }

  /// The set of IR attributes AAMemoryBehavior deals with.
  static const Attribute::AttrKind AttrKinds[3];
};
7053 | |||||
// Out-of-line definition of the attribute kinds handled by AAMemoryBehavior.
const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7056 | |||||
/// Memory behavior attribute for a floating value. Deduction works by walking
/// all (transitive, where needed) uses of the value; see updateImpl below.
struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
  AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Note: the STATS_DECLTRACK_* macros expand to blocks, hence no
    // semicolons in this else-if chain.
    if (isAssumedReadNone())
      STATS_DECLTRACK_FLOATING_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FLOATING_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FLOATING_ATTR(writeonly)
  }

private:
  /// Return true if users of \p UserI might access the underlying
  /// variable/location described by \p U and should therefore be analyzed.
  bool followUsersOfUseIn(Attributor &A, const Use &U,
                          const Instruction *UserI);

  /// Update the state according to the effect of use \p U in \p UserI.
  void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
};
7084 | |||||
7085 | /// Memory behavior attribute for function argument. | ||||
7086 | struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { | ||||
7087 | AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) | ||||
7088 | : AAMemoryBehaviorFloating(IRP, A) {} | ||||
7089 | |||||
7090 | /// See AbstractAttribute::initialize(...). | ||||
7091 | void initialize(Attributor &A) override { | ||||
7092 | intersectAssumedBits(BEST_STATE); | ||||
7093 | const IRPosition &IRP = getIRPosition(); | ||||
7094 | // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we | ||||
7095 | // can query it when we use has/getAttr. That would allow us to reuse the | ||||
7096 | // initialize of the base class here. | ||||
7097 | bool HasByVal = | ||||
7098 | IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); | ||||
7099 | getKnownStateFromValue(IRP, getState(), | ||||
7100 | /* IgnoreSubsumingPositions */ HasByVal); | ||||
7101 | |||||
7102 | // Initialize the use vector with all direct uses of the associated value. | ||||
7103 | Argument *Arg = getAssociatedArgument(); | ||||
7104 | if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) | ||||
7105 | indicatePessimisticFixpoint(); | ||||
7106 | } | ||||
7107 | |||||
7108 | ChangeStatus manifest(Attributor &A) override { | ||||
7109 | // TODO: Pointer arguments are not supported on vectors of pointers yet. | ||||
7110 | if (!getAssociatedValue().getType()->isPointerTy()) | ||||
7111 | return ChangeStatus::UNCHANGED; | ||||
7112 | |||||
7113 | // TODO: From readattrs.ll: "inalloca parameters are always | ||||
7114 | // considered written" | ||||
7115 | if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) { | ||||
7116 | removeKnownBits(NO_WRITES); | ||||
7117 | removeAssumedBits(NO_WRITES); | ||||
7118 | } | ||||
7119 | return AAMemoryBehaviorFloating::manifest(A); | ||||
7120 | } | ||||
7121 | |||||
7122 | /// See AbstractAttribute::trackStatistics() | ||||
7123 | void trackStatistics() const override { | ||||
7124 | if (isAssumedReadNone()) | ||||
7125 | STATS_DECLTRACK_ARG_ATTR(readnone){ static llvm::Statistic NumIRArguments_readnone = {"attributor" , "NumIRArguments_readnone", ("Number of " "arguments" " marked '" "readnone" "'")};; ++(NumIRArguments_readnone); } | ||||
7126 | else if (isAssumedReadOnly()) | ||||
7127 | STATS_DECLTRACK_ARG_ATTR(readonly){ static llvm::Statistic NumIRArguments_readonly = {"attributor" , "NumIRArguments_readonly", ("Number of " "arguments" " marked '" "readonly" "'")};; ++(NumIRArguments_readonly); } | ||||
7128 | else if (isAssumedWriteOnly()) | ||||
7129 | STATS_DECLTRACK_ARG_ATTR(writeonly){ static llvm::Statistic NumIRArguments_writeonly = {"attributor" , "NumIRArguments_writeonly", ("Number of " "arguments" " marked '" "writeonly" "'")};; ++(NumIRArguments_writeonly); } | ||||
7130 | } | ||||
7131 | }; | ||||
7132 | |||||
7133 | struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { | ||||
7134 | AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
7135 | : AAMemoryBehaviorArgument(IRP, A) {} | ||||
7136 | |||||
7137 | /// See AbstractAttribute::initialize(...). | ||||
7138 | void initialize(Attributor &A) override { | ||||
7139 | // If we don't have an associated attribute this is either a variadic call | ||||
7140 | // or an indirect call, either way, nothing to do here. | ||||
7141 | Argument *Arg = getAssociatedArgument(); | ||||
7142 | if (!Arg) { | ||||
7143 | indicatePessimisticFixpoint(); | ||||
7144 | return; | ||||
7145 | } | ||||
7146 | if (Arg->hasByValAttr()) { | ||||
7147 | addKnownBits(NO_WRITES); | ||||
7148 | removeKnownBits(NO_READS); | ||||
7149 | removeAssumedBits(NO_READS); | ||||
7150 | } | ||||
7151 | AAMemoryBehaviorArgument::initialize(A); | ||||
7152 | if (getAssociatedFunction()->isDeclaration()) | ||||
7153 | indicatePessimisticFixpoint(); | ||||
7154 | } | ||||
7155 | |||||
7156 | /// See AbstractAttribute::updateImpl(...). | ||||
7157 | ChangeStatus updateImpl(Attributor &A) override { | ||||
7158 | // TODO: Once we have call site specific value information we can provide | ||||
7159 | // call site specific liveness liveness information and then it makes | ||||
7160 | // sense to specialize attributes for call sites arguments instead of | ||||
7161 | // redirecting requests to the callee argument. | ||||
7162 | Argument *Arg = getAssociatedArgument(); | ||||
7163 | const IRPosition &ArgPos = IRPosition::argument(*Arg); | ||||
7164 | auto &ArgAA = | ||||
7165 | A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED); | ||||
7166 | return clampStateAndIndicateChange(getState(), ArgAA.getState()); | ||||
7167 | } | ||||
7168 | |||||
7169 | /// See AbstractAttribute::trackStatistics() | ||||
7170 | void trackStatistics() const override { | ||||
7171 | if (isAssumedReadNone()) | ||||
7172 | STATS_DECLTRACK_CSARG_ATTR(readnone){ static llvm::Statistic NumIRCSArguments_readnone = {"attributor" , "NumIRCSArguments_readnone", ("Number of " "call site arguments" " marked '" "readnone" "'")};; ++(NumIRCSArguments_readnone) ; } | ||||
7173 | else if (isAssumedReadOnly()) | ||||
7174 | STATS_DECLTRACK_CSARG_ATTR(readonly){ static llvm::Statistic NumIRCSArguments_readonly = {"attributor" , "NumIRCSArguments_readonly", ("Number of " "call site arguments" " marked '" "readonly" "'")};; ++(NumIRCSArguments_readonly) ; } | ||||
7175 | else if (isAssumedWriteOnly()) | ||||
7176 | STATS_DECLTRACK_CSARG_ATTR(writeonly){ static llvm::Statistic NumIRCSArguments_writeonly = {"attributor" , "NumIRCSArguments_writeonly", ("Number of " "call site arguments" " marked '" "writeonly" "'")};; ++(NumIRCSArguments_writeonly ); } | ||||
7177 | } | ||||
7178 | }; | ||||
7179 | |||||
/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Note: this bypasses AAMemoryBehaviorFloating and calls the Impl
    // initialize directly, then gives up for unknown/declared callees.
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
7202 | |||||
7203 | /// An AA to represent the memory behavior function attributes. | ||||
7204 | struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl { | ||||
7205 | AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A) | ||||
7206 | : AAMemoryBehaviorImpl(IRP, A) {} | ||||
7207 | |||||
7208 | /// See AbstractAttribute::updateImpl(Attributor &A). | ||||
7209 | ChangeStatus updateImpl(Attributor &A) override; | ||||
7210 | |||||
7211 | /// See AbstractAttribute::manifest(...). | ||||
7212 | ChangeStatus manifest(Attributor &A) override { | ||||
7213 | Function &F = cast<Function>(getAnchorValue()); | ||||
7214 | if (isAssumedReadNone()) { | ||||
7215 | F.removeFnAttr(Attribute::ArgMemOnly); | ||||
7216 | F.removeFnAttr(Attribute::InaccessibleMemOnly); | ||||
7217 | F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly); | ||||
7218 | } | ||||
7219 | return AAMemoryBehaviorImpl::manifest(A); | ||||
7220 | } | ||||
7221 | |||||
7222 | /// See AbstractAttribute::trackStatistics() | ||||
7223 | void trackStatistics() const override { | ||||
7224 | if (isAssumedReadNone()) | ||||
7225 | STATS_DECLTRACK_FN_ATTR(readnone){ static llvm::Statistic NumIRFunction_readnone = {"attributor" , "NumIRFunction_readnone", ("Number of " "functions" " marked '" "readnone" "'")};; ++(NumIRFunction_readnone); } | ||||
7226 | else if (isAssumedReadOnly()) | ||||
7227 | STATS_DECLTRACK_FN_ATTR(readonly){ static llvm::Statistic NumIRFunction_readonly = {"attributor" , "NumIRFunction_readonly", ("Number of " "functions" " marked '" "readonly" "'")};; ++(NumIRFunction_readonly); } | ||||
7228 | else if (isAssumedWriteOnly()) | ||||
7229 | STATS_DECLTRACK_FN_ATTR(writeonly){ static llvm::Statistic NumIRFunction_writeonly = {"attributor" , "NumIRFunction_writeonly", ("Number of " "functions" " marked '" "writeonly" "'")};; ++(NumIRFunction_writeonly); } | ||||
7230 | } | ||||
7231 | }; | ||||
7232 | |||||
7233 | /// AAMemoryBehavior attribute for call sites. | ||||
7234 | struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl { | ||||
7235 | AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A) | ||||
7236 | : AAMemoryBehaviorImpl(IRP, A) {} | ||||
7237 | |||||
7238 | /// See AbstractAttribute::initialize(...). | ||||
7239 | void initialize(Attributor &A) override { | ||||
7240 | AAMemoryBehaviorImpl::initialize(A); | ||||
7241 | Function *F = getAssociatedFunction(); | ||||
7242 | if (!F || F->isDeclaration()) | ||||
7243 | indicatePessimisticFixpoint(); | ||||
7244 | } | ||||
7245 | |||||
7246 | /// See AbstractAttribute::updateImpl(...). | ||||
7247 | ChangeStatus updateImpl(Attributor &A) override { | ||||
7248 | // TODO: Once we have call site specific value information we can provide | ||||
7249 | // call site specific liveness liveness information and then it makes | ||||
7250 | // sense to specialize attributes for call sites arguments instead of | ||||
7251 | // redirecting requests to the callee argument. | ||||
7252 | Function *F = getAssociatedFunction(); | ||||
7253 | const IRPosition &FnPos = IRPosition::function(*F); | ||||
7254 | auto &FnAA = | ||||
7255 | A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED); | ||||
7256 | return clampStateAndIndicateChange(getState(), FnAA.getState()); | ||||
7257 | } | ||||
7258 | |||||
7259 | /// See AbstractAttribute::trackStatistics() | ||||
7260 | void trackStatistics() const override { | ||||
7261 | if (isAssumedReadNone()) | ||||
7262 | STATS_DECLTRACK_CS_ATTR(readnone){ static llvm::Statistic NumIRCS_readnone = {"attributor", "NumIRCS_readnone" , ("Number of " "call site" " marked '" "readnone" "'")};; ++ (NumIRCS_readnone); } | ||||
7263 | else if (isAssumedReadOnly()) | ||||
7264 | STATS_DECLTRACK_CS_ATTR(readonly){ static llvm::Statistic NumIRCS_readonly = {"attributor", "NumIRCS_readonly" , ("Number of " "call site" " marked '" "readonly" "'")};; ++ (NumIRCS_readonly); } | ||||
7265 | else if (isAssumedWriteOnly()) | ||||
7266 | STATS_DECLTRACK_CS_ATTR(writeonly){ static llvm::Statistic NumIRCS_writeonly = {"attributor", "NumIRCS_writeonly" , ("Number of " "call site" " marked '" "writeonly" "'")};; ++ (NumIRCS_writeonly); } | ||||
7267 | } | ||||
7268 | }; | ||||
7269 | |||||
7270 | ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) { | ||||
7271 | |||||
7272 | // The current assumed state used to determine a change. | ||||
7273 | auto AssumedState = getAssumed(); | ||||
7274 | |||||
7275 | auto CheckRWInst = [&](Instruction &I) { | ||||
7276 | // If the instruction has an own memory behavior state, use it to restrict | ||||
7277 | // the local state. No further analysis is required as the other memory | ||||
7278 | // state is as optimistic as it gets. | ||||
7279 | if (const auto *CB = dyn_cast<CallBase>(&I)) { | ||||
7280 | const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( | ||||
7281 | *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED); | ||||
7282 | intersectAssumedBits(MemBehaviorAA.getAssumed()); | ||||
7283 | return !isAtFixpoint(); | ||||
7284 | } | ||||
7285 | |||||
7286 | // Remove access kind modifiers if necessary. | ||||
7287 | if (I.mayReadFromMemory()) | ||||
7288 | removeAssumedBits(NO_READS); | ||||
7289 | if (I.mayWriteToMemory()) | ||||
7290 | removeAssumedBits(NO_WRITES); | ||||
7291 | return !isAtFixpoint(); | ||||
7292 | }; | ||||
7293 | |||||
7294 | bool UsedAssumedInformation = false; | ||||
7295 | if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this, | ||||
7296 | UsedAssumedInformation)) | ||||
7297 | return indicatePessimisticFixpoint(); | ||||
7298 | |||||
7299 | return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED | ||||
7300 | : ChangeStatus::UNCHANGED; | ||||
7301 | } | ||||
7302 | |||||
/// Deduce memory behavior for a floating value: seed from the enclosing
/// function's state (except for byval arguments), then refine by walking all
/// uses of the associated value.
ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval
  // arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    // If the function state already implies ours, there is nothing to do.
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Make sure the value is not captured (except through "return"), if
  // it is, any information derived would be irrelevant anyway as we cannot
  // check the potential aliases introduced by the capture. However, no need
  // to fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA =
      A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                          : ChangeStatus::UNCHANGED;
  }

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
    Instruction *UserI = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
                      << " \n");

    // Droppable users, e.g., llvm::assume does not actually perform any action.
    if (UserI->isDroppable())
      return true;

    // Check if the users of UserI should also be visited.
    Follow = followUsersOfUseIn(A, U, UserI);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);

    return !isAtFixpoint();
  };

  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}
7365 | |||||
7366 | bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U, | ||||
7367 | const Instruction *UserI) { | ||||
7368 | // The loaded value is unrelated to the pointer argument, no need to | ||||
7369 | // follow the users of the load. | ||||
7370 | if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI)) | ||||
7371 | return false; | ||||
7372 | |||||
7373 | // By default we follow all uses assuming UserI might leak information on U, | ||||
7374 | // we have special handling for call sites operands though. | ||||
7375 | const auto *CB = dyn_cast<CallBase>(UserI); | ||||
7376 | if (!CB || !CB->isArgOperand(&U)) | ||||
7377 | return true; | ||||
7378 | |||||
7379 | // If the use is a call argument known not to be captured, the users of | ||||
7380 | // the call do not need to be visited because they have to be unrelated to | ||||
7381 | // the input. Note that this check is not trivial even though we disallow | ||||
7382 | // general capturing of the underlying argument. The reason is that the | ||||
7383 | // call might the argument "through return", which we allow and for which we | ||||
7384 | // need to check call users. | ||||
7385 | if (U.get()->getType()->isPointerTy()) { | ||||
7386 | unsigned ArgNo = CB->getArgOperandNo(&U); | ||||
7387 | const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( | ||||
7388 | *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL); | ||||
7389 | return !ArgNoCaptureAA.isAssumedNoCapture(); | ||||
7390 | } | ||||
7391 | |||||
7392 | return true; | ||||
7393 | } | ||||
7394 | |||||
7395 | void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U, | ||||
7396 | const Instruction *UserI) { | ||||
7397 | assert(UserI->mayReadOrWriteMemory())(static_cast <bool> (UserI->mayReadOrWriteMemory()) ? void (0) : __assert_fail ("UserI->mayReadOrWriteMemory()" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 7397, __extension__ __PRETTY_FUNCTION__)); | ||||
7398 | |||||
7399 | switch (UserI->getOpcode()) { | ||||
7400 | default: | ||||
7401 | // TODO: Handle all atomics and other side-effect operations we know of. | ||||
7402 | break; | ||||
7403 | case Instruction::Load: | ||||
7404 | // Loads cause the NO_READS property to disappear. | ||||
7405 | removeAssumedBits(NO_READS); | ||||
7406 | return; | ||||
7407 | |||||
7408 | case Instruction::Store: | ||||
7409 | // Stores cause the NO_WRITES property to disappear if the use is the | ||||
7410 | // pointer operand. Note that while capturing was taken care of somewhere | ||||
7411 | // else we need to deal with stores of the value that is not looked through. | ||||
7412 | if (cast<StoreInst>(UserI)->getPointerOperand() == U.get()) | ||||
7413 | removeAssumedBits(NO_WRITES); | ||||
7414 | else | ||||
7415 | indicatePessimisticFixpoint(); | ||||
7416 | return; | ||||
7417 | |||||
7418 | case Instruction::Call: | ||||
7419 | case Instruction::CallBr: | ||||
7420 | case Instruction::Invoke: { | ||||
7421 | // For call sites we look at the argument memory behavior attribute (this | ||||
7422 | // could be recursive!) in order to restrict our own state. | ||||
7423 | const auto *CB = cast<CallBase>(UserI); | ||||
7424 | |||||
7425 | // Give up on operand bundles. | ||||
7426 | if (CB->isBundleOperand(&U)) { | ||||
7427 | indicatePessimisticFixpoint(); | ||||
7428 | return; | ||||
7429 | } | ||||
7430 | |||||
7431 | // Calling a function does read the function pointer, maybe write it if the | ||||
7432 | // function is self-modifying. | ||||
7433 | if (CB->isCallee(&U)) { | ||||
7434 | removeAssumedBits(NO_READS); | ||||
7435 | break; | ||||
7436 | } | ||||
7437 | |||||
7438 | // Adjust the possible access behavior based on the information on the | ||||
7439 | // argument. | ||||
7440 | IRPosition Pos; | ||||
7441 | if (U.get()->getType()->isPointerTy()) | ||||
7442 | Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)); | ||||
7443 | else | ||||
7444 | Pos = IRPosition::callsite_function(*CB); | ||||
7445 | const auto &MemBehaviorAA = | ||||
7446 | A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL); | ||||
7447 | // "assumed" has at most the same bits as the MemBehaviorAA assumed | ||||
7448 | // and at least "known". | ||||
7449 | intersectAssumedBits(MemBehaviorAA.getAssumed()); | ||||
7450 | return; | ||||
7451 | } | ||||
7452 | }; | ||||
7453 | |||||
7454 | // Generally, look at the "may-properties" and adjust the assumed state if we | ||||
7455 | // did not trigger special handling before. | ||||
7456 | if (UserI->mayReadFromMemory()) | ||||
7457 | removeAssumedBits(NO_READS); | ||||
7458 | if (UserI->mayWriteToMemory()) | ||||
7459 | removeAssumedBits(NO_WRITES); | ||||
7460 | } | ||||
7461 | } // namespace | ||||
7462 | |||||
/// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblemem_or_argmemonly
/// ----------------------------------------------------------------------------
7467 | |||||
7468 | std::string AAMemoryLocation::getMemoryLocationsAsStr( | ||||
7469 | AAMemoryLocation::MemoryLocationsKind MLK) { | ||||
7470 | if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) | ||||
7471 | return "all memory"; | ||||
7472 | if (MLK == AAMemoryLocation::NO_LOCATIONS) | ||||
7473 | return "no memory"; | ||||
7474 | std::string S = "memory:"; | ||||
7475 | if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) | ||||
7476 | S += "stack,"; | ||||
7477 | if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) | ||||
7478 | S += "constant,"; | ||||
7479 | if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) | ||||
7480 | S += "internal global,"; | ||||
7481 | if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) | ||||
7482 | S += "external global,"; | ||||
7483 | if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) | ||||
7484 | S += "argument,"; | ||||
7485 | if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) | ||||
7486 | S += "inaccessible,"; | ||||
7487 | if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) | ||||
7488 | S += "malloced,"; | ||||
7489 | if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) | ||||
7490 | S += "unknown,"; | ||||
7491 | S.pop_back(); | ||||
7492 | return S; | ||||
7493 | } | ||||
7494 | |||||
7495 | namespace { | ||||
/// Shared implementation of the AAMemoryLocation abstract attribute. Tracks,
/// per single memory-location kind, the set of concrete accesses observed and
/// maintains a bit state where a set NO_* bit means "known not accessed".
struct AAMemoryLocationImpl : public AAMemoryLocation {

  AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
    // Start with no recorded accesses for any location kind.
    AccessKind2Accesses.fill(nullptr);
  }

  ~AAMemoryLocationImpl() {
    // The AccessSets are allocated via a BumpPtrAllocator, we call
    // the destructor manually. The allocator reclaims the memory itself.
    for (AccessSet *AS : AccessKind2Accesses)
      if (AS)
        AS->~AccessSet();
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Seed the assumed state optimistically, then fold in whatever the IR
    // attributes already guarantee.
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(A, getIRPosition(), getState());
    AAMemoryLocation::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblememorargmemonly` as we might break it via interprocedural
    // constant propagation. It is unclear if this is the best way but it is
    // unlikely this will cause real performance problems. If we are deriving
    // attributes for the anchor function we even remove the attribute in
    // addition to ignoring it.
    bool UseArgMemOnly = true;
    Function *AnchorFn = IRP.getAnchorScope();
    if (AnchorFn && A.isRunOn(*AnchorFn))
      UseArgMemOnly = !AnchorFn->hasLocalLinkage();

    SmallVector<Attribute, 2> Attrs;
    IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
        break;
      case Attribute::InaccessibleMemOnly:
        State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
        break;
      case Attribute::ArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::ArgMemOnly});
        break;
      case Attribute::InaccessibleMemOrArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(
              NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
        break;
      default:
        // getAttrs only returned the kinds listed in AttrKinds above.
        llvm_unreachable("Unexpected attribute!");
      }
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  /// Produces at most one attribute: readnone dominates, otherwise one of the
  /// three *memonly attributes for function positions.
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    if (isAssumedReadNone()) {
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
      if (isAssumedInaccessibleMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
      else if (isAssumedArgMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
      else if (isAssumedInaccessibleOrArgMemOnly())
        Attrs.push_back(
            Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
    }
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return IRP.hasAttr(Attr.getKindAsEnum(),
                             /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    IRP.removeAttrs(AttrKinds);
    // readnone subsumes the AAMemoryBehavior attributes, drop those too.
    if (isAssumedReadNone())
      IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

  /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
  /// Iterates all single-location bits; kinds whose bit is set in
  /// \p RequestedMLK are skipped, the recorded accesses of the remaining
  /// kinds are passed to \p Pred.
  bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind RequestedMLK) const override {
    if (!isValidState())
      return false;

    MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
    if (AssumedMLK == NO_LOCATIONS)
      return true;

    unsigned Idx = 0;
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
         CurMLK *= 2, ++Idx) {
      if (CurMLK & RequestedMLK)
        continue;

      if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
        for (const AccessInfo &AI : *Accesses)
          if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
            return false;
    }

    return true;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint this instruction will
    // become an access for all potential access kinds:
    // TODO: Add pointers for argmemonly and globals to improve the results of
    // checkForAllAccessesToMemoryKind.
    bool Changed = false;
    MemoryLocationsKind KnownMLK = getKnown();
    // May be null if the associated value is not an instruction;
    // getAccessKindFromInst then conservatively reports READ_WRITE.
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
      if (!(CurMLK & KnownMLK))
        updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
                                  getAccessKindFromInst(I));
    return AAMemoryLocation::indicatePessimisticFixpoint();
  }

protected:
  /// Helper struct to tie together an instruction that has a read or write
  /// effect with the pointer it accesses (if any).
  struct AccessInfo {

    /// The instruction that caused the access.
    const Instruction *I;

    /// The base pointer that is accessed, or null if unknown.
    const Value *Ptr;

    /// The kind of access (read/write/read+write).
    AccessKind Kind;

    bool operator==(const AccessInfo &RHS) const {
      return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
    }
    // Lexicographic ordering (I, Ptr, Kind); used as the comparator of the
    // AccessSet below.
    bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
      if (LHS.I != RHS.I)
        return LHS.I < RHS.I;
      if (LHS.Ptr != RHS.Ptr)
        return LHS.Ptr < RHS.Ptr;
      if (LHS.Kind != RHS.Kind)
        return LHS.Kind < RHS.Kind;
      return false;
    }
  };

  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
  /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
  /// Indexed by the log2 of the single location bit; entries are lazily
  /// allocated in updateStateAndAccessesMap.
  using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
  std::array<AccessSet *, llvm::CTLog2<VALID_STATE>()> AccessKind2Accesses;

  /// Categorize the pointer arguments of CB that might access memory in
  /// AccessedLoc and update the state and access map accordingly.
  void
  categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
                                     AAMemoryLocation::StateType &AccessedLocs,
                                     bool &Changed);

  /// Return the kind(s) of location that may be accessed by \p V.
  AAMemoryLocation::MemoryLocationsKind
  categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);

  /// Return the access kind as determined by \p I.
  /// A null \p I conservatively yields READ_WRITE.
  AccessKind getAccessKindFromInst(const Instruction *I) {
    AccessKind AK = READ_WRITE;
    if (I) {
      AK = I->mayReadFromMemory() ? READ : NONE;
      AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
    }
    return AK;
  }

  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    // Lazily allocate the per-kind access set from the bump allocator; the
    // destructor above runs ~AccessSet() manually for these.
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    State.removeAssumedBits(MLK);
  }

  /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;

  /// The set of IR attributes AAMemoryLocation deals with.
  static const Attribute::AttrKind AttrKinds[4];
};
7726 | |||||
// Out-of-class definition of the attribute kinds read in
// getKnownStateFromValue and cleared in manifest.
const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
    Attribute::InaccessibleMemOrArgMemOnly};
7730 | |||||
/// Classify the memory behind \p Ptr (accessed by \p I) by walking its
/// assumed underlying objects and mapping each to a single location kind;
/// every categorized object is recorded via updateStateAndAccessesMap.
void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  // Collect the (assumed) underlying objects, intraprocedurally. On failure
  // we must record an access to unknown memory.
  SmallSetVector<Value *, 8> Objects;
  bool UsedAssumedInformation = false;
  if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
                                       UsedAssumedInformation,
                                       AA::Intraprocedural)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
    return;
  }

  for (Value *Obj : Objects) {
    // TODO: recognize the TBAA used for constant accesses.
    MemoryLocationsKind MLK = NO_LOCATIONS;
    // Accesses rooted in undef are ignored.
    if (isa<UndefValue>(Obj))
      continue;
    if (isa<Argument>(Obj)) {
      // TODO: For now we do not treat byval arguments as local copies performed
      // on the call edge, though, we should. To make that happen we need to
      // teach various passes, e.g., DSE, about the copy effect of a byval. That
      // would also allow us to mark functions only accessing byval arguments as
      // readnone again, arguably their accesses have no effect outside of the
      // function, like accesses to allocas.
      MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass so we won't either. Constants defined by TBAA are
      // similar. (We know we do not write it because it is constant.)
      if (auto *GVar = dyn_cast<GlobalVariable>(GV))
        if (GVar->isConstant())
          continue;

      if (GV->hasLocalLinkage())
        MLK = NO_GLOBAL_INTERNAL_MEM;
      else
        MLK = NO_GLOBAL_EXTERNAL_MEM;
    } else if (isa<ConstantPointerNull>(Obj) &&
               // NOTE(review): getAssociatedFunction() may be null for some
               // positions and is handed to NullPointerIsDefined here — the
               // static analyzer reports a null object pointer on a path
               // through this call (ValueTracking.h); confirm the callee
               // tolerates a null Function.
               !NullPointerIsDefined(getAssociatedFunction(),
                                     Ptr.getType()->getPointerAddressSpace())) {
      // Dereferencing null is undefined in this address space, so such an
      // access cannot happen; ignore it.
      continue;
    } else if (isa<AllocaInst>(Obj)) {
      MLK = NO_LOCAL_MEM;
    } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
      // Pointers returned by noalias calls are treated as freshly allocated
      // ("malloced") memory, anything else is unknown.
      const auto &NoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
      if (NoAliasAA.isAssumedNoAlias())
        MLK = NO_MALLOCED_MEM;
      else
        MLK = NO_UNKOWN_MEM;
    } else {
      MLK = NO_UNKOWN_MEM;
    }

    assert(MLK != NO_LOCATIONS && "No location specified!");
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
                      << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
                      << "\n");
    // NOTE(review): this records into getState() while the failure path above
    // records into the caller-provided State — confirm the asymmetry is
    // intended when State is not this attribute's own state.
    updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
                              getAccessKindFromInst(&I));
  }

  LLVM_DEBUG(
      dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
             << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
}
7804 | |||||
7805 | void AAMemoryLocationImpl::categorizeArgumentPointerLocations( | ||||
7806 | Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs, | ||||
7807 | bool &Changed) { | ||||
7808 | for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) { | ||||
7809 | |||||
7810 | // Skip non-pointer arguments. | ||||
7811 | const Value *ArgOp = CB.getArgOperand(ArgNo); | ||||
7812 | if (!ArgOp->getType()->isPtrOrPtrVectorTy()) | ||||
7813 | continue; | ||||
7814 | |||||
7815 | // Skip readnone arguments. | ||||
7816 | const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo); | ||||
7817 | const auto &ArgOpMemLocationAA = | ||||
7818 | A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL); | ||||
7819 | |||||
7820 | if (ArgOpMemLocationAA.isAssumedReadNone()) | ||||
7821 | continue; | ||||
7822 | |||||
7823 | // Categorize potentially accessed pointer arguments as if there was an | ||||
7824 | // access instruction with them as pointer. | ||||
7825 | categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed); | ||||
7826 | } | ||||
7827 | } | ||||
7828 | |||||
/// Compute the set of memory-location kinds instruction \p I may access.
/// Call sites are handled by querying the callee's AAMemoryLocation and then
/// refining the global- and argument-memory parts; plain memory instructions
/// are categorized through their pointer operand; anything else is recorded
/// as an access to unknown memory.
AAMemoryLocation::MemoryLocationsKind
AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
                                                  bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
                    << I << "\n");

  AAMemoryLocation::StateType AccessedLocs;
  AccessedLocs.intersectAssumedBits(NO_LOCATIONS);

  if (auto *CB = dyn_cast<CallBase>(&I)) {

    // First check if the callee is assumed to access no memory at all
    // (readnone) or only inaccessible memory.
    const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
        *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
                      << " [" << CBMemLocationAA << "]\n");

    if (CBMemLocationAA.isAssumedReadNone())
      return NO_LOCATIONS;

    if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
      updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
                                Changed, getAccessKindFromInst(&I));
      return AccessedLocs.getAssumed();
    }

    uint32_t CBAssumedNotAccessedLocs =
        CBMemLocationAA.getAssumedNotAccessedLocation();

    // Set the argmemonly and global bit as we handle them separately below.
    uint32_t CBAssumedNotAccessedLocsNoArgMem =
        CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;

    // Record an access (without a pointer) for every location kind the callee
    // might touch, except argument/global memory which get refined below.
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
      if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
        continue;
      updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
                                getAccessKindFromInst(&I));
    }

    // Now handle global memory if it might be accessed. This is slightly tricky
    // as NO_GLOBAL_MEM has multiple bits set.
    bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
    if (HasGlobalAccesses) {
      // Copy the callee's recorded global accesses (with their pointers) into
      // our state; bail to the worst state if they cannot be enumerated.
      auto AccessPred = [&](const Instruction *, const Value *Ptr,
                            AccessKind Kind, MemoryLocationsKind MLK) {
        updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
                                  getAccessKindFromInst(&I));
        return true;
      };
      if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
              AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
        return AccessedLocs.getWorstState();
    }

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    // Now handle argument memory if it might be accessed: categorize each
    // pointer argument the callee may dereference.
    bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
    if (HasArgAccesses)
      categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    return AccessedLocs.getAssumed();
  }

  // Non-call accesses: categorize through the (possibly volatile) pointer
  // operand if there is one.
  if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
               << I << " [" << *Ptr << "]\n");
    categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
    return AccessedLocs.getAssumed();
  }

  // No pointer operand and not a call: conservatively unknown memory.
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
                    << I << "\n");
  updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
                            getAccessKindFromInst(&I));
  return AccessedLocs.getAssumed();
}
7914 | |||||
7915 | /// An AA to represent the memory behavior function attributes. | ||||
7916 | struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { | ||||
  // Forwarding constructor; all state lives in AAMemoryLocationImpl.
  AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}
7919 | |||||
  /// See AbstractAttribute::updateImpl(Attributor &A).
  /// Categorizes every read/write instruction of the function and removes the
  /// location kinds they may access from the assumed "not accessed" set.
  ChangeStatus updateImpl(Attributor &A) override {

    // Short-circuit via AAMemoryBehavior: a readnone function accesses no
    // locations at all.
    const auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (MemBehaviorAA.isAssumedReadNone()) {
      if (MemBehaviorAA.isKnownReadNone())
        return indicateOptimisticFixpoint();
      assert(isAssumedReadNone() &&
             "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    // The current assumed state used to determine a change.
    auto AssumedState = getAssumed();
    bool Changed = false;

    auto CheckRWInst = [&](Instruction &I) {
      MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
                        << ": " << getMemoryLocationsAsStr(MLK) << "\n");
      removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit set in the *not assumed location*, thus
      // once we don't actually exclude any memory locations in the state.
      return getAssumedNotAccessedLocation() != VALID_STATE;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                            UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    // Changed may already be set by updateStateAndAccessesMap recording new
    // accesses even when the assumed bits did not change.
    Changed |= AssumedState != getAssumed();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }
7956 | |||||
7957 | /// See AbstractAttribute::trackStatistics() | ||||
7958 | void trackStatistics() const override { | ||||
7959 | if (isAssumedReadNone()) | ||||
7960 | STATS_DECLTRACK_FN_ATTR(readnone){ static llvm::Statistic NumIRFunction_readnone = {"attributor" , "NumIRFunction_readnone", ("Number of " "functions" " marked '" "readnone" "'")};; ++(NumIRFunction_readnone); } | ||||
7961 | else if (isAssumedArgMemOnly()) | ||||
7962 | STATS_DECLTRACK_FN_ATTR(argmemonly){ static llvm::Statistic NumIRFunction_argmemonly = {"attributor" , "NumIRFunction_argmemonly", ("Number of " "functions" " marked '" "argmemonly" "'")};; ++(NumIRFunction_argmemonly); } | ||||
7963 | else if (isAssumedInaccessibleMemOnly()) | ||||
7964 | STATS_DECLTRACK_FN_ATTR(inaccessiblememonly){ static llvm::Statistic NumIRFunction_inaccessiblememonly = { "attributor", "NumIRFunction_inaccessiblememonly", ("Number of " "functions" " marked '" "inaccessiblememonly" "'")};; ++(NumIRFunction_inaccessiblememonly ); } | ||||
7965 | else if (isAssumedInaccessibleOrArgMemOnly()) | ||||
7966 | STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly){ static llvm::Statistic NumIRFunction_inaccessiblememorargmemonly = {"attributor", "NumIRFunction_inaccessiblememorargmemonly" , ("Number of " "functions" " marked '" "inaccessiblememorargmemonly" "'")};; ++(NumIRFunction_inaccessiblememorargmemonly); } | ||||
7967 | } | ||||
7968 | }; | ||||
7969 | |||||
7970 | /// AAMemoryLocation attribute for call sites. | ||||
7971 | struct AAMemoryLocationCallSite final : AAMemoryLocationImpl { | ||||
7972 | AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A) | ||||
7973 | : AAMemoryLocationImpl(IRP, A) {} | ||||
7974 | |||||
7975 | /// See AbstractAttribute::initialize(...). | ||||
7976 | void initialize(Attributor &A) override { | ||||
7977 | AAMemoryLocationImpl::initialize(A); | ||||
7978 | Function *F = getAssociatedFunction(); | ||||
7979 | if (!F || F->isDeclaration()) | ||||
7980 | indicatePessimisticFixpoint(); | ||||
7981 | } | ||||
7982 | |||||
7983 | /// See AbstractAttribute::updateImpl(...). | ||||
7984 | ChangeStatus updateImpl(Attributor &A) override { | ||||
7985 | // TODO: Once we have call site specific value information we can provide | ||||
7986 | // call site specific liveness liveness information and then it makes | ||||
7987 | // sense to specialize attributes for call sites arguments instead of | ||||
7988 | // redirecting requests to the callee argument. | ||||
7989 | Function *F = getAssociatedFunction(); | ||||
7990 | const IRPosition &FnPos = IRPosition::function(*F); | ||||
7991 | auto &FnAA = | ||||
7992 | A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED); | ||||
7993 | bool Changed = false; | ||||
7994 | auto AccessPred = [&](const Instruction *I, const Value *Ptr, | ||||
7995 | AccessKind Kind, MemoryLocationsKind MLK) { | ||||
7996 | updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed, | ||||
7997 | getAccessKindFromInst(I)); | ||||
7998 | return true; | ||||
7999 | }; | ||||
8000 | if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS)) | ||||
8001 | return indicatePessimisticFixpoint(); | ||||
8002 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; | ||||
8003 | } | ||||
8004 | |||||
8005 | /// See AbstractAttribute::trackStatistics() | ||||
8006 | void trackStatistics() const override { | ||||
8007 | if (isAssumedReadNone()) | ||||
8008 | STATS_DECLTRACK_CS_ATTR(readnone){ static llvm::Statistic NumIRCS_readnone = {"attributor", "NumIRCS_readnone" , ("Number of " "call site" " marked '" "readnone" "'")};; ++ (NumIRCS_readnone); } | ||||
8009 | } | ||||
8010 | }; | ||||
8011 | } // namespace | ||||
8012 | |||||
8013 | /// ------------------ Value Constant Range Attribute ------------------------- | ||||
8014 | |||||
8015 | namespace { | ||||
/// Shared implementation for all AAValueConstantRange positions. Maintains an
/// IntegerRangeState and seeds/refines it with information from ScalarEvolution
/// and LazyValueInfo when those analyses are available for the anchor scope.
struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    // A user-provided simplification callback owns this position; do not
    // compete with it, give up immediately.
    if (A.hasSimplificationCallback(getIRPosition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // Intersect a range given by SCEV.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Intersect a range given by LVI.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << "range(" << getBitWidth() << ")<";
    getKnown().print(OS);
    OS << " / ";
    getAssumed().print(OS);
    OS << ">";
    return OS.str();
  }

  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I. Returns nullptr if there is no anchor scope or the required
  /// analyses (ScalarEvolution, LoopInfo) are unavailable.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    // Evaluate the expression relative to the loop that contains \p I.
    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }

  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I. Falls back to the worst (full) range when SCEV is
  /// unavailable.
  ConstantRange getConstantRangeFromSCEV(Attributor &A,
                                         const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }

  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I. Falls back to the worst (full) range when LVI or the
  /// context instruction is unavailable.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<Instruction *>(CtxI));
  }

  /// Return true if \p CtxI is valid for querying outside analyses.
  /// This basically makes sure we do not ask intra-procedural analysis
  /// about a context in the wrong function or a context that violates
  /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
  /// if the original context of this AA is OK or should be considered invalid.
  bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
                                               const Instruction *CtxI,
                                               bool AllowAACtxI) const {
    if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
      return false;

    // Our context might be in a different function, neither intra-procedural
    // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
    if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
      return false;

    // If the context is not dominated by the value there are paths to the
    // context that do not define the value. This cannot be handled by
    // LazyValueInfo so we need to bail.
    if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
      InformationCache &InfoCache = A.getInfoCache();
      const DominatorTree *DT =
          InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
              *I->getFunction());
      return DT && DT->dominates(I, CtxI);
    }

    return true;
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getKnown();

    // Tighten the known range with context-sensitive facts from LVI and SCEV.
    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumption.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
    //       evolve to x^2 + x, then we can say that y is in [2, 12].
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }

  /// Return true if \p Assumed is included in \p KnownRanges.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    // A full set carries no information, annotating it is never better.
    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in IR, we give up to annotate assumed
    // range for now.

    // TODO: If there exists a known range which contains assumed range, we
    // can say assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    // Only strictly smaller ranges are an improvement.
    return Known.contains(Assumed) && Known != Assumed;
  }

  /// Helper function to set range metadata. Returns true if metadata was
  /// written, i.e. \p AssumedConstantRange improves over the existing
  /// !range annotation on \p I.
  static bool
  setRangeMetadataIfisBetterRange(Instruction *I,
                                  const ConstantRange &AssumedConstantRange) {
    auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
    if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
      if (!AssumedConstantRange.isEmptySet()) {
        I->setMetadata(LLVMContext::MD_range,
                       getMDNodeForConstantRange(I->getType(), I->getContext(),
                                                 AssumedConstantRange));
        return true;
      }
    }
    return false;
  }

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
    assert(!AssumedConstantRange.isFullSet() && "Invalid state");

    auto &V = getAssociatedValue();
    // Single elements are handled by value simplification; empty sets mean
    // dead code. Only genuine multi-value ranges are worth annotating.
    if (!AssumedConstantRange.isEmptySet() &&
        !AssumedConstantRange.isSingleElement()) {
      if (Instruction *I = dyn_cast<Instruction>(&V)) {
        assert(I == getCtxI() && "Should not annotate an instruction which is "
                                 "not the context instruction");
        // !range metadata is only valid on call and load instructions.
        if (isa<CallInst>(I) || isa<LoadInst>(I))
          if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
            Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed;
  }
};
8240 | |||||
8241 | struct AAValueConstantRangeArgument final | ||||
8242 | : AAArgumentFromCallSiteArguments< | ||||
8243 | AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, | ||||
8244 | true /* BridgeCallBaseContext */> { | ||||
8245 | using Base = AAArgumentFromCallSiteArguments< | ||||
8246 | AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, | ||||
8247 | true /* BridgeCallBaseContext */>; | ||||
8248 | AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) | ||||
8249 | : Base(IRP, A) {} | ||||
8250 | |||||
8251 | /// See AbstractAttribute::initialize(..). | ||||
8252 | void initialize(Attributor &A) override { | ||||
8253 | if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { | ||||
8254 | indicatePessimisticFixpoint(); | ||||
8255 | } else { | ||||
8256 | Base::initialize(A); | ||||
8257 | } | ||||
8258 | } | ||||
8259 | |||||
8260 | /// See AbstractAttribute::trackStatistics() | ||||
8261 | void trackStatistics() const override { | ||||
8262 | STATS_DECLTRACK_ARG_ATTR(value_range){ static llvm::Statistic NumIRArguments_value_range = {"attributor" , "NumIRArguments_value_range", ("Number of " "arguments" " marked '" "value_range" "'")};; ++(NumIRArguments_value_range); } | ||||
8263 | } | ||||
8264 | }; | ||||
8265 | |||||
/// AAValueConstantRange for function return positions, derived from the
/// ranges of all returned values.
struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true> {
  using Base =
      AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true>;
  AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  // Intentionally empty: skips AAValueConstantRangeImpl::initialize, i.e. no
  // SCEV/LVI seeding for the return position.
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};
8287 | |||||
8288 | struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { | ||||
8289 | AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) | ||||
8290 | : AAValueConstantRangeImpl(IRP, A) {} | ||||
8291 | |||||
  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueConstantRangeImpl::initialize(A);
    // The base class may already have reached a fixpoint (e.g. due to a
    // simplification callback); nothing left to do then.
    if (isAtFixpoint())
      return;

    Value &V = getAssociatedValue();

    // A constant integer is exactly its singleton range.
    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      // Collapse the undef state to 0.
      unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
      indicateOptimisticFixpoint();
      return;
    }

    // Calls and the instruction kinds below are handled during updateImpl;
    // keep the state open for them.
    if (isa<CallBase>(&V))
      return;

    if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
      return;

    // If it is a load instruction with range metadata, use it.
    if (LoadInst *LI = dyn_cast<LoadInst>(&V))
      if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));
        return;
      }

    // We can work with PHI and select instruction as we traverse their operands
    // during update.
    if (isa<SelectInst>(V) || isa<PHINode>(V))
      return;

    // Otherwise we give up.
    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
                      << getAssociatedValue() << "\n");
  }
8337 | |||||
8338 | bool calculateBinaryOperator( | ||||
8339 | Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, | ||||
8340 | const Instruction *CtxI, | ||||
8341 | SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { | ||||
8342 | Value *LHS = BinOp->getOperand(0); | ||||
8343 | Value *RHS = BinOp->getOperand(1); | ||||
8344 | |||||
8345 | // Simplify the operands first. | ||||
8346 | bool UsedAssumedInformation = false; | ||||
8347 | const auto &SimplifiedLHS = A.getAssumedSimplified( | ||||
8348 | IRPosition::value(*LHS, getCallBaseContext()), *this, | ||||
8349 | UsedAssumedInformation, AA::Interprocedural); | ||||
8350 | if (!SimplifiedLHS.has_value()) | ||||
8351 | return true; | ||||
8352 | if (!SimplifiedLHS.value()) | ||||
8353 | return false; | ||||
8354 | LHS = *SimplifiedLHS; | ||||
8355 | |||||
8356 | const auto &SimplifiedRHS = A.getAssumedSimplified( | ||||
8357 | IRPosition::value(*RHS, getCallBaseContext()), *this, | ||||
8358 | UsedAssumedInformation, AA::Interprocedural); | ||||
8359 | if (!SimplifiedRHS.has_value()) | ||||
8360 | return true; | ||||
8361 | if (!SimplifiedRHS.value()) | ||||
8362 | return false; | ||||
8363 | RHS = *SimplifiedRHS; | ||||
8364 | |||||
8365 | // TODO: Allow non integers as well. | ||||
8366 | if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) | ||||
8367 | return false; | ||||
8368 | |||||
8369 | auto &LHSAA = A.getAAFor<AAValueConstantRange>( | ||||
8370 | *this, IRPosition::value(*LHS, getCallBaseContext()), | ||||
8371 | DepClassTy::REQUIRED); | ||||
8372 | QuerriedAAs.push_back(&LHSAA); | ||||
8373 | auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); | ||||
8374 | |||||
8375 | auto &RHSAA = A.getAAFor<AAValueConstantRange>( | ||||
8376 | *this, IRPosition::value(*RHS, getCallBaseContext()), | ||||
8377 | DepClassTy::REQUIRED); | ||||
8378 | QuerriedAAs.push_back(&RHSAA); | ||||
8379 | auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); | ||||
8380 | |||||
8381 | auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); | ||||
8382 | |||||
8383 | T.unionAssumed(AssumedRange); | ||||
8384 | |||||
8385 | // TODO: Track a known state too. | ||||
8386 | |||||
8387 | return T.isValidState(); | ||||
8388 | } | ||||
8389 | |||||
8390 | bool calculateCastInst( | ||||
8391 | Attributor &A, CastInst *CastI, IntegerRangeState &T, | ||||
8392 | const Instruction *CtxI, | ||||
8393 | SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { | ||||
8394 | assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!")(static_cast <bool> (CastI->getNumOperands() == 1 && "Expected cast to be unary!") ? void (0) : __assert_fail ("CastI->getNumOperands() == 1 && \"Expected cast to be unary!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 8394, __extension__ __PRETTY_FUNCTION__)); | ||||
8395 | // TODO: Allow non integers as well. | ||||
8396 | Value *OpV = CastI->getOperand(0); | ||||
8397 | |||||
8398 | // Simplify the operand first. | ||||
8399 | bool UsedAssumedInformation = false; | ||||
8400 | const auto &SimplifiedOpV = A.getAssumedSimplified( | ||||
8401 | IRPosition::value(*OpV, getCallBaseContext()), *this, | ||||
8402 | UsedAssumedInformation, AA::Interprocedural); | ||||
8403 | if (!SimplifiedOpV.has_value()) | ||||
8404 | return true; | ||||
8405 | if (!SimplifiedOpV.value()) | ||||
8406 | return false; | ||||
8407 | OpV = *SimplifiedOpV; | ||||
8408 | |||||
8409 | if (!OpV->getType()->isIntegerTy()) | ||||
8410 | return false; | ||||
8411 | |||||
8412 | auto &OpAA = A.getAAFor<AAValueConstantRange>( | ||||
8413 | *this, IRPosition::value(*OpV, getCallBaseContext()), | ||||
8414 | DepClassTy::REQUIRED); | ||||
8415 | QuerriedAAs.push_back(&OpAA); | ||||
8416 | T.unionAssumed( | ||||
8417 | OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); | ||||
8418 | return T.isValidState(); | ||||
8419 | } | ||||
8420 | |||||
  /// Try to decide \p CmpI from the assumed ranges of its operands and
  /// accumulate the resulting i1 range (true, false, or full) into \p T.
  /// Queried AAs are appended to \p QuerriedAAs for circular-reasoning
  /// detection by the caller. Returns false if \p T became invalid (or an
  /// operand simplified to a null value).
  bool
  calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
                   const Instruction *CtxI,
                   SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    Value *LHS = CmpI->getOperand(0);
    Value *RHS = CmpI->getOperand(1);

    // Simplify the operands first. No value yet -> wait for another
    // iteration; a null simplification -> give up.
    bool UsedAssumedInformation = false;
    const auto &SimplifiedLHS = A.getAssumedSimplified(
        IRPosition::value(*LHS, getCallBaseContext()), *this,
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedLHS.has_value())
      return true;
    if (!SimplifiedLHS.value())
      return false;
    LHS = *SimplifiedLHS;

    const auto &SimplifiedRHS = A.getAssumedSimplified(
        IRPosition::value(*RHS, getCallBaseContext()), *this,
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedRHS.has_value())
      return true;
    if (!SimplifiedRHS.value())
      return false;
    RHS = *SimplifiedRHS;

    // TODO: Allow non integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*LHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QuerriedAAs.push_back(&LHSAA);
    auto &RHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*RHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QuerriedAAs.push_back(&RHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // If one of them is empty set, we can't decide.
    if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
      return true;

    bool MustTrue = false, MustFalse = false;

    // The comparison must be false if no LHS value lies in the region that
    // could satisfy the predicate against RHS.
    auto AllowedRegion =
        ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);

    if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
      MustFalse = true;

    // Conversely, it must be true if every LHS/RHS pair satisfies it.
    if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
      MustTrue = true;

    assert((!MustTrue || !MustFalse) &&
           "Either MustTrue or MustFalse should be false!");

    if (MustTrue)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
    else if (MustFalse)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
    else
      T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
                      << " " << RHSAA << "\n");

    // TODO: Track a known state too.
    return T.isValidState();
  }
8494 | |||||
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // Accumulate the new assumed range in a fresh state and clamp it into the
    // current one at the end.
    IntegerRangeState T(getBitWidth());
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {

        // Simplify the operand first.
        bool UsedAssumedInformation = false;
        const auto &SimplifiedOpV = A.getAssumedSimplified(
            IRPosition::value(V, getCallBaseContext()), *this,
            UsedAssumedInformation, AA::Interprocedural);
        if (!SimplifiedOpV.has_value())
          return true;
        if (!SimplifiedOpV.value())
          return false;
        Value *VPtr = *SimplifiedOpV;

        // If the value is not an instruction we handle ourselves, query its
        // own AAValueConstantRange from the Attributor.
        const auto &AA = A.getAAFor<AAValueConstantRange>(
            *this, IRPosition::value(*VPtr, getCallBaseContext()),
            DepClassTy::REQUIRED);

        // Clamp operator is not used to utilize a program point CtxI.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      // Dispatch to the per-instruction-kind helpers; each records the AAs it
      // queried so we can detect self-queries below.
      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions

        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see also
      //       AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    if (!VisitValueCB(getAssociatedValue(), getCtxI()))
      return indicatePessimisticFixpoint();

    // Ensure that long def-use chains can't cause circular reasoning either by
    // introducing a cutoff below.
    if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
      return ChangeStatus::UNCHANGED;
    // NOTE(review): NumChanges/MaxNumChanges appear to be members declared
    // outside this view; they bound how often this AA may report CHANGED.
    if (++NumChanges > MaxNumChanges) {
      LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
                        << " but only " << MaxNumChanges
                        << " are allowed to avoid cyclic reasoning.");
      return indicatePessimisticFixpoint();
    }
    return ChangeStatus::CHANGED;
  }
8573 | |||||
8574 | /// See AbstractAttribute::trackStatistics() | ||||
8575 | void trackStatistics() const override { | ||||
8576 | STATS_DECLTRACK_FLOATING_ATTR(value_range){ static llvm::Statistic NumIRFloating_value_range = {"attributor" , "NumIRFloating_value_range", ("Number of floating values known to be '" "value_range" "'")};; ++(NumIRFloating_value_range); } | ||||
8577 | } | ||||
8578 | |||||
8579 | /// Tracker to bail after too many widening steps of the constant range. | ||||
8580 | int NumChanges = 0; | ||||
8581 | |||||
8582 | /// Upper bound for the number of allowed changes (=widening steps) for the | ||||
8583 | /// constant range before we give up. | ||||
8584 | static constexpr int MaxNumChanges = 5; | ||||
8585 | }; | ||||
8586 | |||||
8587 | struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { | ||||
8588 | AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) | ||||
8589 | : AAValueConstantRangeImpl(IRP, A) {} | ||||
8590 | |||||
8591 | /// See AbstractAttribute::initialize(...). | ||||
8592 | ChangeStatus updateImpl(Attributor &A) override { | ||||
8593 | llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "::llvm::llvm_unreachable_internal("AAValueConstantRange(Function|CallSite)::updateImpl will " "not be called", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 8594) | ||||
8594 | "not be called")::llvm::llvm_unreachable_internal("AAValueConstantRange(Function|CallSite)::updateImpl will " "not be called", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 8594); | ||||
8595 | } | ||||
8596 | |||||
8597 | /// See AbstractAttribute::trackStatistics() | ||||
8598 | void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range){ static llvm::Statistic NumIRFunction_value_range = {"attributor" , "NumIRFunction_value_range", ("Number of " "functions" " marked '" "value_range" "'")};; ++(NumIRFunction_value_range); } } | ||||
8599 | }; | ||||
8600 | |||||
8601 | struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { | ||||
8602 | AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) | ||||
8603 | : AAValueConstantRangeFunction(IRP, A) {} | ||||
8604 | |||||
8605 | /// See AbstractAttribute::trackStatistics() | ||||
8606 | void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range){ static llvm::Statistic NumIRCS_value_range = {"attributor", "NumIRCS_value_range", ("Number of " "call site" " marked '" "value_range" "'")};; ++(NumIRCS_value_range); } } | ||||
8607 | }; | ||||
8608 | |||||
8609 | struct AAValueConstantRangeCallSiteReturned | ||||
8610 | : AACallSiteReturnedFromReturned<AAValueConstantRange, | ||||
8611 | AAValueConstantRangeImpl, | ||||
8612 | AAValueConstantRangeImpl::StateType, | ||||
8613 | /* IntroduceCallBaseContext */ true> { | ||||
8614 | AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
8615 | : AACallSiteReturnedFromReturned<AAValueConstantRange, | ||||
8616 | AAValueConstantRangeImpl, | ||||
8617 | AAValueConstantRangeImpl::StateType, | ||||
8618 | /* IntroduceCallBaseContext */ true>(IRP, | ||||
8619 | A) { | ||||
8620 | } | ||||
8621 | |||||
8622 | /// See AbstractAttribute::initialize(...). | ||||
8623 | void initialize(Attributor &A) override { | ||||
8624 | // If it is a load instruction with range metadata, use the metadata. | ||||
8625 | if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) | ||||
8626 | if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) | ||||
8627 | intersectKnown(getConstantRangeFromMetadata(*RangeMD)); | ||||
8628 | |||||
8629 | AAValueConstantRangeImpl::initialize(A); | ||||
8630 | } | ||||
8631 | |||||
8632 | /// See AbstractAttribute::trackStatistics() | ||||
8633 | void trackStatistics() const override { | ||||
8634 | STATS_DECLTRACK_CSRET_ATTR(value_range){ static llvm::Statistic NumIRCSReturn_value_range = {"attributor" , "NumIRCSReturn_value_range", ("Number of " "call site returns" " marked '" "value_range" "'")};; ++(NumIRCSReturn_value_range ); } | ||||
8635 | } | ||||
8636 | }; | ||||
8637 | struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { | ||||
8638 | AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) | ||||
8639 | : AAValueConstantRangeFloating(IRP, A) {} | ||||
8640 | |||||
8641 | /// See AbstractAttribute::manifest() | ||||
8642 | ChangeStatus manifest(Attributor &A) override { | ||||
8643 | return ChangeStatus::UNCHANGED; | ||||
8644 | } | ||||
8645 | |||||
8646 | /// See AbstractAttribute::trackStatistics() | ||||
8647 | void trackStatistics() const override { | ||||
8648 | STATS_DECLTRACK_CSARG_ATTR(value_range){ static llvm::Statistic NumIRCSArguments_value_range = {"attributor" , "NumIRCSArguments_value_range", ("Number of " "call site arguments" " marked '" "value_range" "'")};; ++(NumIRCSArguments_value_range ); } | ||||
8649 | } | ||||
8650 | }; | ||||
8651 | } // namespace | ||||
8652 | |||||
8653 | /// ------------------ Potential Values Attribute ------------------------- | ||||
8654 | |||||
8655 | namespace { | ||||
8656 | struct AAPotentialConstantValuesImpl : AAPotentialConstantValues { | ||||
8657 | using StateType = PotentialConstantIntValuesState; | ||||
8658 | |||||
8659 | AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A) | ||||
8660 | : AAPotentialConstantValues(IRP, A) {} | ||||
8661 | |||||
8662 | /// See AbstractAttribute::initialize(..). | ||||
8663 | void initialize(Attributor &A) override { | ||||
8664 | if (A.hasSimplificationCallback(getIRPosition())) | ||||
8665 | indicatePessimisticFixpoint(); | ||||
8666 | else | ||||
8667 | AAPotentialConstantValues::initialize(A); | ||||
8668 | } | ||||
8669 | |||||
8670 | bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S, | ||||
8671 | bool &ContainsUndef) { | ||||
8672 | SmallVector<AA::ValueAndContext> Values; | ||||
8673 | bool UsedAssumedInformation = false; | ||||
8674 | if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural, | ||||
8675 | UsedAssumedInformation)) { | ||||
8676 | if (!IRP.getAssociatedType()->isIntegerTy()) | ||||
8677 | return false; | ||||
8678 | auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>( | ||||
8679 | *this, IRP, DepClassTy::REQUIRED); | ||||
8680 | if (!PotentialValuesAA.getState().isValidState()) | ||||
8681 | return false; | ||||
8682 | ContainsUndef = PotentialValuesAA.getState().undefIsContained(); | ||||
8683 | S = PotentialValuesAA.getState().getAssumedSet(); | ||||
8684 | return true; | ||||
8685 | } | ||||
8686 | |||||
8687 | for (auto &It : Values) { | ||||
8688 | if (isa<UndefValue>(It.getValue())) | ||||
8689 | continue; | ||||
8690 | auto *CI = dyn_cast<ConstantInt>(It.getValue()); | ||||
8691 | if (!CI) | ||||
8692 | return false; | ||||
8693 | S.insert(CI->getValue()); | ||||
8694 | } | ||||
8695 | ContainsUndef = S.empty(); | ||||
8696 | |||||
8697 | return true; | ||||
8698 | } | ||||
8699 | |||||
8700 | /// See AbstractAttribute::getAsStr(). | ||||
8701 | const std::string getAsStr() const override { | ||||
8702 | std::string Str; | ||||
8703 | llvm::raw_string_ostream OS(Str); | ||||
8704 | OS << getState(); | ||||
8705 | return OS.str(); | ||||
8706 | } | ||||
8707 | |||||
8708 | /// See AbstractAttribute::updateImpl(...). | ||||
8709 | ChangeStatus updateImpl(Attributor &A) override { | ||||
8710 | return indicatePessimisticFixpoint(); | ||||
8711 | } | ||||
8712 | }; | ||||
8713 | |||||
8714 | struct AAPotentialConstantValuesArgument final | ||||
8715 | : AAArgumentFromCallSiteArguments<AAPotentialConstantValues, | ||||
8716 | AAPotentialConstantValuesImpl, | ||||
8717 | PotentialConstantIntValuesState> { | ||||
8718 | using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues, | ||||
8719 | AAPotentialConstantValuesImpl, | ||||
8720 | PotentialConstantIntValuesState>; | ||||
8721 | AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A) | ||||
8722 | : Base(IRP, A) {} | ||||
8723 | |||||
8724 | /// See AbstractAttribute::initialize(..). | ||||
8725 | void initialize(Attributor &A) override { | ||||
8726 | if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { | ||||
8727 | indicatePessimisticFixpoint(); | ||||
8728 | } else { | ||||
8729 | Base::initialize(A); | ||||
8730 | } | ||||
8731 | } | ||||
8732 | |||||
8733 | /// See AbstractAttribute::trackStatistics() | ||||
8734 | void trackStatistics() const override { | ||||
8735 | STATS_DECLTRACK_ARG_ATTR(potential_values){ static llvm::Statistic NumIRArguments_potential_values = {"attributor" , "NumIRArguments_potential_values", ("Number of " "arguments" " marked '" "potential_values" "'")};; ++(NumIRArguments_potential_values ); } | ||||
8736 | } | ||||
8737 | }; | ||||
8738 | |||||
8739 | struct AAPotentialConstantValuesReturned | ||||
8740 | : AAReturnedFromReturnedValues<AAPotentialConstantValues, | ||||
8741 | AAPotentialConstantValuesImpl> { | ||||
8742 | using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues, | ||||
8743 | AAPotentialConstantValuesImpl>; | ||||
8744 | AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A) | ||||
8745 | : Base(IRP, A) {} | ||||
8746 | |||||
8747 | /// See AbstractAttribute::trackStatistics() | ||||
8748 | void trackStatistics() const override { | ||||
8749 | STATS_DECLTRACK_FNRET_ATTR(potential_values){ static llvm::Statistic NumIRFunctionReturn_potential_values = {"attributor", "NumIRFunctionReturn_potential_values", ("Number of " "function returns" " marked '" "potential_values" "'")};; ++ (NumIRFunctionReturn_potential_values); } | ||||
8750 | } | ||||
8751 | }; | ||||
8752 | |||||
8753 | struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { | ||||
8754 | AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A) | ||||
8755 | : AAPotentialConstantValuesImpl(IRP, A) {} | ||||
8756 | |||||
8757 | /// See AbstractAttribute::initialize(..). | ||||
8758 | void initialize(Attributor &A) override { | ||||
8759 | AAPotentialConstantValuesImpl::initialize(A); | ||||
8760 | if (isAtFixpoint()) | ||||
8761 | return; | ||||
8762 | |||||
8763 | Value &V = getAssociatedValue(); | ||||
8764 | |||||
8765 | if (auto *C = dyn_cast<ConstantInt>(&V)) { | ||||
8766 | unionAssumed(C->getValue()); | ||||
8767 | indicateOptimisticFixpoint(); | ||||
8768 | return; | ||||
8769 | } | ||||
8770 | |||||
8771 | if (isa<UndefValue>(&V)) { | ||||
8772 | unionAssumedWithUndef(); | ||||
8773 | indicateOptimisticFixpoint(); | ||||
8774 | return; | ||||
8775 | } | ||||
8776 | |||||
8777 | if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V)) | ||||
8778 | return; | ||||
8779 | |||||
8780 | if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V)) | ||||
8781 | return; | ||||
8782 | |||||
8783 | indicatePessimisticFixpoint(); | ||||
8784 | |||||
8785 | LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialConstantValues] We give up: " << getAssociatedValue() << "\n"; } } while (false ) | ||||
8786 | << getAssociatedValue() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialConstantValues] We give up: " << getAssociatedValue() << "\n"; } } while (false ); | ||||
8787 | } | ||||
8788 | |||||
8789 | static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS, | ||||
8790 | const APInt &RHS) { | ||||
8791 | return ICmpInst::compare(LHS, RHS, ICI->getPredicate()); | ||||
8792 | } | ||||
8793 | |||||
8794 | static APInt calculateCastInst(const CastInst *CI, const APInt &Src, | ||||
8795 | uint32_t ResultBitWidth) { | ||||
8796 | Instruction::CastOps CastOp = CI->getOpcode(); | ||||
8797 | switch (CastOp) { | ||||
8798 | default: | ||||
8799 | llvm_unreachable("unsupported or not integer cast")::llvm::llvm_unreachable_internal("unsupported or not integer cast" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 8799); | ||||
8800 | case Instruction::Trunc: | ||||
8801 | return Src.trunc(ResultBitWidth); | ||||
8802 | case Instruction::SExt: | ||||
8803 | return Src.sext(ResultBitWidth); | ||||
8804 | case Instruction::ZExt: | ||||
8805 | return Src.zext(ResultBitWidth); | ||||
8806 | case Instruction::BitCast: | ||||
8807 | return Src; | ||||
8808 | } | ||||
8809 | } | ||||
8810 | |||||
8811 | static APInt calculateBinaryOperator(const BinaryOperator *BinOp, | ||||
8812 | const APInt &LHS, const APInt &RHS, | ||||
8813 | bool &SkipOperation, bool &Unsupported) { | ||||
8814 | Instruction::BinaryOps BinOpcode = BinOp->getOpcode(); | ||||
8815 | // Unsupported is set to true when the binary operator is not supported. | ||||
8816 | // SkipOperation is set to true when UB occur with the given operand pair | ||||
8817 | // (LHS, RHS). | ||||
8818 | // TODO: we should look at nsw and nuw keywords to handle operations | ||||
8819 | // that create poison or undef value. | ||||
8820 | switch (BinOpcode) { | ||||
8821 | default: | ||||
8822 | Unsupported = true; | ||||
8823 | return LHS; | ||||
8824 | case Instruction::Add: | ||||
8825 | return LHS + RHS; | ||||
8826 | case Instruction::Sub: | ||||
8827 | return LHS - RHS; | ||||
8828 | case Instruction::Mul: | ||||
8829 | return LHS * RHS; | ||||
8830 | case Instruction::UDiv: | ||||
8831 | if (RHS.isZero()) { | ||||
8832 | SkipOperation = true; | ||||
8833 | return LHS; | ||||
8834 | } | ||||
8835 | return LHS.udiv(RHS); | ||||
8836 | case Instruction::SDiv: | ||||
8837 | if (RHS.isZero()) { | ||||
8838 | SkipOperation = true; | ||||
8839 | return LHS; | ||||
8840 | } | ||||
8841 | return LHS.sdiv(RHS); | ||||
8842 | case Instruction::URem: | ||||
8843 | if (RHS.isZero()) { | ||||
8844 | SkipOperation = true; | ||||
8845 | return LHS; | ||||
8846 | } | ||||
8847 | return LHS.urem(RHS); | ||||
8848 | case Instruction::SRem: | ||||
8849 | if (RHS.isZero()) { | ||||
8850 | SkipOperation = true; | ||||
8851 | return LHS; | ||||
8852 | } | ||||
8853 | return LHS.srem(RHS); | ||||
8854 | case Instruction::Shl: | ||||
8855 | return LHS.shl(RHS); | ||||
8856 | case Instruction::LShr: | ||||
8857 | return LHS.lshr(RHS); | ||||
8858 | case Instruction::AShr: | ||||
8859 | return LHS.ashr(RHS); | ||||
8860 | case Instruction::And: | ||||
8861 | return LHS & RHS; | ||||
8862 | case Instruction::Or: | ||||
8863 | return LHS | RHS; | ||||
8864 | case Instruction::Xor: | ||||
8865 | return LHS ^ RHS; | ||||
8866 | } | ||||
8867 | } | ||||
8868 | |||||
8869 | bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, | ||||
8870 | const APInt &LHS, const APInt &RHS) { | ||||
8871 | bool SkipOperation = false; | ||||
8872 | bool Unsupported = false; | ||||
8873 | APInt Result = | ||||
8874 | calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); | ||||
8875 | if (Unsupported) | ||||
8876 | return false; | ||||
8877 | // If SkipOperation is true, we can ignore this operand pair (L, R). | ||||
8878 | if (!SkipOperation) | ||||
8879 | unionAssumed(Result); | ||||
8880 | return isValidState(); | ||||
8881 | } | ||||
8882 | |||||
8883 | ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { | ||||
8884 | auto AssumedBefore = getAssumed(); | ||||
8885 | Value *LHS = ICI->getOperand(0); | ||||
8886 | Value *RHS = ICI->getOperand(1); | ||||
8887 | |||||
8888 | bool LHSContainsUndef = false, RHSContainsUndef = false; | ||||
8889 | SetTy LHSAAPVS, RHSAAPVS; | ||||
8890 | if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS, | ||||
8891 | LHSContainsUndef) || | ||||
8892 | !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS, | ||||
8893 | RHSContainsUndef)) | ||||
8894 | return indicatePessimisticFixpoint(); | ||||
8895 | |||||
8896 | // TODO: make use of undef flag to limit potential values aggressively. | ||||
8897 | bool MaybeTrue = false, MaybeFalse = false; | ||||
8898 | const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); | ||||
8899 | if (LHSContainsUndef && RHSContainsUndef) { | ||||
8900 | // The result of any comparison between undefs can be soundly replaced | ||||
8901 | // with undef. | ||||
8902 | unionAssumedWithUndef(); | ||||
8903 | } else if (LHSContainsUndef) { | ||||
8904 | for (const APInt &R : RHSAAPVS) { | ||||
8905 | bool CmpResult = calculateICmpInst(ICI, Zero, R); | ||||
8906 | MaybeTrue |= CmpResult; | ||||
8907 | MaybeFalse |= !CmpResult; | ||||
8908 | if (MaybeTrue & MaybeFalse) | ||||
8909 | return indicatePessimisticFixpoint(); | ||||
8910 | } | ||||
8911 | } else if (RHSContainsUndef) { | ||||
8912 | for (const APInt &L : LHSAAPVS) { | ||||
8913 | bool CmpResult = calculateICmpInst(ICI, L, Zero); | ||||
8914 | MaybeTrue |= CmpResult; | ||||
8915 | MaybeFalse |= !CmpResult; | ||||
8916 | if (MaybeTrue & MaybeFalse) | ||||
8917 | return indicatePessimisticFixpoint(); | ||||
8918 | } | ||||
8919 | } else { | ||||
8920 | for (const APInt &L : LHSAAPVS) { | ||||
8921 | for (const APInt &R : RHSAAPVS) { | ||||
8922 | bool CmpResult = calculateICmpInst(ICI, L, R); | ||||
8923 | MaybeTrue |= CmpResult; | ||||
8924 | MaybeFalse |= !CmpResult; | ||||
8925 | if (MaybeTrue & MaybeFalse) | ||||
8926 | return indicatePessimisticFixpoint(); | ||||
8927 | } | ||||
8928 | } | ||||
8929 | } | ||||
8930 | if (MaybeTrue) | ||||
8931 | unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); | ||||
8932 | if (MaybeFalse) | ||||
8933 | unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); | ||||
8934 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED | ||||
8935 | : ChangeStatus::CHANGED; | ||||
8936 | } | ||||
8937 | |||||
8938 | ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { | ||||
8939 | auto AssumedBefore = getAssumed(); | ||||
8940 | Value *LHS = SI->getTrueValue(); | ||||
8941 | Value *RHS = SI->getFalseValue(); | ||||
8942 | |||||
8943 | bool UsedAssumedInformation = false; | ||||
8944 | Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, | ||||
8945 | UsedAssumedInformation); | ||||
8946 | |||||
8947 | // Check if we only need one operand. | ||||
8948 | bool OnlyLeft = false, OnlyRight = false; | ||||
8949 | if (C && *C && (*C)->isOneValue()) | ||||
8950 | OnlyLeft = true; | ||||
8951 | else if (C && *C && (*C)->isZeroValue()) | ||||
8952 | OnlyRight = true; | ||||
8953 | |||||
8954 | bool LHSContainsUndef = false, RHSContainsUndef = false; | ||||
8955 | SetTy LHSAAPVS, RHSAAPVS; | ||||
8956 | if (!OnlyRight && !fillSetWithConstantValues(A, IRPosition::value(*LHS), | ||||
8957 | LHSAAPVS, LHSContainsUndef)) | ||||
8958 | return indicatePessimisticFixpoint(); | ||||
8959 | |||||
8960 | if (!OnlyLeft && !fillSetWithConstantValues(A, IRPosition::value(*RHS), | ||||
8961 | RHSAAPVS, RHSContainsUndef)) | ||||
8962 | return indicatePessimisticFixpoint(); | ||||
8963 | |||||
8964 | if (OnlyLeft || OnlyRight) { | ||||
8965 | // select (true/false), lhs, rhs | ||||
8966 | auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS; | ||||
8967 | auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef; | ||||
8968 | |||||
8969 | if (Undef) | ||||
8970 | unionAssumedWithUndef(); | ||||
8971 | else { | ||||
8972 | for (const auto &It : *OpAA) | ||||
8973 | unionAssumed(It); | ||||
8974 | } | ||||
8975 | |||||
8976 | } else if (LHSContainsUndef && RHSContainsUndef) { | ||||
8977 | // select i1 *, undef , undef => undef | ||||
8978 | unionAssumedWithUndef(); | ||||
8979 | } else { | ||||
8980 | for (const auto &It : LHSAAPVS) | ||||
8981 | unionAssumed(It); | ||||
8982 | for (const auto &It : RHSAAPVS) | ||||
8983 | unionAssumed(It); | ||||
8984 | } | ||||
8985 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED | ||||
8986 | : ChangeStatus::CHANGED; | ||||
8987 | } | ||||
8988 | |||||
8989 | ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { | ||||
8990 | auto AssumedBefore = getAssumed(); | ||||
8991 | if (!CI->isIntegerCast()) | ||||
8992 | return indicatePessimisticFixpoint(); | ||||
8993 | assert(CI->getNumOperands() == 1 && "Expected cast to be unary!")(static_cast <bool> (CI->getNumOperands() == 1 && "Expected cast to be unary!") ? void (0) : __assert_fail ("CI->getNumOperands() == 1 && \"Expected cast to be unary!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 8993, __extension__ __PRETTY_FUNCTION__)); | ||||
8994 | uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); | ||||
8995 | Value *Src = CI->getOperand(0); | ||||
8996 | |||||
8997 | bool SrcContainsUndef = false; | ||||
8998 | SetTy SrcPVS; | ||||
8999 | if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS, | ||||
9000 | SrcContainsUndef)) | ||||
9001 | return indicatePessimisticFixpoint(); | ||||
9002 | |||||
9003 | if (SrcContainsUndef) | ||||
9004 | unionAssumedWithUndef(); | ||||
9005 | else { | ||||
9006 | for (const APInt &S : SrcPVS) { | ||||
9007 | APInt T = calculateCastInst(CI, S, ResultBitWidth); | ||||
9008 | unionAssumed(T); | ||||
9009 | } | ||||
9010 | } | ||||
9011 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED | ||||
9012 | : ChangeStatus::CHANGED; | ||||
9013 | } | ||||
9014 | |||||
9015 | ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { | ||||
9016 | auto AssumedBefore = getAssumed(); | ||||
9017 | Value *LHS = BinOp->getOperand(0); | ||||
9018 | Value *RHS = BinOp->getOperand(1); | ||||
9019 | |||||
9020 | bool LHSContainsUndef = false, RHSContainsUndef = false; | ||||
9021 | SetTy LHSAAPVS, RHSAAPVS; | ||||
9022 | if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS, | ||||
9023 | LHSContainsUndef) || | ||||
9024 | !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS, | ||||
9025 | RHSContainsUndef)) | ||||
9026 | return indicatePessimisticFixpoint(); | ||||
9027 | |||||
9028 | const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); | ||||
9029 | |||||
9030 | // TODO: make use of undef flag to limit potential values aggressively. | ||||
9031 | if (LHSContainsUndef && RHSContainsUndef) { | ||||
9032 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) | ||||
9033 | return indicatePessimisticFixpoint(); | ||||
9034 | } else if (LHSContainsUndef) { | ||||
9035 | for (const APInt &R : RHSAAPVS) { | ||||
9036 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) | ||||
9037 | return indicatePessimisticFixpoint(); | ||||
9038 | } | ||||
9039 | } else if (RHSContainsUndef) { | ||||
9040 | for (const APInt &L : LHSAAPVS) { | ||||
9041 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) | ||||
9042 | return indicatePessimisticFixpoint(); | ||||
9043 | } | ||||
9044 | } else { | ||||
9045 | for (const APInt &L : LHSAAPVS) { | ||||
9046 | for (const APInt &R : RHSAAPVS) { | ||||
9047 | if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) | ||||
9048 | return indicatePessimisticFixpoint(); | ||||
9049 | } | ||||
9050 | } | ||||
9051 | } | ||||
9052 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED | ||||
9053 | : ChangeStatus::CHANGED; | ||||
9054 | } | ||||
9055 | |||||
9056 | /// See AbstractAttribute::updateImpl(...). | ||||
9057 | ChangeStatus updateImpl(Attributor &A) override { | ||||
9058 | Value &V = getAssociatedValue(); | ||||
9059 | Instruction *I = dyn_cast<Instruction>(&V); | ||||
9060 | |||||
9061 | if (auto *ICI = dyn_cast<ICmpInst>(I)) | ||||
9062 | return updateWithICmpInst(A, ICI); | ||||
9063 | |||||
9064 | if (auto *SI = dyn_cast<SelectInst>(I)) | ||||
9065 | return updateWithSelectInst(A, SI); | ||||
9066 | |||||
9067 | if (auto *CI = dyn_cast<CastInst>(I)) | ||||
9068 | return updateWithCastInst(A, CI); | ||||
9069 | |||||
9070 | if (auto *BinOp = dyn_cast<BinaryOperator>(I)) | ||||
9071 | return updateWithBinaryOperator(A, BinOp); | ||||
9072 | |||||
9073 | return indicatePessimisticFixpoint(); | ||||
9074 | } | ||||
9075 | |||||
9076 | /// See AbstractAttribute::trackStatistics() | ||||
9077 | void trackStatistics() const override { | ||||
9078 | STATS_DECLTRACK_FLOATING_ATTR(potential_values){ static llvm::Statistic NumIRFloating_potential_values = {"attributor" , "NumIRFloating_potential_values", ("Number of floating values known to be '" "potential_values" "'")};; ++(NumIRFloating_potential_values ); } | ||||
9079 | } | ||||
9080 | }; | ||||
9081 | |||||
9082 | struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl { | ||||
9083 | AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A) | ||||
9084 | : AAPotentialConstantValuesImpl(IRP, A) {} | ||||
9085 | |||||
9086 | /// See AbstractAttribute::initialize(...). | ||||
9087 | ChangeStatus updateImpl(Attributor &A) override { | ||||
9088 | llvm_unreachable(::llvm::llvm_unreachable_internal("AAPotentialConstantValues(Function|CallSite)::updateImpl will " "not be called", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 9090) | ||||
9089 | "AAPotentialConstantValues(Function|CallSite)::updateImpl will "::llvm::llvm_unreachable_internal("AAPotentialConstantValues(Function|CallSite)::updateImpl will " "not be called", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 9090) | ||||
9090 | "not be called")::llvm::llvm_unreachable_internal("AAPotentialConstantValues(Function|CallSite)::updateImpl will " "not be called", "llvm/lib/Transforms/IPO/AttributorAttributes.cpp" , 9090); | ||||
9091 | } | ||||
9092 | |||||
9093 | /// See AbstractAttribute::trackStatistics() | ||||
9094 | void trackStatistics() const override { | ||||
9095 | STATS_DECLTRACK_FN_ATTR(potential_values){ static llvm::Statistic NumIRFunction_potential_values = {"attributor" , "NumIRFunction_potential_values", ("Number of " "functions" " marked '" "potential_values" "'")};; ++(NumIRFunction_potential_values ); } | ||||
9096 | } | ||||
9097 | }; | ||||
9098 | |||||
9099 | struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction { | ||||
9100 | AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A) | ||||
9101 | : AAPotentialConstantValuesFunction(IRP, A) {} | ||||
9102 | |||||
9103 | /// See AbstractAttribute::trackStatistics() | ||||
9104 | void trackStatistics() const override { | ||||
9105 | STATS_DECLTRACK_CS_ATTR(potential_values){ static llvm::Statistic NumIRCS_potential_values = {"attributor" , "NumIRCS_potential_values", ("Number of " "call site" " marked '" "potential_values" "'")};; ++(NumIRCS_potential_values); } | ||||
9106 | } | ||||
9107 | }; | ||||
9108 | |||||
9109 | struct AAPotentialConstantValuesCallSiteReturned | ||||
9110 | : AACallSiteReturnedFromReturned<AAPotentialConstantValues, | ||||
9111 | AAPotentialConstantValuesImpl> { | ||||
9112 | AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP, | ||||
9113 | Attributor &A) | ||||
9114 | : AACallSiteReturnedFromReturned<AAPotentialConstantValues, | ||||
9115 | AAPotentialConstantValuesImpl>(IRP, A) {} | ||||
9116 | |||||
9117 | /// See AbstractAttribute::trackStatistics() | ||||
9118 | void trackStatistics() const override { | ||||
9119 | STATS_DECLTRACK_CSRET_ATTR(potential_values){ static llvm::Statistic NumIRCSReturn_potential_values = {"attributor" , "NumIRCSReturn_potential_values", ("Number of " "call site returns" " marked '" "potential_values" "'")};; ++(NumIRCSReturn_potential_values ); } | ||||
9120 | } | ||||
9121 | }; | ||||
9122 | |||||
9123 | struct AAPotentialConstantValuesCallSiteArgument | ||||
9124 | : AAPotentialConstantValuesFloating { | ||||
9125 | AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP, | ||||
9126 | Attributor &A) | ||||
9127 | : AAPotentialConstantValuesFloating(IRP, A) {} | ||||
9128 | |||||
9129 | /// See AbstractAttribute::initialize(..). | ||||
9130 | void initialize(Attributor &A) override { | ||||
9131 | AAPotentialConstantValuesImpl::initialize(A); | ||||
9132 | if (isAtFixpoint()) | ||||
9133 | return; | ||||
9134 | |||||
9135 | Value &V = getAssociatedValue(); | ||||
9136 | |||||
9137 | if (auto *C = dyn_cast<ConstantInt>(&V)) { | ||||
9138 | unionAssumed(C->getValue()); | ||||
9139 | indicateOptimisticFixpoint(); | ||||
9140 | return; | ||||
9141 | } | ||||
9142 | |||||
9143 | if (isa<UndefValue>(&V)) { | ||||
9144 | unionAssumedWithUndef(); | ||||
9145 | indicateOptimisticFixpoint(); | ||||
9146 | return; | ||||
9147 | } | ||||
9148 | } | ||||
9149 | |||||
9150 | /// See AbstractAttribute::updateImpl(...). | ||||
9151 | ChangeStatus updateImpl(Attributor &A) override { | ||||
9152 | Value &V = getAssociatedValue(); | ||||
9153 | auto AssumedBefore = getAssumed(); | ||||
9154 | auto &AA = A.getAAFor<AAPotentialConstantValues>( | ||||
9155 | *this, IRPosition::value(V), DepClassTy::REQUIRED); | ||||
9156 | const auto &S = AA.getAssumed(); | ||||
9157 | unionAssumed(S); | ||||
9158 | return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED | ||||
9159 | : ChangeStatus::CHANGED; | ||||
9160 | } | ||||
9161 | |||||
9162 | /// See AbstractAttribute::trackStatistics() | ||||
9163 | void trackStatistics() const override { | ||||
9164 | STATS_DECLTRACK_CSARG_ATTR(potential_values){ static llvm::Statistic NumIRCSArguments_potential_values = { "attributor", "NumIRCSArguments_potential_values", ("Number of " "call site arguments" " marked '" "potential_values" "'")};; ++(NumIRCSArguments_potential_values); } | ||||
9165 | } | ||||
9166 | }; | ||||
9167 | |||||
9168 | /// ------------------------ NoUndef Attribute --------------------------------- | ||||
9169 | struct AANoUndefImpl : AANoUndef { | ||||
9170 | AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {} | ||||
9171 | |||||
9172 | /// See AbstractAttribute::initialize(...). | ||||
9173 | void initialize(Attributor &A) override { | ||||
9174 | if (getIRPosition().hasAttr({Attribute::NoUndef})) { | ||||
9175 | indicateOptimisticFixpoint(); | ||||
9176 | return; | ||||
9177 | } | ||||
9178 | Value &V = getAssociatedValue(); | ||||
9179 | if (isa<UndefValue>(V)) | ||||
9180 | indicatePessimisticFixpoint(); | ||||
9181 | else if (isa<FreezeInst>(V)) | ||||
9182 | indicateOptimisticFixpoint(); | ||||
9183 | else if (getPositionKind() != IRPosition::IRP_RETURNED && | ||||
9184 | isGuaranteedNotToBeUndefOrPoison(&V)) | ||||
9185 | indicateOptimisticFixpoint(); | ||||
9186 | else | ||||
9187 | AANoUndef::initialize(A); | ||||
9188 | } | ||||
9189 | |||||
9190 | /// See followUsesInMBEC | ||||
9191 | bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, | ||||
9192 | AANoUndef::StateType &State) { | ||||
9193 | const Value *UseV = U->get(); | ||||
9194 | const DominatorTree *DT = nullptr; | ||||
9195 | AssumptionCache *AC = nullptr; | ||||
9196 | InformationCache &InfoCache = A.getInfoCache(); | ||||
9197 | if (Function *F = getAnchorScope()) { | ||||
9198 | DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); | ||||
9199 | AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); | ||||
9200 | } | ||||
9201 | State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT)); | ||||
9202 | bool TrackUse = false; | ||||
9203 | // Track use for instructions which must produce undef or poison bits when | ||||
9204 | // at least one operand contains such bits. | ||||
9205 | if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I)) | ||||
9206 | TrackUse = true; | ||||
9207 | return TrackUse; | ||||
9208 | } | ||||
9209 | |||||
9210 | /// See AbstractAttribute::getAsStr(). | ||||
9211 | const std::string getAsStr() const override { | ||||
9212 | return getAssumed() ? "noundef" : "may-undef-or-poison"; | ||||
9213 | } | ||||
9214 | |||||
9215 | ChangeStatus manifest(Attributor &A) override { | ||||
9216 | // We don't manifest noundef attribute for dead positions because the | ||||
9217 | // associated values with dead positions would be replaced with undef | ||||
9218 | // values. | ||||
9219 | bool UsedAssumedInformation = false; | ||||
9220 | if (A.isAssumedDead(getIRPosition(), nullptr, nullptr, | ||||
9221 | UsedAssumedInformation)) | ||||
9222 | return ChangeStatus::UNCHANGED; | ||||
9223 | // A position whose simplified value does not have any value is | ||||
9224 | // considered to be dead. We don't manifest noundef in such positions for | ||||
9225 | // the same reason above. | ||||
9226 | if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation, | ||||
9227 | AA::Interprocedural) | ||||
9228 | .has_value()) | ||||
9229 | return ChangeStatus::UNCHANGED; | ||||
9230 | return AANoUndef::manifest(A); | ||||
9231 | } | ||||
9232 | }; | ||||
9233 | |||||
9234 | struct AANoUndefFloating : public AANoUndefImpl { | ||||
9235 | AANoUndefFloating(const IRPosition &IRP, Attributor &A) | ||||
9236 | : AANoUndefImpl(IRP, A) {} | ||||
9237 | |||||
9238 | /// See AbstractAttribute::initialize(...). | ||||
9239 | void initialize(Attributor &A) override { | ||||
9240 | AANoUndefImpl::initialize(A); | ||||
9241 | if (!getState().isAtFixpoint()) | ||||
9242 | if (Instruction *CtxI = getCtxI()) | ||||
9243 | followUsesInMBEC(*this, A, getState(), *CtxI); | ||||
9244 | } | ||||
9245 | |||||
9246 | /// See AbstractAttribute::updateImpl(...). | ||||
9247 | ChangeStatus updateImpl(Attributor &A) override { | ||||
9248 | |||||
9249 | SmallVector<AA::ValueAndContext> Values; | ||||
9250 | bool UsedAssumedInformation = false; | ||||
9251 | if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values, | ||||
9252 | AA::AnyScope, UsedAssumedInformation)) { | ||||
9253 | Values.push_back({getAssociatedValue(), getCtxI()}); | ||||
9254 | } | ||||
9255 | |||||
9256 | StateType T; | ||||
9257 | auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool { | ||||
9258 | const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V), | ||||
9259 | DepClassTy::REQUIRED); | ||||
9260 | if (this == &AA) { | ||||
9261 | T.indicatePessimisticFixpoint(); | ||||
9262 | } else { | ||||
9263 | const AANoUndef::StateType &S = | ||||
9264 | static_cast<const AANoUndef::StateType &>(AA.getState()); | ||||
9265 | T ^= S; | ||||
9266 | } | ||||
9267 | return T.isValidState(); | ||||
9268 | }; | ||||
9269 | |||||
9270 | for (const auto &VAC : Values) | ||||
9271 | if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI())) | ||||
9272 | return indicatePessimisticFixpoint(); | ||||
9273 | |||||
9274 | return clampStateAndIndicateChange(getState(), T); | ||||
9275 | } | ||||
9276 | |||||
9277 | /// See AbstractAttribute::trackStatistics() | ||||
9278 | void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef){ static llvm::Statistic NumIRFunctionReturn_noundef = {"attributor" , "NumIRFunctionReturn_noundef", ("Number of " "function returns" " marked '" "noundef" "'")};; ++(NumIRFunctionReturn_noundef ); } } | ||||
9279 | }; | ||||
9280 | |||||
/// `noundef` for function-return positions; the deduction itself is done by
/// the AAReturnedFromReturnedValues helper, which propagates the state of
/// the returned values to the return position.
struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};
9289 | |||||
/// `noundef` for argument positions; derived from the corresponding call
/// site arguments by the AAArgumentFromCallSiteArguments helper.
struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};
9298 | |||||
/// `noundef` for call site argument positions; a call site argument behaves
/// like a floating value here, so reuse AANoUndefFloating wholesale.
struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};
9306 | |||||
/// `noundef` for call site return positions; derived from the callee's
/// return position by the AACallSiteReturnedFromReturned helper.
struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
9315 | |||||
9316 | struct AACallEdgesImpl : public AACallEdges { | ||||
9317 | AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {} | ||||
9318 | |||||
9319 | const SetVector<Function *> &getOptimisticEdges() const override { | ||||
9320 | return CalledFunctions; | ||||
9321 | } | ||||
9322 | |||||
9323 | bool hasUnknownCallee() const override { return HasUnknownCallee; } | ||||
9324 | |||||
9325 | bool hasNonAsmUnknownCallee() const override { | ||||
9326 | return HasUnknownCalleeNonAsm; | ||||
9327 | } | ||||
9328 | |||||
9329 | const std::string getAsStr() const override { | ||||
9330 | return "CallEdges[" + std::to_string(HasUnknownCallee) + "," + | ||||
9331 | std::to_string(CalledFunctions.size()) + "]"; | ||||
9332 | } | ||||
9333 | |||||
9334 | void trackStatistics() const override {} | ||||
9335 | |||||
9336 | protected: | ||||
9337 | void addCalledFunction(Function *Fn, ChangeStatus &Change) { | ||||
9338 | if (CalledFunctions.insert(Fn)) { | ||||
9339 | Change = ChangeStatus::CHANGED; | ||||
9340 | LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AACallEdges] New call edge: " << Fn->getName() << "\n"; } } while (false) | ||||
9341 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AACallEdges] New call edge: " << Fn->getName() << "\n"; } } while (false); | ||||
9342 | } | ||||
9343 | } | ||||
9344 | |||||
9345 | void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) { | ||||
9346 | if (!HasUnknownCallee) | ||||
9347 | Change = ChangeStatus::CHANGED; | ||||
9348 | if (NonAsm && !HasUnknownCalleeNonAsm) | ||||
9349 | Change = ChangeStatus::CHANGED; | ||||
9350 | HasUnknownCalleeNonAsm |= NonAsm; | ||||
9351 | HasUnknownCallee = true; | ||||
9352 | } | ||||
9353 | |||||
9354 | private: | ||||
9355 | /// Optimistic set of functions that might be called by this position. | ||||
9356 | SetVector<Function *> CalledFunctions; | ||||
9357 | |||||
9358 | /// Is there any call with a unknown callee. | ||||
9359 | bool HasUnknownCallee = false; | ||||
9360 | |||||
9361 | /// Is there any call with a unknown callee, excluding any inline asm. | ||||
9362 | bool HasUnknownCalleeNonAsm = false; | ||||
9363 | }; | ||||
9364 | |||||
9365 | struct AACallEdgesCallSite : public AACallEdgesImpl { | ||||
9366 | AACallEdgesCallSite(const IRPosition &IRP, Attributor &A) | ||||
9367 | : AACallEdgesImpl(IRP, A) {} | ||||
9368 | /// See AbstractAttribute::updateImpl(...). | ||||
9369 | ChangeStatus updateImpl(Attributor &A) override { | ||||
9370 | ChangeStatus Change = ChangeStatus::UNCHANGED; | ||||
9371 | |||||
9372 | auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool { | ||||
9373 | if (Function *Fn = dyn_cast<Function>(&V)) { | ||||
9374 | addCalledFunction(Fn, Change); | ||||
9375 | } else { | ||||
9376 | LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n"; } } while (false); | ||||
9377 | setHasUnknownCallee(true, Change); | ||||
9378 | } | ||||
9379 | |||||
9380 | // Explore all values. | ||||
9381 | return true; | ||||
9382 | }; | ||||
9383 | |||||
9384 | SmallVector<AA::ValueAndContext> Values; | ||||
9385 | // Process any value that we might call. | ||||
9386 | auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) { | ||||
9387 | bool UsedAssumedInformation = false; | ||||
9388 | Values.clear(); | ||||
9389 | if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values, | ||||
9390 | AA::AnyScope, UsedAssumedInformation)) { | ||||
9391 | Values.push_back({*V, CtxI}); | ||||
9392 | } | ||||
9393 | for (auto &VAC : Values) | ||||
9394 | VisitValue(*VAC.getValue(), VAC.getCtxI()); | ||||
9395 | }; | ||||
9396 | |||||
9397 | CallBase *CB = cast<CallBase>(getCtxI()); | ||||
9398 | |||||
9399 | if (CB->isInlineAsm()) { | ||||
9400 | if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") && | ||||
9401 | !hasAssumption(*CB, "ompx_no_call_asm")) | ||||
9402 | setHasUnknownCallee(false, Change); | ||||
9403 | return Change; | ||||
9404 | } | ||||
9405 | |||||
9406 | // Process callee metadata if available. | ||||
9407 | if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) { | ||||
9408 | for (const auto &Op : MD->operands()) { | ||||
9409 | Function *Callee = mdconst::dyn_extract_or_null<Function>(Op); | ||||
9410 | if (Callee) | ||||
9411 | addCalledFunction(Callee, Change); | ||||
9412 | } | ||||
9413 | return Change; | ||||
9414 | } | ||||
9415 | |||||
9416 | // The most simple case. | ||||
9417 | ProcessCalledOperand(CB->getCalledOperand(), CB); | ||||
9418 | |||||
9419 | // Process callback functions. | ||||
9420 | SmallVector<const Use *, 4u> CallbackUses; | ||||
9421 | AbstractCallSite::getCallbackUses(*CB, CallbackUses); | ||||
9422 | for (const Use *U : CallbackUses) | ||||
9423 | ProcessCalledOperand(U->get(), CB); | ||||
9424 | |||||
9425 | return Change; | ||||
9426 | } | ||||
9427 | }; | ||||
9428 | |||||
9429 | struct AACallEdgesFunction : public AACallEdgesImpl { | ||||
9430 | AACallEdgesFunction(const IRPosition &IRP, Attributor &A) | ||||
9431 | : AACallEdgesImpl(IRP, A) {} | ||||
9432 | |||||
9433 | /// See AbstractAttribute::updateImpl(...). | ||||
9434 | ChangeStatus updateImpl(Attributor &A) override { | ||||
9435 | ChangeStatus Change = ChangeStatus::UNCHANGED; | ||||
9436 | |||||
9437 | auto ProcessCallInst = [&](Instruction &Inst) { | ||||
9438 | CallBase &CB = cast<CallBase>(Inst); | ||||
9439 | |||||
9440 | auto &CBEdges = A.getAAFor<AACallEdges>( | ||||
9441 | *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); | ||||
9442 | if (CBEdges.hasNonAsmUnknownCallee()) | ||||
9443 | setHasUnknownCallee(true, Change); | ||||
9444 | if (CBEdges.hasUnknownCallee()) | ||||
9445 | setHasUnknownCallee(false, Change); | ||||
9446 | |||||
9447 | for (Function *F : CBEdges.getOptimisticEdges()) | ||||
9448 | addCalledFunction(F, Change); | ||||
9449 | |||||
9450 | return true; | ||||
9451 | }; | ||||
9452 | |||||
9453 | // Visit all callable instructions. | ||||
9454 | bool UsedAssumedInformation = false; | ||||
9455 | if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this, | ||||
9456 | UsedAssumedInformation, | ||||
9457 | /* CheckBBLivenessOnly */ true)) { | ||||
9458 | // If we haven't looked at all call like instructions, assume that there | ||||
9459 | // are unknown callees. | ||||
9460 | setHasUnknownCallee(true, Change); | ||||
9461 | } | ||||
9462 | |||||
9463 | return Change; | ||||
9464 | } | ||||
9465 | }; | ||||
9466 | |||||
/// AAFunctionReachability for function positions: answers, and caches,
/// whether this function (as a whole, from a given call base, or from a
/// given instruction) can transitively reach a specific function.
struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  /// Cached tri-state answers (reachable / unreachable-so-far / unknown)
  /// for a set of target functions.
  struct QuerySet {
    void markReachable(const Function &Fn) {
      Reachable.insert(&Fn);
      Unreachable.erase(&Fn);
    }

    /// If there is no information about the function None is returned.
    Optional<bool> isCachedReachable(const Function &Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(&Fn))
        return true;

      if (Unreachable.count(&Fn))
        return false;

      return llvm::None;
    }

    /// Set of functions that we know for sure is reachable.
    DenseSet<const Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<const Function *> Unreachable;

    /// If we can reach a function with a call to a unknown function we assume
    /// that we can reach any function.
    bool CanReachUnknownCallee = false;
  };

  /// A QuerySet that can also answer and update queries against a list of
  /// call-edge attributes.
  struct QueryResolver : public QuerySet {
    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      // Any unknown callee makes every query answer "reachable".
      for (const auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee) {
            LLVM_DEBUG(dbgs()
                       << "[QueryResolver] Edges include unknown callee!\n");
            Change = ChangeStatus::CHANGED;
          }
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      // Re-test pending "unreachable" targets; new edges may have made them
      // reachable since the last update.
      for (const Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(*Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList,
                     const Function &Fn) {
      Optional<bool> Cached = isCachedReachable(Fn);
      if (Cached)
        return Cached.value();

      // The query was not cached, thus it is new. We need to request an update
      // explicitly to make sure the information is properly run to a
      // fixpoint.
      A.registerForUpdate(AA);

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(&Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }

    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          const Function &Fn) const {

      // Handle the most trivial case first.
      for (const auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(const_cast<Function *>(&Fn)))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (const auto &AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // Functions that do not call back into the module can be ignored.
          if (Edge->hasFnAttribute(Attribute::NoCallback))
            continue;

          // We don't need a dependency if the result is reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now, set dependencies and leave.
      for (const auto *Dep : Deps)
        A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);

      return false;
    }
  };

  /// Get call edges that can be reached by this instruction.
  bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
                             const Instruction &Inst,
                             SmallVector<const AACallEdges *> &Result) const {
    // Determine call like instructions that we can reach from the inst.
    auto CheckCallBase = [&](Instruction &CBInst) {
      if (!Reachability.isAssumedReachable(A, Inst, CBInst))
        return true;

      auto &CB = cast<CallBase>(CBInst);
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

      Result.push_back(&AAEdges);
      return true;
    };

    bool UsedAssumedInformation = false;
    return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
                                             UsedAssumedInformation,
                                             /* CheckBBLivenessOnly */ true);
  }

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
                                                          {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn
  bool canReach(Attributor &A, CallBase &CB,
                const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);

    return Result;
  }

  bool instructionCanReach(Attributor &A, const Instruction &Inst,
                           const Function &Fn) const override {
    if (!isValidState())
      return true;

    const auto &Reachability = A.getAAFor<AAReachability>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);

    SmallVector<const AACallEdges *> CallEdges;
    bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
    if (!AllKnown) {
      LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges known, "
                           "may reach unknown callee!\n");
      InstQSet.CanReachUnknownCallee = true;
    }

    return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    // Update the Instruction queries.
    if (!InstQueries.empty()) {
      const AAReachability *Reachability = &A.getAAFor<AAReachability>(
          *this, IRPosition::function(*getAssociatedFunction()),
          DepClassTy::REQUIRED);

      // Check for local callbases first.
      for (auto &InstPair : InstQueries) {
        SmallVector<const AACallEdges *> CallEdges;
        bool AllKnown =
            getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
        // Update will return CHANGED if this affects any queries.
        if (!AllKnown) {
          LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges "
                               "known, may reach unknown callee!\n");
          InstPair.second.CanReachUnknownCallee = true;
        }
        Change |= InstPair.second.update(A, *this, CallEdges);
      }
    }

    return Change;
  }

  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           (canReachUnknownCallee()
                ? "unknown"
                : (std::to_string(WholeFunction.Reachable.size()) + "," +
                   std::to_string(QueryCount))) +
           "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer whether the whole function can reach a specific function.
  QueryResolver WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
  MapVector<const CallBase *, QueryResolver> CBQueries;

  /// This is for instruction queries that scan "forward".
  MapVector<const Instruction *, QueryResolver> InstQueries;
};
9751 | } // namespace | ||||
9752 | |||||
9753 | template <typename AAType> | ||||
9754 | static Optional<Constant *> | ||||
9755 | askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA, | ||||
9756 | const IRPosition &IRP, Type &Ty) { | ||||
9757 | if (!Ty.isIntegerTy()) | ||||
9758 | return nullptr; | ||||
9759 | |||||
9760 | // This will also pass the call base context. | ||||
9761 | const auto &AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE); | ||||
9762 | |||||
9763 | Optional<Constant *> COpt = AA.getAssumedConstant(A); | ||||
9764 | |||||
9765 | if (!COpt.has_value()) { | ||||
9766 | A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL); | ||||
9767 | return llvm::None; | ||||
9768 | } | ||||
9769 | if (auto *C = COpt.value()) { | ||||
9770 | A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL); | ||||
9771 | return C; | ||||
9772 | } | ||||
9773 | return nullptr; | ||||
9774 | } | ||||
9775 | |||||
9776 | Value *AAPotentialValues::getSingleValue( | ||||
9777 | Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, | ||||
9778 | SmallVectorImpl<AA::ValueAndContext> &Values) { | ||||
9779 | Type &Ty = *IRP.getAssociatedType(); | ||||
9780 | Optional<Value *> V; | ||||
9781 | for (auto &It : Values) { | ||||
9782 | V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty); | ||||
9783 | if (V.has_value() && !V.value()) | ||||
9784 | break; | ||||
9785 | } | ||||
9786 | if (!V.has_value()) | ||||
9787 | return UndefValue::get(&Ty); | ||||
9788 | return V.value(); | ||||
9789 | } | ||||
9790 | |||||
9791 | namespace { | ||||
9792 | struct AAPotentialValuesImpl : AAPotentialValues { | ||||
9793 | using StateType = PotentialLLVMValuesState; | ||||
9794 | |||||
  /// Construct the generic potential-values implementation for \p IRP.
  AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAPotentialValues(IRP, A) {}
9797 | |||||
9798 | /// See AbstractAttribute::initialize(..). | ||||
9799 | void initialize(Attributor &A) override { | ||||
9800 | if (A.hasSimplificationCallback(getIRPosition())) { | ||||
9801 | indicatePessimisticFixpoint(); | ||||
9802 | return; | ||||
9803 | } | ||||
9804 | Value *Stripped = getAssociatedValue().stripPointerCasts(); | ||||
9805 | if (isa<Constant>(Stripped)) { | ||||
9806 | addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope, | ||||
9807 | getAnchorScope()); | ||||
9808 | indicateOptimisticFixpoint(); | ||||
9809 | return; | ||||
9810 | } | ||||
9811 | AAPotentialValues::initialize(A); | ||||
9812 | } | ||||
9813 | |||||
9814 | /// See AbstractAttribute::getAsStr(). | ||||
9815 | const std::string getAsStr() const override { | ||||
9816 | std::string Str; | ||||
9817 | llvm::raw_string_ostream OS(Str); | ||||
9818 | OS << getState(); | ||||
9819 | return OS.str(); | ||||
9820 | } | ||||
9821 | |||||
9822 | template <typename AAType> | ||||
9823 | static Optional<Value *> askOtherAA(Attributor &A, | ||||
9824 | const AbstractAttribute &AA, | ||||
9825 | const IRPosition &IRP, Type &Ty) { | ||||
9826 | if (isa<Constant>(IRP.getAssociatedValue())) | ||||
9827 | return &IRP.getAssociatedValue(); | ||||
9828 | Optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty); | ||||
9829 | if (!C) | ||||
9830 | return llvm::None; | ||||
9831 | if (C.value()) | ||||
9832 | if (auto *CC = AA::getWithType(**C, Ty)) | ||||
9833 | return CC; | ||||
9834 | return nullptr; | ||||
9835 | } | ||||
9836 | |||||
  /// Add \p V (as seen at \p CtxI, valid in scope \p S) to \p State, first
  /// trying to refine it: integer values are cross-checked against the
  /// constant-range and potential-constant-values AAs before being recorded.
  void addValue(Attributor &A, StateType &State, Value &V,
                const Instruction *CtxI, AA::ValueScope S,
                Function *AnchorScope) const {

    // Prefer the call site argument position if V is an argument of CtxI,
    // so call-site-specific information is used.
    IRPosition ValIRP = IRPosition::value(V);
    if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
      for (const auto &U : CB->args()) {
        if (U.get() != &V)
          continue;
        ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
        break;
      }
    }

    Value *VPtr = &V;
    if (ValIRP.getAssociatedType()->isIntegerTy()) {
      Type &Ty = *getAssociatedType();
      // First try to reduce the value to a single constant via the
      // constant-range AA.
      Optional<Value *> SimpleV =
          askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
      if (SimpleV.has_value() && !SimpleV.value()) {
        // No single constant; enumerate the potential constant set instead.
        auto &PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
            *this, ValIRP, DepClassTy::OPTIONAL);
        if (PotentialConstantsAA.isValidState()) {
          // Constants carry no context instruction (nullptr below).
          for (const auto &It : PotentialConstantsAA.getAssumedSet())
            State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
          if (PotentialConstantsAA.undefIsContained())
            State.unionAssumed({{*UndefValue::get(&Ty), nullptr}, S});
          return;
        }
      }
      // Still pending; do not record anything yet.
      if (!SimpleV.has_value())
        return;

      if (SimpleV.value())
        VPtr = SimpleV.value();
    }

    if (isa<ConstantInt>(VPtr))
      CtxI = nullptr;
    // Values not valid in the anchor scope are only usable interprocedurally.
    if (!AA::isValidInScope(*VPtr, AnchorScope))
      S = AA::ValueScope(S | AA::Interprocedural);

    State.unionAssumed({{*VPtr, CtxI}, S});
  }
9881 | |||||
9882 | /// Helper struct to tie a value+context pair together with the scope for | ||||
9883 | /// which this is the simplified version. | ||||
9884 | struct ItemInfo { | ||||
9885 | AA::ValueAndContext I; | ||||
9886 | AA::ValueScope S; | ||||
9887 | |||||
9888 | bool operator==(const ItemInfo &II) const { | ||||
9889 | return II.I == I && II.S == S; | ||||
9890 | }; | ||||
9891 | bool operator<(const ItemInfo &II) const { | ||||
9892 | if (I == II.I) | ||||
9893 | return S < II.S; | ||||
9894 | return I < II.I; | ||||
9895 | }; | ||||
9896 | }; | ||||
9897 | |||||
  /// Look up the already simplified values of \p IRP (via other
  /// AAPotentialValues instances) for the scopes selected by \p S and add
  /// them to our state. Returns false, leaving the state untouched, if the
  /// values for any requested scope could not be determined.
  bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
    // Accumulate, per value+context pair, the union of scopes it is valid in.
    SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
    for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
      if (!(CS & S))
        continue;

      bool UsedAssumedInformation = false;
      SmallVector<AA::ValueAndContext> Values;
      if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
                                        UsedAssumedInformation))
        return false;

      for (auto &It : Values)
        ValueScopeMap[It] += CS;
    }
    // Only commit to the state once all requested scopes succeeded.
    for (auto &It : ValueScopeMap)
      addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
               AA::ValueScope(It.second), getAnchorScope());

    return true;
  }
9919 | |||||
  /// Give up on precise intraprocedural simplification: rebuild the state so
  /// that it keeps all values valid interprocedurally, while the associated
  /// value itself becomes the only intraprocedural "simplification".
  void giveUpOnIntraprocedural(Attributor &A) {
    auto NewS = StateType::getBestState(getState());
    for (const auto &It : getAssumedSet()) {
      // Drop values that were only valid intraprocedurally.
      if (It.second == AA::Intraprocedural)
        continue;
      addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
               AA::Interprocedural, getAnchorScope());
    }
    assert(!undefIsContained() && "Undef should be an explicit value!");
    // The associated value acts as the intraprocedural placeholder.
    addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
             getAnchorScope());
    getState() = NewS;
  }
9933 | |||||
9934 | /// See AbstractState::indicatePessimisticFixpoint(...). | ||||
9935 | ChangeStatus indicatePessimisticFixpoint() override { | ||||
9936 | getState() = StateType::getBestState(getState()); | ||||
9937 | getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope}); | ||||
9938 | AAPotentialValues::indicateOptimisticFixpoint(); | ||||
9939 | return ChangeStatus::CHANGED; | ||||
9940 | } | ||||
9941 | |||||
  /// See AbstractAttribute::updateImpl(...).
  ///
  /// The base implementation cannot improve on the initial state; concrete
  /// subclasses override this. Fall back to the pessimistic fixpoint.
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }
9946 | |||||
  /// See AbstractAttribute::manifest(...).
  ///
  /// Try to replace the associated value with a single simplified value,
  /// trying the interprocedural result before the intraprocedural one.
  ChangeStatus manifest(Attributor &A) override {
    SmallVector<AA::ValueAndContext> Values;
    for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
      Values.clear();
      if (!getAssumedSimplifiedValues(A, Values, S))
        continue;
      Value &OldV = getAssociatedValue();
      // Do not bother replacing a value that is already undef.
      if (isa<UndefValue>(OldV))
        continue;
      // A replacement is only possible if all values collapse into one.
      Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
      if (!NewV || NewV == &OldV)
        continue;
      // The replacement must be available/valid at the use position.
      if (getCtxI() &&
          !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
        continue;
      if (A.changeAfterManifest(getIRPosition(), *NewV))
        return ChangeStatus::CHANGED;
    }
    return ChangeStatus::UNCHANGED;
  }
9968 | |||||
9969 | bool getAssumedSimplifiedValues(Attributor &A, | ||||
9970 | SmallVectorImpl<AA::ValueAndContext> &Values, | ||||
9971 | AA::ValueScope S) const override { | ||||
9972 | if (!isValidState()) | ||||
9973 | return false; | ||||
9974 | for (const auto &It : getAssumedSet()) | ||||
9975 | if (It.second & S) | ||||
9976 | Values.push_back(It.first); | ||||
9977 | assert(!undefIsContained() && "Undef should be an explicit value!")(static_cast <bool> (!undefIsContained() && "Undef should be an explicit value!" ) ? void (0) : __assert_fail ("!undefIsContained() && \"Undef should be an explicit value!\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 9977, __extension__ __PRETTY_FUNCTION__)); | ||||
9978 | return true; | ||||
9979 | } | ||||
9980 | }; | ||||
9981 | |||||
9982 | struct AAPotentialValuesFloating : AAPotentialValuesImpl { | ||||
9983 | AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A) | ||||
9984 | : AAPotentialValuesImpl(IRP, A) {} | ||||
9985 | |||||
9986 | /// See AbstractAttribute::updateImpl(...). | ||||
9987 | ChangeStatus updateImpl(Attributor &A) override { | ||||
9988 | auto AssumedBefore = getAssumed(); | ||||
9989 | |||||
9990 | genericValueTraversal(A); | ||||
9991 | |||||
9992 | return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED | ||||
9993 | : ChangeStatus::CHANGED; | ||||
9994 | } | ||||
9995 | |||||
9996 | /// Helper struct to remember which AAIsDead instances we actually used. | ||||
9997 | struct LivenessInfo { | ||||
9998 | const AAIsDead *LivenessAA = nullptr; | ||||
9999 | bool AnyDead = false; | ||||
10000 | }; | ||||
10001 | |||||
10002 | /// Check if \p Cmp is a comparison we can simplify. | ||||
10003 | /// | ||||
10004 | /// We handle multiple cases, one in which at least one operand is an | ||||
10005 | /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other | ||||
10006 | /// operand. Return true if successful, in that case Worklist will be updated. | ||||
10007 | bool handleCmp(Attributor &A, CmpInst &Cmp, ItemInfo II, | ||||
10008 | SmallVectorImpl<ItemInfo> &Worklist) { | ||||
10009 | Value *LHS = Cmp.getOperand(0); | ||||
10010 | Value *RHS = Cmp.getOperand(1); | ||||
10011 | |||||
10012 | // Simplify the operands first. | ||||
10013 | bool UsedAssumedInformation = false; | ||||
10014 | const auto &SimplifiedLHS = A.getAssumedSimplified( | ||||
10015 | IRPosition::value(*LHS, getCallBaseContext()), *this, | ||||
10016 | UsedAssumedInformation, AA::Intraprocedural); | ||||
10017 | if (!SimplifiedLHS.has_value()) | ||||
10018 | return true; | ||||
10019 | if (!SimplifiedLHS.value()) | ||||
10020 | return false; | ||||
10021 | LHS = *SimplifiedLHS; | ||||
10022 | |||||
10023 | const auto &SimplifiedRHS = A.getAssumedSimplified( | ||||
10024 | IRPosition::value(*RHS, getCallBaseContext()), *this, | ||||
10025 | UsedAssumedInformation, AA::Intraprocedural); | ||||
10026 | if (!SimplifiedRHS.has_value()) | ||||
10027 | return true; | ||||
10028 | if (!SimplifiedRHS.value()) | ||||
10029 | return false; | ||||
10030 | RHS = *SimplifiedRHS; | ||||
10031 | |||||
10032 | LLVMContext &Ctx = Cmp.getContext(); | ||||
10033 | // Handle the trivial case first in which we don't even need to think about | ||||
10034 | // null or non-null. | ||||
10035 | if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) { | ||||
10036 | Constant *NewV = | ||||
10037 | ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual()); | ||||
10038 | addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, | ||||
10039 | getAnchorScope()); | ||||
10040 | return true; | ||||
10041 | } | ||||
10042 | |||||
10043 | // From now on we only handle equalities (==, !=). | ||||
10044 | ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp); | ||||
10045 | if (!ICmp || !ICmp->isEquality()) | ||||
10046 | return false; | ||||
10047 | |||||
10048 | bool LHSIsNull = isa<ConstantPointerNull>(LHS); | ||||
10049 | bool RHSIsNull = isa<ConstantPointerNull>(RHS); | ||||
10050 | if (!LHSIsNull && !RHSIsNull) | ||||
10051 | return false; | ||||
10052 | |||||
10053 | // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the | ||||
10054 | // non-nullptr operand and if we assume it's non-null we can conclude the | ||||
10055 | // result of the comparison. | ||||
10056 | assert((LHSIsNull || RHSIsNull) &&(static_cast <bool> ((LHSIsNull || RHSIsNull) && "Expected nullptr versus non-nullptr comparison at this point" ) ? void (0) : __assert_fail ("(LHSIsNull || RHSIsNull) && \"Expected nullptr versus non-nullptr comparison at this point\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 10057, __extension__ __PRETTY_FUNCTION__)) | ||||
10057 | "Expected nullptr versus non-nullptr comparison at this point")(static_cast <bool> ((LHSIsNull || RHSIsNull) && "Expected nullptr versus non-nullptr comparison at this point" ) ? void (0) : __assert_fail ("(LHSIsNull || RHSIsNull) && \"Expected nullptr versus non-nullptr comparison at this point\"" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 10057, __extension__ __PRETTY_FUNCTION__)); | ||||
10058 | |||||
10059 | // The index is the operand that we assume is not null. | ||||
10060 | unsigned PtrIdx = LHSIsNull; | ||||
10061 | auto &PtrNonNullAA = A.getAAFor<AANonNull>( | ||||
10062 | *this, IRPosition::value(*ICmp->getOperand(PtrIdx)), | ||||
10063 | DepClassTy::REQUIRED); | ||||
10064 | if (!PtrNonNullAA.isAssumedNonNull()) | ||||
10065 | return false; | ||||
10066 | |||||
10067 | // The new value depends on the predicate, true for != and false for ==. | ||||
10068 | Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx), | ||||
10069 | ICmp->getPredicate() == CmpInst::ICMP_NE); | ||||
10070 | addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, getAnchorScope()); | ||||
10071 | return true; | ||||
10072 | } | ||||
10073 | |||||
10074 | bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II, | ||||
10075 | SmallVectorImpl<ItemInfo> &Worklist) { | ||||
10076 | const Instruction *CtxI = II.I.getCtxI(); | ||||
10077 | bool UsedAssumedInformation = false; | ||||
10078 | |||||
10079 | Optional<Constant *> C = | ||||
10080 | A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation); | ||||
10081 | bool NoValueYet = !C.has_value(); | ||||
10082 | if (NoValueYet || isa_and_nonnull<UndefValue>(*C)) | ||||
10083 | return true; | ||||
10084 | if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) { | ||||
10085 | if (CI->isZero()) | ||||
10086 | Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S}); | ||||
10087 | else | ||||
10088 | Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S}); | ||||
10089 | } else { | ||||
10090 | // We could not simplify the condition, assume both values. | ||||
10091 | Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S}); | ||||
10092 | Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S}); | ||||
10093 | } | ||||
10094 | return true; | ||||
10095 | } | ||||
10096 | |||||
10097 | bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II, | ||||
10098 | SmallVectorImpl<ItemInfo> &Worklist) { | ||||
10099 | SmallSetVector<Value *, 4> PotentialCopies; | ||||
10100 | SmallSetVector<Instruction *, 4> PotentialValueOrigins; | ||||
10101 | bool UsedAssumedInformation = false; | ||||
10102 | if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies, | ||||
10103 | PotentialValueOrigins, *this, | ||||
10104 | UsedAssumedInformation, | ||||
10105 | /* OnlyExact */ true)) { | ||||
10106 | LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Failed to get potentially " "loaded values for load instruction " << LI << "\n" ; } } while (false) | ||||
10107 | "loaded values for load instruction "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Failed to get potentially " "loaded values for load instruction " << LI << "\n" ; } } while (false) | ||||
10108 | << LI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Failed to get potentially " "loaded values for load instruction " << LI << "\n" ; } } while (false); | ||||
10109 | return false; | ||||
10110 | } | ||||
10111 | |||||
10112 | // Do not simplify loads that are only used in llvm.assume if we cannot also | ||||
10113 | // remove all stores that may feed into the load. The reason is that the | ||||
10114 | // assume is probably worth something as long as the stores are around. | ||||
10115 | InformationCache &InfoCache = A.getInfoCache(); | ||||
10116 | if (InfoCache.isOnlyUsedByAssume(LI)) { | ||||
10117 | if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) { | ||||
10118 | if (!I) | ||||
10119 | return true; | ||||
10120 | if (auto *SI = dyn_cast<StoreInst>(I)) | ||||
10121 | return A.isAssumedDead(SI->getOperandUse(0), this, | ||||
10122 | /* LivenessAA */ nullptr, | ||||
10123 | UsedAssumedInformation, | ||||
10124 | /* CheckBBLivenessOnly */ false); | ||||
10125 | return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr, | ||||
10126 | UsedAssumedInformation, | ||||
10127 | /* CheckBBLivenessOnly */ false); | ||||
10128 | })) { | ||||
10129 | LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is onl used by assumes "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Load is onl used by assumes " "and we cannot delete all the stores: " << LI << "\n"; } } while (false) | ||||
10130 | "and we cannot delete all the stores: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Load is onl used by assumes " "and we cannot delete all the stores: " << LI << "\n"; } } while (false) | ||||
10131 | << LI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Load is onl used by assumes " "and we cannot delete all the stores: " << LI << "\n"; } } while (false); | ||||
10132 | return false; | ||||
10133 | } | ||||
10134 | } | ||||
10135 | |||||
10136 | // Values have to be dynamically unique or we loose the fact that a | ||||
10137 | // single llvm::Value might represent two runtime values (e.g., | ||||
10138 | // stack locations in different recursive calls). | ||||
10139 | const Instruction *CtxI = II.I.getCtxI(); | ||||
10140 | bool ScopeIsLocal = (II.S & AA::Intraprocedural); | ||||
10141 | bool AllLocal = ScopeIsLocal; | ||||
10142 | bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) { | ||||
10143 | AllLocal &= AA::isValidInScope(*PC, getAnchorScope()); | ||||
10144 | return AA::isDynamicallyUnique(A, *this, *PC); | ||||
10145 | }); | ||||
10146 | if (!DynamicallyUnique) { | ||||
10147 | LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Not all potentially loaded " "values are dynamically unique: " << LI << "\n"; } } while (false) | ||||
10148 | "values are dynamically unique: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Not all potentially loaded " "values are dynamically unique: " << LI << "\n"; } } while (false) | ||||
10149 | << LI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "[AAPotentialValues] Not all potentially loaded " "values are dynamically unique: " << LI << "\n"; } } while (false); | ||||
10150 | return false; | ||||
10151 | } | ||||
10152 | |||||
10153 | for (auto *PotentialCopy : PotentialCopies) { | ||||
10154 | if (AllLocal) { | ||||
10155 | Worklist.push_back({{*PotentialCopy, CtxI}, II.S}); | ||||
10156 | } else { | ||||
10157 | Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural}); | ||||
10158 | } | ||||
10159 | } | ||||
10160 | if (!AllLocal && ScopeIsLocal) | ||||
10161 | addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope()); | ||||
10162 | return true; | ||||
10163 | } | ||||
10164 | |||||
10165 | bool handlePHINode( | ||||
10166 | Attributor &A, PHINode &PHI, ItemInfo II, | ||||
10167 | SmallVectorImpl<ItemInfo> &Worklist, | ||||
10168 | SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) { | ||||
10169 | auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & { | ||||
10170 | LivenessInfo &LI = LivenessAAs[&F]; | ||||
10171 | if (!LI.LivenessAA) | ||||
10172 | LI.LivenessAA = &A.getAAFor<AAIsDead>(*this, IRPosition::function(F), | ||||
10173 | DepClassTy::NONE); | ||||
10174 | return LI; | ||||
10175 | }; | ||||
10176 | |||||
10177 | LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction()); | ||||
10178 | for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) { | ||||
10179 | BasicBlock *IncomingBB = PHI.getIncomingBlock(u); | ||||
10180 | if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) { | ||||
10181 | LI.AnyDead = true; | ||||
10182 | continue; | ||||
10183 | } | ||||
10184 | Worklist.push_back( | ||||
10185 | {{*PHI.getIncomingValue(u), IncomingBB->getTerminator()}, II.S}); | ||||
10186 | } | ||||
10187 | return true; | ||||
10188 | } | ||||
10189 | |||||
10190 | /// Use the generic, non-optimistic InstSimplfy functionality if we managed to | ||||
10191 | /// simplify any operand of the instruction \p I. Return true if successful, | ||||
10192 | /// in that case Worklist will be updated. | ||||
10193 | bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II, | ||||
10194 | SmallVectorImpl<ItemInfo> &Worklist) { | ||||
10195 | bool SomeSimplified = false; | ||||
10196 | bool UsedAssumedInformation = false; | ||||
10197 | |||||
10198 | SmallVector<Value *, 8> NewOps(I.getNumOperands()); | ||||
10199 | int Idx = 0; | ||||
10200 | for (Value *Op : I.operands()) { | ||||
10201 | const auto &SimplifiedOp = A.getAssumedSimplified( | ||||
10202 | IRPosition::value(*Op, getCallBaseContext()), *this, | ||||
10203 | UsedAssumedInformation, AA::Intraprocedural); | ||||
10204 | // If we are not sure about any operand we are not sure about the entire | ||||
10205 | // instruction, we'll wait. | ||||
10206 | if (!SimplifiedOp.has_value()) | ||||
10207 | return true; | ||||
10208 | |||||
10209 | if (SimplifiedOp.value()) | ||||
10210 | NewOps[Idx] = SimplifiedOp.value(); | ||||
10211 | else | ||||
10212 | NewOps[Idx] = Op; | ||||
10213 | |||||
10214 | SomeSimplified |= (NewOps[Idx] != Op); | ||||
10215 | ++Idx; | ||||
10216 | } | ||||
10217 | |||||
10218 | // We won't bother with the InstSimplify interface if we didn't simplify any | ||||
10219 | // operand ourselves. | ||||
10220 | if (!SomeSimplified) | ||||
10221 | return false; | ||||
10222 | |||||
10223 | InformationCache &InfoCache = A.getInfoCache(); | ||||
10224 | Function *F = I.getFunction(); | ||||
10225 | const auto *DT = | ||||
10226 | InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); | ||||
10227 | const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); | ||||
10228 | auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); | ||||
10229 | OptimizationRemarkEmitter *ORE = nullptr; | ||||
10230 | |||||
10231 | const DataLayout &DL = I.getModule()->getDataLayout(); | ||||
10232 | SimplifyQuery Q(DL, TLI, DT, AC, &I); | ||||
10233 | Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q, ORE); | ||||
10234 | if (!NewV || NewV == &I) | ||||
10235 | return false; | ||||
10236 | |||||
10237 | LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Generic inst " << I << " assumed simplified to " << *NewV << "\n"; } } while (false) | ||||
10238 | << *NewV << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Generic inst " << I << " assumed simplified to " << *NewV << "\n"; } } while (false); | ||||
10239 | Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S}); | ||||
10240 | return true; | ||||
10241 | } | ||||
10242 | |||||
10243 | bool simplifyInstruction( | ||||
10244 | Attributor &A, Instruction &I, ItemInfo II, | ||||
10245 | SmallVectorImpl<ItemInfo> &Worklist, | ||||
10246 | SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) { | ||||
10247 | if (auto *CI = dyn_cast<CmpInst>(&I)) | ||||
10248 | if (handleCmp(A, *CI, II, Worklist)) | ||||
10249 | return true; | ||||
10250 | |||||
10251 | switch (I.getOpcode()) { | ||||
10252 | case Instruction::Select: | ||||
10253 | return handleSelectInst(A, cast<SelectInst>(I), II, Worklist); | ||||
10254 | case Instruction::PHI: | ||||
10255 | return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs); | ||||
10256 | case Instruction::Load: | ||||
10257 | return handleLoadInst(A, cast<LoadInst>(I), II, Worklist); | ||||
10258 | default: | ||||
10259 | return handleGenericInst(A, I, II, Worklist); | ||||
10260 | }; | ||||
10261 | return false; | ||||
10262 | } | ||||
10263 | |||||
10264 | void genericValueTraversal(Attributor &A) { | ||||
10265 | SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs; | ||||
10266 | |||||
10267 | Value *InitialV = &getAssociatedValue(); | ||||
10268 | SmallSet<ItemInfo, 16> Visited; | ||||
10269 | SmallVector<ItemInfo, 16> Worklist; | ||||
10270 | Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope}); | ||||
10271 | |||||
10272 | int Iteration = 0; | ||||
10273 | do { | ||||
10274 | ItemInfo II = Worklist.pop_back_val(); | ||||
10275 | Value *V = II.I.getValue(); | ||||
10276 | assert(V)(static_cast <bool> (V) ? void (0) : __assert_fail ("V" , "llvm/lib/Transforms/IPO/AttributorAttributes.cpp", 10276, __extension__ __PRETTY_FUNCTION__)); | ||||
10277 | const Instruction *CtxI = II.I.getCtxI(); | ||||
10278 | AA::ValueScope S = II.S; | ||||
10279 | |||||
10280 | // Check if we should process the current value. To prevent endless | ||||
10281 | // recursion keep a record of the values we followed! | ||||
10282 | if (!Visited.insert(II).second) | ||||
10283 | continue; | ||||
10284 | |||||
10285 | // Make sure we limit the compile time for complex expressions. | ||||
10286 | if (Iteration++ >= MaxPotentialValuesIterations) { | ||||
10287 | LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Generic value traversal reached iteration limit: " << Iteration << "!\n"; } } while (false) | ||||
10288 | << Iteration << "!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("attributor")) { dbgs() << "Generic value traversal reached iteration limit: " << Iteration << "!\n"; } } while (false); | ||||
10289 | addValue(A, getState(), *V, CtxI, S, getAnchorScope()); | ||||
10290 | continue; | ||||
10291 | } | ||||
10292 | |||||
10293 | // Explicitly look through calls with a "returned" attribute if we do | ||||
10294 | // not have a pointer as stripPointerCasts only works on them. | ||||
10295 | Value *NewV = nullptr; | ||||
10296 | if (V->getType()->isPointerTy()) { | ||||
10297 | NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType()); | ||||
10298 | } else { | ||||
10299 | auto *CB = dyn_cast<CallBase>(V); | ||||
10300 | if (CB && CB->getCalledFunction()) { | ||||
10301 | for (Argument &Arg : CB->getCalledFunction()->args()) | ||||
10302 | if (Arg.hasReturnedAttr()) { | ||||
10303 | NewV = CB->getArgOperand(Arg.getArgNo()); | ||||
10304 | break; | ||||
10305 | } | ||||
10306 | } | ||||
10307 | } | ||||
10308 | if (NewV && NewV != V) { | ||||
10309 | Worklist.push_back({{*NewV, CtxI}, S}); | ||||
10310 | continue; | ||||
10311 | } | ||||
10312 | |||||
10313 | if (auto *I = dyn_cast<Instruction>(V)) { | ||||
10314 | if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs)) | ||||
10315 | continue; | ||||
10316 | } | ||||
10317 | |||||
10318 | if (V != InitialV || isa<Argument>(V)) | ||||
10319 | if (recurseForValue(A, IRPosition::value(*V), II.S)) | ||||
10320 | continue; | ||||
10321 | |||||
10322 | // If we haven't stripped anything we give up. | ||||
10323 | if (V == InitialV && CtxI == getCtxI()) { | ||||
10324 | indicatePessimisticFixpoint(); | ||||
10325 | return; | ||||
10326 | } | ||||
10327 | |||||
10328 | addValue(A, getState(), *V, CtxI, S, getAnchorScope()); | ||||
10329 | } while (!Worklist.empty()); | ||||
10330 | |||||
10331 | // If we actually used liveness information so we have to record a | ||||
10332 | // dependence. | ||||
10333 | for (auto &It : LivenessAAs) | ||||
10334 | if (It.second.AnyDead) | ||||
10335 | A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL); | ||||
10336 | } | ||||
10337 | |||||
10338 | /// See AbstractAttribute::trackStatistics() | ||||
10339 | void trackStatistics() const override { | ||||
10340 | STATS_DECLTRACK_FLOATING_ATTR(potential_values){ static llvm::Statistic NumIRFloating_potential_values = {"attributor" , "NumIRFloating_potential_values", ("Number of floating values known to be '" "potential_values" "'")};; ++(NumIRFloating_potential_values ); } | ||||
10341 | } | ||||
10342 | }; | ||||
10343 | |||||
/// AAPotentialValues for argument positions: the potential values of an
/// argument are the simplified values of the corresponding call site
/// arguments at all (known) call sites.
struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
  using Base = AAPotentialValuesImpl;
  AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    auto &Arg = cast<Argument>(getAssociatedValue());
    // Arguments whose pointee value lives in memory (e.g., byval-like
    // attributes) cannot be tracked as SSA values; give up right away.
    if (Arg.hasPointeeInMemoryValueAttr())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto AssumedBefore = getAssumed();

    unsigned CSArgNo = getCallSiteArgNo();

    bool UsedAssumedInformation = false;
    SmallVector<AA::ValueAndContext> Values;
    // Gather the simplified values of the matching call site argument for
    // each abstract call site.
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto CSArgIRP = IRPosition::callsite_argument(ACS, CSArgNo);
      if (CSArgIRP.getPositionKind() == IRP_INVALID)
        return false;

      if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
                                        AA::Interprocedural,
                                        UsedAssumedInformation))
        return false;

      return isValidState();
    };

    // We must see every call site; a single unknown caller invalidates the
    // enumeration.
    if (!A.checkForAllCallSites(CallSitePred, *this,
                                /* RequireAllCallSites */ true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    Function *Fn = getAssociatedFunction();
    bool AnyNonLocal = false;
    for (auto &It : Values) {
      // Constants are valid in any scope.
      if (isa<Constant>(It.getValue())) {
        addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
                 getAnchorScope());
        continue;
      }
      if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
        return indicatePessimisticFixpoint();

      // Arguments of this very function are usable in any scope as well.
      if (auto *Arg = dyn_cast<Argument>(It.getValue()))
        if (Arg->getParent() == Fn) {
          addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
                   getAnchorScope());
          continue;
        }
      // Everything else is only meaningful interprocedurally.
      addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
               getAnchorScope());
      AnyNonLocal = true;
    }
    assert(!undefIsContained() && "Undef should be an explicit value!");
    if (AnyNonLocal)
      giveUpOnIntraprocedural(A);

    return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
                                           : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(potential_values)
  }
};
10416 | |||||
10417 | struct AAPotentialValuesReturned | ||||
10418 | : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> { | ||||
10419 | using Base = | ||||
10420 | AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>; | ||||
10421 | AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A) | ||||
10422 | : Base(IRP, A) {} | ||||
10423 | |||||
10424 | /// See AbstractAttribute::initialize(..). | ||||
10425 | void initialize(Attributor &A) override { | ||||
10426 | if (A.hasSimplificationCallback(getIRPosition())) | ||||
10427 | indicatePessimisticFixpoint(); | ||||
10428 | else | ||||
10429 | AAPotentialValues::initialize(A); | ||||
10430 | } | ||||
10431 | |||||
10432 | ChangeStatus manifest(Attributor &A) override { | ||||
10433 | // We queried AAValueSimplify for the returned values so they will be | ||||
10434 | // replaced if a simplified form was found. Nothing to do here. | ||||
10435 | return ChangeStatus::UNCHANGED; | ||||
10436 | } | ||||
10437 | |||||
10438 | ChangeStatus indicatePessimisticFixpoint() override { | ||||
10439 | return AAPotentialValues::indicatePessimisticFixpoint(); | ||||
10440 | } | ||||
10441 | |||||
10442 | /// See AbstractAttribute::trackStatistics() | ||||
10443 | void trackStatistics() const override { | ||||
10444 | STATS_DECLTRACK_FNRET_ATTR(potential_values){ static llvm::Statistic NumIRFunctionReturn_potential_values = {"attributor", "NumIRFunctionReturn_potential_values", ("Number of " "function returns" " marked '" "potential_values" "'")};; ++ (NumIRFunctionReturn_potential_values); } | ||||
10445 | } | ||||
10446 | }; | ||||
10447 | |||||
/// AAPotentialValues for function positions. Function positions carry no
/// value to simplify, so updateImpl must never be reached (see message).
struct AAPotentialValuesFunction : AAPotentialValuesImpl {
  AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
                     "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(potential_values)
  }
};
10463 | |||||
/// AAPotentialValues for a call-site position; inherits the unreachable
/// updateImpl from the function variant and only adjusts the statistic.
struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
  AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesFunction(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Bumps the "call site marked 'potential_values'" statistic.
    STATS_DECLTRACK_CS_ATTR(potential_values)
  }
};
10473 | |||||
10474 | struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl { | ||||
10475 | AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) | ||||
10476 | : AAPotentialValuesImpl(IRP, A) {} | ||||
10477 | |||||
10478 | /// See AbstractAttribute::updateImpl(...). | ||||
10479 | ChangeStatus updateImpl(Attributor &A) override { | ||||
10480 | auto AssumedBefore = getAssumed(); | ||||
10481 | |||||
10482 | Function *Callee = getAssociatedFunction(); | ||||
10483 | if (!Callee) | ||||
10484 | return indicatePessimisticFixpoint(); | ||||
10485 | |||||
10486 | bool UsedAssumedInformation = false; | ||||
10487 | auto *CB = cast<CallBase>(getCtxI()); | ||||
10488 | if (CB->isMustTailCall() && | ||||
10489 | !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr, | ||||
10490 | UsedAssumedInformation)) | ||||
10491 | return indicatePessimisticFixpoint(); | ||||
10492 | |||||
10493 | SmallVector<AA::ValueAndContext> Values; | ||||
10494 | if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this, | ||||
10495 | Values, AA::Intraprocedural, | ||||
10496 | UsedAssumedInformation)) | ||||
10497 | return indicatePessimisticFixpoint(); | ||||
10498 | |||||
10499 | Function *Caller = CB->getCaller(); | ||||
10500 | |||||
10501 | bool AnyNonLocal = false; | ||||
10502 | for (auto &It : Values) { | ||||
10503 | Value *V = It.getValue(); | ||||
10504 | Optional<Value *> CallerV = A.translateArgumentToCallSiteContent( | ||||
10505 | V, *CB, *this, UsedAssumedInformation); | ||||
10506 | if (!CallerV.has_value()) { | ||||
10507 | // Nothing to do as long as no value was determined. | ||||
10508 | continue; | ||||
10509 | } | ||||
10510 | V = CallerV.value() ? CallerV.value() : V; | ||||
10511 | if (AA::isDynamicallyUnique(A, *this, *V) && | ||||
10512 | AA::isValidInScope(*V, Caller)) { | ||||
10513 | if (CallerV.value()) { | ||||
10514 | SmallVector<AA::ValueAndContext> ArgValues; | ||||
10515 | IRPosition IRP = IRPosition::value(*V); | ||||
10516 | if (auto *Arg = dyn_cast<Argument>(V)) | ||||
10517 | if (Arg->getParent() == CB->getCalledFunction()) | ||||
10518 | IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo()); | ||||
10519 | if (recurseForValue(A, IRP, AA::AnyScope)) | ||||
10520 | continue; | ||||
10521 | } | ||||
10522 | addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope()); | ||||
10523 | } else { | ||||
10524 | AnyNonLocal = true; | ||||
10525 | break; | ||||
10526 | } | ||||
10527 | } | ||||
10528 | if (AnyNonLocal) { | ||||
10529 | Values.clear(); | ||||
10530 | if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this, | ||||
10531 | Values, AA::Interprocedural, | ||||
10532 | UsedAssumedInformation)) | ||||
10533 | return indicatePessimisticFixpoint(); | ||||
10534 | AnyNonLocal = false; | ||||
10535 | getState() = PotentialLLVMValuesState::getBestState(); | ||||
10536 | for (auto &It : Values) { | ||||
10537 | Value *V = It.getValue(); | ||||
10538 | if (!AA::isDynamicallyUnique(A, *this, *V)) | ||||
10539 | return indicatePessimisticFixpoint(); | ||||
10540 | if (AA::isValidInScope(*V, Caller)) { | ||||
10541 | addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope()); | ||||
10542 | } else { | ||||
10543 | AnyNonLocal = true; | ||||
10544 | addValue(A, getState(), *V, CB, AA::Interprocedural, | ||||
10545 | getAnchorScope()); | ||||
10546 | } | ||||
10547 | } | ||||
10548 | if (AnyNonLocal) | ||||
10549 | giveUpOnIntraprocedural(A); | ||||
10550 | } | ||||
10551 | return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED | ||||
10552 | : ChangeStatus::CHANGED; | ||||
10553 | } | ||||
10554 | |||||
10555 | ChangeStatus indicatePessimisticFixpoint() override { | ||||
10556 | return AAPotentialValues::indicatePessimisticFixpoint(); | ||||
10557 | } | ||||
10558 | |||||
10559 | /// See AbstractAttribute::trackStatistics() | ||||
10560 | void trackStatistics() const override { | ||||
10561 | STATS_DECLTRACK_CSRET_ATTR(potential_values){ static llvm::Statistic NumIRCSReturn_potential_values = {"attributor" , "NumIRCSReturn_potential_values", ("Number of " "call site returns" " marked '" "potential_values" "'")};; ++(NumIRCSReturn_potential_values ); } | ||||
10562 | } | ||||
10563 | }; | ||||
10564 | |||||
/// AAPotentialValues for a call-site argument; the floating-value logic
/// applies unchanged, only the statistic differs.
struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
  AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Bumps the "call site arguments marked 'potential_values'" statistic.
    STATS_DECLTRACK_CSARG_ATTR(potential_values)
  }
};
10574 | } // namespace | ||||
10575 | |||||
10576 | /// ---------------------- Assumption Propagation ------------------------------ | ||||
10577 | namespace { | ||||
10578 | struct AAAssumptionInfoImpl : public AAAssumptionInfo { | ||||
10579 | AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A, | ||||
10580 | const DenseSet<StringRef> &Known) | ||||
10581 | : AAAssumptionInfo(IRP, A, Known) {} | ||||
10582 | |||||
10583 | bool hasAssumption(const StringRef Assumption) const override { | ||||
10584 | return isValidState() && setContains(Assumption); | ||||
10585 | } | ||||
10586 | |||||
10587 | /// See AbstractAttribute::getAsStr() | ||||
10588 | const std::string getAsStr() const override { | ||||
10589 | const SetContents &Known = getKnown(); | ||||
10590 | const SetContents &Assumed = getAssumed(); | ||||
10591 | |||||
10592 | const std::string KnownStr = | ||||
10593 | llvm::join(Known.getSet().begin(), Known.getSet().end(), ","); | ||||
10594 | const std::string AssumedStr = | ||||
10595 | (Assumed.isUniversal()) | ||||
10596 | ? "Universal" | ||||
10597 | : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ","); | ||||
10598 | |||||
10599 | return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]"; | ||||
10600 | } | ||||
10601 | }; | ||||
10602 | |||||
10603 | /// Propagates assumption information from parent functions to all of their | ||||
10604 | /// successors. An assumption can be propagated if the containing function | ||||
10605 | /// dominates the called function. | ||||
10606 | /// | ||||
10607 | /// We start with a "known" set of assumptions already valid for the associated | ||||
10608 | /// function and an "assumed" set that initially contains all possible | ||||
10609 | /// assumptions. The assumed set is inter-procedurally updated by narrowing its | ||||
10610 | /// contents as concrete values are known. The concrete values are seeded by the | ||||
10611 | /// first nodes that are either entries into the call graph, or contains no | ||||
10612 | /// assumptions. Each node is updated as the intersection of the assumed state | ||||
10613 | /// with all of its predecessors. | ||||
10614 | struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl { | ||||
10615 | AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A) | ||||
10616 | : AAAssumptionInfoImpl(IRP, A, | ||||
10617 | getAssumptions(*IRP.getAssociatedFunction())) {} | ||||
10618 | |||||
10619 | /// See AbstractAttribute::manifest(...). | ||||
10620 | ChangeStatus manifest(Attributor &A) override { | ||||
10621 | const auto &Assumptions = getKnown(); | ||||
10622 | |||||
10623 | // Don't manifest a universal set if it somehow made it here. | ||||
10624 | if (Assumptions.isUniversal()) | ||||
10625 | return ChangeStatus::UNCHANGED; | ||||
10626 | |||||
10627 | Function *AssociatedFunction = getAssociatedFunction(); | ||||
10628 | |||||
10629 | bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet()); | ||||
10630 | |||||
10631 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; | ||||
10632 | } | ||||
10633 | |||||
10634 | /// See AbstractAttribute::updateImpl(...). | ||||
10635 | ChangeStatus updateImpl(Attributor &A) override { | ||||
10636 | bool Changed = false; | ||||
10637 | |||||
10638 | auto CallSitePred = [&](AbstractCallSite ACS) { | ||||
10639 | const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>( | ||||
10640 | *this, IRPosition::callsite_function(*ACS.getInstruction()), | ||||
10641 | DepClassTy::REQUIRED); | ||||
10642 | // Get the set of assumptions shared by all of this function's callers. | ||||
10643 | Changed |= getIntersection(AssumptionAA.getAssumed()); | ||||
10644 | return !getAssumed().empty() || !getKnown().empty(); | ||||
10645 | }; | ||||
10646 | |||||
10647 | bool UsedAssumedInformation = false; | ||||
10648 | // Get the intersection of all assumptions held by this node's predecessors. | ||||
10649 | // If we don't know all the call sites then this is either an entry into the | ||||
10650 | // call graph or an empty node. This node is known to only contain its own | ||||
10651 | // assumptions and can be propagated to its successors. | ||||
10652 | if (!A.checkForAllCallSites(CallSitePred, *this, true, | ||||
10653 | UsedAssumedInformation)) | ||||
10654 | return indicatePessimisticFixpoint(); | ||||
10655 | |||||
10656 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; | ||||
10657 | } | ||||
10658 | |||||
10659 | void trackStatistics() const override {} | ||||
10660 | }; | ||||
10661 | |||||
10662 | /// Assumption Info defined for call sites. | ||||
10663 | struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl { | ||||
10664 | |||||
10665 | AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A) | ||||
10666 | : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {} | ||||
10667 | |||||
10668 | /// See AbstractAttribute::initialize(...). | ||||
10669 | void initialize(Attributor &A) override { | ||||
10670 | const IRPosition &FnPos = IRPosition::function(*getAnchorScope()); | ||||
10671 | A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED); | ||||
10672 | } | ||||
10673 | |||||
10674 | /// See AbstractAttribute::manifest(...). | ||||
10675 | ChangeStatus manifest(Attributor &A) override { | ||||
10676 | // Don't manifest a universal set if it somehow made it here. | ||||
10677 | if (getKnown().isUniversal()) | ||||
10678 | return ChangeStatus::UNCHANGED; | ||||
10679 | |||||
10680 | CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue()); | ||||
10681 | bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet()); | ||||
10682 | |||||
10683 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; | ||||
10684 | } | ||||
10685 | |||||
10686 | /// See AbstractAttribute::updateImpl(...). | ||||
10687 | ChangeStatus updateImpl(Attributor &A) override { | ||||
10688 | const IRPosition &FnPos = IRPosition::function(*getAnchorScope()); | ||||
10689 | auto &AssumptionAA = | ||||
10690 | A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED); | ||||
10691 | bool Changed = getIntersection(AssumptionAA.getAssumed()); | ||||
10692 | return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; | ||||
10693 | } | ||||
10694 | |||||
10695 | /// See AbstractAttribute::trackStatistics() | ||||
10696 | void trackStatistics() const override {} | ||||
10697 | |||||
10698 | private: | ||||
10699 | /// Helper to initialized the known set as all the assumptions this call and | ||||
10700 | /// the callee contain. | ||||
10701 | DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) { | ||||
10702 | const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue()); | ||||
10703 | auto Assumptions = getAssumptions(CB); | ||||
10704 | if (Function *F = IRP.getAssociatedFunction()) | ||||
10705 | set_union(Assumptions, getAssumptions(*F)); | ||||
10706 | if (Function *F = IRP.getAssociatedFunction()) | ||||
10707 | set_union(Assumptions, getAssumptions(*F)); | ||||
10708 | return Assumptions; | ||||
10709 | } | ||||
10710 | }; | ||||
10711 | } // namespace | ||||
10712 | |||||
/// Dereference the call-edge iterator: look up (or create) the AACallEdges
/// attribute for the function the iterator currently points at and expose it
/// as a call-graph node. The const_cast strips the constness returned by
/// getOrCreateAAFor before the pointer is handed out as a mutable node.
AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}
10717 | |||||
10718 | void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); } | ||||
10719 | |||||
// Out-of-line definitions of the per-class ID tags; the address of each ID is
// what uniquely identifies the abstract attribute class at runtime.
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AAInstanceInfo::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialConstantValues::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;
10749 | |||||
10750 | // Macro magic to create the static generator function for attributes that | ||||
10751 | // follow the naming scheme. | ||||
10752 | |||||
// Emit a switch case that rejects position kind \p PK for \p CLASS.
#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

// Emit a switch case that allocates the CLASS##SUFFIX subclass for \p PK.
#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

// createForPosition for attributes valid only on functions and call sites.
#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid only on value positions.
#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid on every position kind.
#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid only on function positions.
#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid on everything except returns.
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
10842 | |||||
// Instantiate CLASS::createForPosition for every abstract attribute, grouped
// by the set of IR positions each attribute supports.

// Function/call-site-only attributes.
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

// Value-position-only attributes.
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

// Attributes valid on all position kinds.
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

// Function-position-only attributes.
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

// Attributes valid everywhere except return positions.
CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

// The generator macros are local to this file; drop them again.
#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV
1 | //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file contains routines that help analyze properties that chains of | |||
10 | // computations have. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #ifndef LLVM_ANALYSIS_VALUETRACKING_H | |||
15 | #define LLVM_ANALYSIS_VALUETRACKING_H | |||
16 | ||||
17 | #include "llvm/ADT/ArrayRef.h" | |||
18 | #include "llvm/ADT/Optional.h" | |||
19 | #include "llvm/ADT/SmallSet.h" | |||
20 | #include "llvm/IR/Constants.h" | |||
21 | #include "llvm/IR/DataLayout.h" | |||
22 | #include "llvm/IR/InstrTypes.h" | |||
23 | #include "llvm/IR/Intrinsics.h" | |||
24 | #include <cassert> | |||
25 | #include <cstdint> | |||
26 | ||||
27 | namespace llvm { | |||
28 | ||||
29 | class Operator; | |||
30 | class AddOperator; | |||
31 | class AllocaInst; | |||
32 | class APInt; | |||
33 | class AssumptionCache; | |||
34 | class DominatorTree; | |||
35 | class GEPOperator; | |||
36 | class LoadInst; | |||
37 | class WithOverflowInst; | |||
38 | struct KnownBits; | |||
39 | class Loop; | |||
40 | class LoopInfo; | |||
41 | class MDNode; | |||
42 | class OptimizationRemarkEmitter; | |||
43 | class StringRef; | |||
44 | class TargetLibraryInfo; | |||
45 | class Value; | |||
46 | ||||
47 | constexpr unsigned MaxAnalysisRecursionDepth = 6; | |||
48 | ||||
49 | /// Determine which bits of V are known to be either zero or one and return | |||
50 | /// them in the KnownZero/KnownOne bit sets. | |||
51 | /// | |||
52 | /// This function is defined on values with integer type, values with pointer | |||
53 | /// type, and vectors of integers. In the case | |||
54 | /// where V is a vector, the known zero and known one values are the | |||
55 | /// same width as the vector element, and the bit is set only if it is true | |||
56 | /// for all of the elements in the vector. | |||
57 | void computeKnownBits(const Value *V, KnownBits &Known, | |||
58 | const DataLayout &DL, unsigned Depth = 0, | |||
59 | AssumptionCache *AC = nullptr, | |||
60 | const Instruction *CxtI = nullptr, | |||
61 | const DominatorTree *DT = nullptr, | |||
62 | OptimizationRemarkEmitter *ORE = nullptr, | |||
63 | bool UseInstrInfo = true); | |||
64 | ||||
65 | /// Determine which bits of V are known to be either zero or one and return | |||
66 | /// them in the KnownZero/KnownOne bit sets. | |||
67 | /// | |||
68 | /// This function is defined on values with integer type, values with pointer | |||
69 | /// type, and vectors of integers. In the case | |||
70 | /// where V is a vector, the known zero and known one values are the | |||
71 | /// same width as the vector element, and the bit is set only if it is true | |||
72 | /// for all of the demanded elements in the vector. | |||
73 | void computeKnownBits(const Value *V, const APInt &DemandedElts, | |||
74 | KnownBits &Known, const DataLayout &DL, | |||
75 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
76 | const Instruction *CxtI = nullptr, | |||
77 | const DominatorTree *DT = nullptr, | |||
78 | OptimizationRemarkEmitter *ORE = nullptr, | |||
79 | bool UseInstrInfo = true); | |||
80 | ||||
81 | /// Returns the known bits rather than passing by reference. | |||
82 | KnownBits computeKnownBits(const Value *V, const DataLayout &DL, | |||
83 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
84 | const Instruction *CxtI = nullptr, | |||
85 | const DominatorTree *DT = nullptr, | |||
86 | OptimizationRemarkEmitter *ORE = nullptr, | |||
87 | bool UseInstrInfo = true); | |||
88 | ||||
89 | /// Returns the known bits rather than passing by reference. | |||
90 | KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, | |||
91 | const DataLayout &DL, unsigned Depth = 0, | |||
92 | AssumptionCache *AC = nullptr, | |||
93 | const Instruction *CxtI = nullptr, | |||
94 | const DominatorTree *DT = nullptr, | |||
95 | OptimizationRemarkEmitter *ORE = nullptr, | |||
96 | bool UseInstrInfo = true); | |||
97 | ||||
98 | /// Compute known bits from the range metadata. | |||
99 | /// \p KnownZero the set of bits that are known to be zero | |||
100 | /// \p KnownOne the set of bits that are known to be one | |||
101 | void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, | |||
102 | KnownBits &Known); | |||
103 | ||||
104 | /// Return true if LHS and RHS have no common bits set. | |||
105 | bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS, | |||
106 | const DataLayout &DL, | |||
107 | AssumptionCache *AC = nullptr, | |||
108 | const Instruction *CxtI = nullptr, | |||
109 | const DominatorTree *DT = nullptr, | |||
110 | bool UseInstrInfo = true); | |||
111 | ||||
112 | /// Return true if the given value is known to have exactly one bit set when | |||
113 | /// defined. For vectors return true if every element is known to be a power | |||
114 | /// of two when defined. Supports values with integer or pointer type and | |||
115 | /// vectors of integers. If 'OrZero' is set, then return true if the given | |||
116 | /// value is either a power of two or zero. | |||
117 | bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, | |||
118 | bool OrZero = false, unsigned Depth = 0, | |||
119 | AssumptionCache *AC = nullptr, | |||
120 | const Instruction *CxtI = nullptr, | |||
121 | const DominatorTree *DT = nullptr, | |||
122 | bool UseInstrInfo = true); | |||
123 | ||||
124 | bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI); | |||
125 | ||||
126 | /// Return true if the given value is known to be non-zero when defined. For | |||
127 | /// vectors, return true if every element is known to be non-zero when | |||
128 | /// defined. For pointers, if the context instruction and dominator tree are | |||
129 | /// specified, perform context-sensitive analysis and return true if the | |||
130 | /// pointer couldn't possibly be null at the specified instruction. | |||
131 | /// Supports values with integer or pointer type and vectors of integers. | |||
132 | bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0, | |||
133 | AssumptionCache *AC = nullptr, | |||
134 | const Instruction *CxtI = nullptr, | |||
135 | const DominatorTree *DT = nullptr, | |||
136 | bool UseInstrInfo = true); | |||
137 | ||||
138 | /// Return true if the two given values are negation. | |||
/// Currently can recognize Value pair:
140 | /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X) | |||
141 | /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A) | |||
142 | bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false); | |||
143 | ||||
/// Returns true if the given value is known to be non-negative.
145 | bool isKnownNonNegative(const Value *V, const DataLayout &DL, | |||
146 | unsigned Depth = 0, | |||
147 | AssumptionCache *AC = nullptr, | |||
148 | const Instruction *CxtI = nullptr, | |||
149 | const DominatorTree *DT = nullptr, | |||
150 | bool UseInstrInfo = true); | |||
151 | ||||
/// Returns true if the given value is known to be positive (i.e. non-negative
153 | /// and non-zero). | |||
154 | bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0, | |||
155 | AssumptionCache *AC = nullptr, | |||
156 | const Instruction *CxtI = nullptr, | |||
157 | const DominatorTree *DT = nullptr, | |||
158 | bool UseInstrInfo = true); | |||
159 | ||||
/// Returns true if the given value is known to be negative (i.e. non-positive
161 | /// and non-zero). | |||
162 | bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0, | |||
163 | AssumptionCache *AC = nullptr, | |||
164 | const Instruction *CxtI = nullptr, | |||
165 | const DominatorTree *DT = nullptr, | |||
166 | bool UseInstrInfo = true); | |||
167 | ||||
168 | /// Return true if the given values are known to be non-equal when defined. | |||
169 | /// Supports scalar integer types only. | |||
170 | bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, | |||
171 | AssumptionCache *AC = nullptr, | |||
172 | const Instruction *CxtI = nullptr, | |||
173 | const DominatorTree *DT = nullptr, | |||
174 | bool UseInstrInfo = true); | |||
175 | ||||
176 | /// Return true if 'V & Mask' is known to be zero. We use this predicate to | |||
177 | /// simplify operations downstream. Mask is known to be zero for bits that V | |||
178 | /// cannot have. | |||
179 | /// | |||
180 | /// This function is defined on values with integer type, values with pointer | |||
181 | /// type, and vectors of integers. In the case | |||
182 | /// where V is a vector, the mask, known zero, and known one values are the | |||
183 | /// same width as the vector element, and the bit is set only if it is true | |||
184 | /// for all of the elements in the vector. | |||
185 | bool MaskedValueIsZero(const Value *V, const APInt &Mask, | |||
186 | const DataLayout &DL, | |||
187 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
188 | const Instruction *CxtI = nullptr, | |||
189 | const DominatorTree *DT = nullptr, | |||
190 | bool UseInstrInfo = true); | |||
191 | ||||
192 | /// Return the number of times the sign bit of the register is replicated into | |||
193 | /// the other bits. We know that at least 1 bit is always equal to the sign | |||
194 | /// bit (itself), but other cases can give us information. For example, | |||
195 | /// immediately after an "ashr X, 2", we know that the top 3 bits are all | |||
196 | /// equal to each other, so we return 3. For vectors, return the number of | |||
/// sign bits for the vector element with the minimum number of known sign
198 | /// bits. | |||
199 | unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, | |||
200 | unsigned Depth = 0, AssumptionCache *AC = nullptr, | |||
201 | const Instruction *CxtI = nullptr, | |||
202 | const DominatorTree *DT = nullptr, | |||
203 | bool UseInstrInfo = true); | |||
204 | ||||
205 | /// Get the upper bound on bit size for this Value \p Op as a signed integer. | |||
206 | /// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)). | |||
207 | /// Similar to the APInt::getSignificantBits function. | |||
208 | unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, | |||
209 | unsigned Depth = 0, | |||
210 | AssumptionCache *AC = nullptr, | |||
211 | const Instruction *CxtI = nullptr, | |||
212 | const DominatorTree *DT = nullptr); | |||
213 | ||||
214 | /// Map a call instruction to an intrinsic ID. Libcalls which have equivalent | |||
215 | /// intrinsics are treated as-if they were intrinsics. | |||
216 | Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, | |||
217 | const TargetLibraryInfo *TLI); | |||
218 | ||||
219 | /// Return true if we can prove that the specified FP value is never equal to | |||
220 | /// -0.0. | |||
221 | bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, | |||
222 | unsigned Depth = 0); | |||
223 | ||||
224 | /// Return true if we can prove that the specified FP value is either NaN or | |||
225 | /// never less than -0.0. | |||
226 | /// | |||
227 | /// NaN --> true | |||
228 | /// +0 --> true | |||
229 | /// -0 --> true | |||
230 | /// x > +0 --> true | |||
231 | /// x < -0 --> false | |||
232 | bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI); | |||
233 | ||||
234 | /// Return true if the floating-point scalar value is not an infinity or if | |||
235 | /// the floating-point vector value has no infinities. Return false if a value | |||
236 | /// could ever be infinity. | |||
237 | bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, | |||
238 | unsigned Depth = 0); | |||
239 | ||||
240 | /// Return true if the floating-point scalar value is not a NaN or if the | |||
241 | /// floating-point vector value has no NaN elements. Return false if a value | |||
242 | /// could ever be NaN. | |||
243 | bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, | |||
244 | unsigned Depth = 0); | |||
245 | ||||
246 | /// Return true if we can prove that the specified FP value's sign bit is 0. | |||
247 | /// | |||
248 | /// NaN --> true/false (depending on the NaN's sign bit) | |||
249 | /// +0 --> true | |||
250 | /// -0 --> false | |||
251 | /// x > +0 --> true | |||
252 | /// x < -0 --> false | |||
253 | bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI); | |||
254 | ||||
255 | /// If the specified value can be set by repeating the same byte in memory, | |||
256 | /// return the i8 value that it is represented with. This is true for all i8 | |||
257 | /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double | |||
258 | /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g. | |||
259 | /// i16 0x1234), return null. If the value is entirely undef and padding, | |||
260 | /// return undef. | |||
261 | Value *isBytewiseValue(Value *V, const DataLayout &DL); | |||
262 | ||||
263 | /// Given an aggregate and an sequence of indices, see if the scalar value | |||
264 | /// indexed is already around as a register, for example if it were inserted | |||
265 | /// directly into the aggregate. | |||
266 | /// | |||
267 | /// If InsertBefore is not null, this function will duplicate (modified) | |||
268 | /// insertvalues when a part of a nested struct is extracted. | |||
269 | Value *FindInsertedValue(Value *V, | |||
270 | ArrayRef<unsigned> idx_range, | |||
271 | Instruction *InsertBefore = nullptr); | |||
272 | ||||
273 | /// Analyze the specified pointer to see if it can be expressed as a base | |||
274 | /// pointer plus a constant offset. Return the base and offset to the caller. | |||
275 | /// | |||
276 | /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that | |||
277 | /// creates and later unpacks the required APInt. | |||
278 | inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, | |||
279 | const DataLayout &DL, | |||
280 | bool AllowNonInbounds = true) { | |||
281 | APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); | |||
| ||||
282 | Value *Base = | |||
283 | Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds); | |||
284 | ||||
285 | Offset = OffsetAPInt.getSExtValue(); | |||
286 | return Base; | |||
287 | } | |||
288 | inline const Value * | |||
289 | GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset, | |||
290 | const DataLayout &DL, | |||
291 | bool AllowNonInbounds = true) { | |||
292 | return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL, | |||
293 | AllowNonInbounds); | |||
294 | } | |||
295 | ||||
296 | /// Returns true if the GEP is based on a pointer to a string (array of | |||
297 | // \p CharSize integers) and is indexing into this string. | |||
298 | bool isGEPBasedOnPointerToString(const GEPOperator *GEP, | |||
299 | unsigned CharSize = 8); | |||
300 | ||||
301 | /// Represents offset+length into a ConstantDataArray. | |||
302 | struct ConstantDataArraySlice { | |||
303 | /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid | |||
304 | /// initializer, it just doesn't fit the ConstantDataArray interface). | |||
305 | const ConstantDataArray *Array; | |||
306 | ||||
307 | /// Slice starts at this Offset. | |||
308 | uint64_t Offset; | |||
309 | ||||
310 | /// Length of the slice. | |||
311 | uint64_t Length; | |||
312 | ||||
313 | /// Moves the Offset and adjusts Length accordingly. | |||
314 | void move(uint64_t Delta) { | |||
315 | assert(Delta < Length)(static_cast <bool> (Delta < Length) ? void (0) : __assert_fail ("Delta < Length", "llvm/include/llvm/Analysis/ValueTracking.h" , 315, __extension__ __PRETTY_FUNCTION__)); | |||
316 | Offset += Delta; | |||
317 | Length -= Delta; | |||
318 | } | |||
319 | ||||
320 | /// Convenience accessor for elements in the slice. | |||
321 | uint64_t operator[](unsigned I) const { | |||
322 | return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset); | |||
323 | } | |||
324 | }; | |||
325 | ||||
326 | /// Returns true if the value \p V is a pointer into a ConstantDataArray. | |||
327 | /// If successful \p Slice will point to a ConstantDataArray info object | |||
328 | /// with an appropriate offset. | |||
329 | bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, | |||
330 | unsigned ElementSize, uint64_t Offset = 0); | |||
331 | ||||
332 | /// This function computes the length of a null-terminated C string pointed to | |||
333 | /// by V. If successful, it returns true and returns the string in Str. If | |||
334 | /// unsuccessful, it returns false. This does not include the trailing null | |||
335 | /// character by default. If TrimAtNul is set to false, then this returns any | |||
336 | /// trailing null characters as well as any other characters that come after | |||
337 | /// it. | |||
338 | bool getConstantStringInfo(const Value *V, StringRef &Str, | |||
339 | uint64_t Offset = 0, bool TrimAtNul = true); | |||
340 | ||||
341 | /// If we can compute the length of the string pointed to by the specified | |||
342 | /// pointer, return 'len+1'. If we can't, return 0. | |||
343 | uint64_t GetStringLength(const Value *V, unsigned CharSize = 8); | |||
344 | ||||
345 | /// This function returns call pointer argument that is considered the same by | |||
346 | /// aliasing rules. You CAN'T use it to replace one value with another. If | |||
347 | /// \p MustPreserveNullness is true, the call must preserve the nullness of | |||
348 | /// the pointer. | |||
349 | const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call, | |||
350 | bool MustPreserveNullness); | |||
351 | inline Value * | |||
352 | getArgumentAliasingToReturnedPointer(CallBase *Call, | |||
353 | bool MustPreserveNullness) { | |||
354 | return const_cast<Value *>(getArgumentAliasingToReturnedPointer( | |||
355 | const_cast<const CallBase *>(Call), MustPreserveNullness)); | |||
356 | } | |||
357 | ||||
358 | /// {launder,strip}.invariant.group returns pointer that aliases its argument, | |||
359 | /// and it only captures pointer by returning it. | |||
360 | /// These intrinsics are not marked as nocapture, because returning is | |||
/// considered as capture. The arguments are not marked as returned either,
362 | /// because it would make it useless. If \p MustPreserveNullness is true, | |||
363 | /// the intrinsic must preserve the nullness of the pointer. | |||
364 | bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( | |||
365 | const CallBase *Call, bool MustPreserveNullness); | |||
366 | ||||
367 | /// This method strips off any GEP address adjustments and pointer casts from | |||
368 | /// the specified value, returning the original object being addressed. Note | |||
369 | /// that the returned value has pointer type if the specified value does. If | |||
370 | /// the MaxLookup value is non-zero, it limits the number of instructions to | |||
371 | /// be stripped off. | |||
372 | const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6); | |||
373 | inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) { | |||
374 | // Force const to avoid infinite recursion. | |||
375 | const Value *VConst = V; | |||
376 | return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup)); | |||
377 | } | |||
378 | ||||
379 | /// This method is similar to getUnderlyingObject except that it can | |||
380 | /// look through phi and select instructions and return multiple objects. | |||
381 | /// | |||
382 | /// If LoopInfo is passed, loop phis are further analyzed. If a pointer | |||
383 | /// accesses different objects in each iteration, we don't look through the | |||
384 | /// phi node. E.g. consider this loop nest: | |||
385 | /// | |||
386 | /// int **A; | |||
387 | /// for (i) | |||
388 | /// for (j) { | |||
389 | /// A[i][j] = A[i-1][j] * B[j] | |||
390 | /// } | |||
391 | /// | |||
392 | /// This is transformed by Load-PRE to stash away A[i] for the next iteration | |||
393 | /// of the outer loop: | |||
394 | /// | |||
395 | /// Curr = A[0]; // Prev_0 | |||
396 | /// for (i: 1..N) { | |||
397 | /// Prev = Curr; // Prev = PHI (Prev_0, Curr) | |||
398 | /// Curr = A[i]; | |||
399 | /// for (j: 0..N) { | |||
400 | /// Curr[j] = Prev[j] * B[j] | |||
401 | /// } | |||
402 | /// } | |||
403 | /// | |||
404 | /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects | |||
405 | /// should not assume that Curr and Prev share the same underlying object thus | |||
406 | /// it shouldn't look through the phi above. | |||
407 | void getUnderlyingObjects(const Value *V, | |||
408 | SmallVectorImpl<const Value *> &Objects, | |||
409 | LoopInfo *LI = nullptr, unsigned MaxLookup = 6); | |||
410 | ||||
411 | /// This is a wrapper around getUnderlyingObjects and adds support for basic | |||
412 | /// ptrtoint+arithmetic+inttoptr sequences. | |||
413 | bool getUnderlyingObjectsForCodeGen(const Value *V, | |||
414 | SmallVectorImpl<Value *> &Objects); | |||
415 | ||||
416 | /// Returns unique alloca where the value comes from, or nullptr. | |||
/// If OffsetZero is true check that V points to the beginning of the alloca.
418 | AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false); | |||
419 | inline const AllocaInst *findAllocaForValue(const Value *V, | |||
420 | bool OffsetZero = false) { | |||
421 | return findAllocaForValue(const_cast<Value *>(V), OffsetZero); | |||
422 | } | |||
423 | ||||
424 | /// Return true if the only users of this pointer are lifetime markers. | |||
425 | bool onlyUsedByLifetimeMarkers(const Value *V); | |||
426 | ||||
427 | /// Return true if the only users of this pointer are lifetime markers or | |||
428 | /// droppable instructions. | |||
429 | bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V); | |||
430 | ||||
431 | /// Return true if speculation of the given load must be suppressed to avoid | |||
432 | /// ordering or interfering with an active sanitizer. If not suppressed, | |||
433 | /// dereferenceability and alignment must be proven separately. Note: This | |||
434 | /// is only needed for raw reasoning; if you use the interface below | |||
435 | /// (isSafeToSpeculativelyExecute), this is handled internally. | |||
436 | bool mustSuppressSpeculation(const LoadInst &LI); | |||
437 | ||||
438 | /// Return true if the instruction does not have any effects besides | |||
439 | /// calculating the result and does not have undefined behavior. | |||
440 | /// | |||
441 | /// This method never returns true for an instruction that returns true for | |||
442 | /// mayHaveSideEffects; however, this method also does some other checks in | |||
443 | /// addition. It checks for undefined behavior, like dividing by zero or | |||
444 | /// loading from an invalid pointer (but not for undefined results, like a | |||
445 | /// shift with a shift amount larger than the width of the result). It checks | |||
446 | /// for malloc and alloca because speculatively executing them might cause a | |||
447 | /// memory leak. It also returns false for instructions related to control | |||
448 | /// flow, specifically terminators and PHI nodes. | |||
449 | /// | |||
450 | /// If the CtxI is specified this method performs context-sensitive analysis | |||
451 | /// and returns true if it is safe to execute the instruction immediately | |||
452 | /// before the CtxI. | |||
453 | /// | |||
454 | /// If the CtxI is NOT specified this method only looks at the instruction | |||
455 | /// itself and its operands, so if this method returns true, it is safe to | |||
456 | /// move the instruction as long as the correct dominance relationships for | |||
457 | /// the operands and users hold. | |||
458 | /// | |||
459 | /// This method can return true for instructions that read memory; | |||
460 | /// for such instructions, moving them may change the resulting value. | |||
461 | bool isSafeToSpeculativelyExecute(const Instruction *I, | |||
462 | const Instruction *CtxI = nullptr, | |||
463 | const DominatorTree *DT = nullptr, | |||
464 | const TargetLibraryInfo *TLI = nullptr); | |||
465 | ||||
466 | /// This returns the same result as isSafeToSpeculativelyExecute if Opcode is | |||
467 | /// the actual opcode of Inst. If the provided and actual opcode differ, the | |||
468 | /// function (virtually) overrides the opcode of Inst with the provided | |||
/// Opcode. There are some constraints in this case:
470 | /// * If Opcode has a fixed number of operands (eg, as binary operators do), | |||
471 | /// then Inst has to have at least as many leading operands. The function | |||
472 | /// will ignore all trailing operands beyond that number. | |||
473 | /// * If Opcode allows for an arbitrary number of operands (eg, as CallInsts | |||
474 | /// do), then all operands are considered. | |||
475 | /// * The virtual instruction has to satisfy all typing rules of the provided | |||
476 | /// Opcode. | |||
477 | /// * This function is pessimistic in the following sense: If one actually | |||
478 | /// materialized the virtual instruction, then isSafeToSpeculativelyExecute | |||
479 | /// may say that the materialized instruction is speculatable whereas this | |||
480 | /// function may have said that the instruction wouldn't be speculatable. | |||
481 | /// This behavior is a shortcoming in the current implementation and not | |||
482 | /// intentional. | |||
483 | bool isSafeToSpeculativelyExecuteWithOpcode( | |||
484 | unsigned Opcode, const Instruction *Inst, | |||
485 | const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr, | |||
486 | const TargetLibraryInfo *TLI = nullptr); | |||
487 | ||||
488 | /// Returns true if the result or effects of the given instructions \p I | |||
489 | /// depend values not reachable through the def use graph. | |||
490 | /// * Memory dependence arises for example if the instruction reads from | |||
491 | /// memory or may produce effects or undefined behaviour. Memory dependent | |||
/// instructions generally cannot be reordered with respect to other memory
493 | /// dependent instructions. | |||
494 | /// * Control dependence arises for example if the instruction may fault | |||
495 | /// if lifted above a throwing call or infinite loop. | |||
496 | bool mayHaveNonDefUseDependency(const Instruction &I); | |||
497 | ||||
498 | /// Return true if it is an intrinsic that cannot be speculated but also | |||
499 | /// cannot trap. | |||
500 | bool isAssumeLikeIntrinsic(const Instruction *I); | |||
501 | ||||
502 | /// Return true if it is valid to use the assumptions provided by an | |||
503 | /// assume intrinsic, I, at the point in the control-flow identified by the | |||
504 | /// context instruction, CxtI. | |||
505 | bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, | |||
506 | const DominatorTree *DT = nullptr); | |||
507 | ||||
/// Possible results of an overflow query for a binary arithmetic operation.
enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};
518 | ||||
519 | OverflowResult computeOverflowForUnsignedMul(const Value *LHS, | |||
520 | const Value *RHS, | |||
521 | const DataLayout &DL, | |||
522 | AssumptionCache *AC, | |||
523 | const Instruction *CxtI, | |||
524 | const DominatorTree *DT, | |||
525 | bool UseInstrInfo = true); | |||
526 | OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, | |||
527 | const DataLayout &DL, | |||
528 | AssumptionCache *AC, | |||
529 | const Instruction *CxtI, | |||
530 | const DominatorTree *DT, | |||
531 | bool UseInstrInfo = true); | |||
532 | OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, | |||
533 | const Value *RHS, | |||
534 | const DataLayout &DL, | |||
535 | AssumptionCache *AC, | |||
536 | const Instruction *CxtI, | |||
537 | const DominatorTree *DT, | |||
538 | bool UseInstrInfo = true); | |||
539 | OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS, | |||
540 | const DataLayout &DL, | |||
541 | AssumptionCache *AC = nullptr, | |||
542 | const Instruction *CxtI = nullptr, | |||
543 | const DominatorTree *DT = nullptr); | |||
544 | /// This version also leverages the sign bit of Add if known. | |||
545 | OverflowResult computeOverflowForSignedAdd(const AddOperator *Add, | |||
546 | const DataLayout &DL, | |||
547 | AssumptionCache *AC = nullptr, | |||
548 | const Instruction *CxtI = nullptr, | |||
549 | const DominatorTree *DT = nullptr); | |||
550 | OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, | |||
551 | const DataLayout &DL, | |||
552 | AssumptionCache *AC, | |||
553 | const Instruction *CxtI, | |||
554 | const DominatorTree *DT); | |||
555 | OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, | |||
556 | const DataLayout &DL, | |||
557 | AssumptionCache *AC, | |||
558 | const Instruction *CxtI, | |||
559 | const DominatorTree *DT); | |||
560 | ||||
561 | /// Returns true if the arithmetic part of the \p WO 's result is | |||
562 | /// used only along the paths control dependent on the computation | |||
563 | /// not overflowing, \p WO being an <op>.with.overflow intrinsic. | |||
564 | bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, | |||
565 | const DominatorTree &DT); | |||
566 | ||||
567 | ||||
568 | /// Determine the possible constant range of an integer or vector of integer | |||
569 | /// value. This is intended as a cheap, non-recursive check. | |||
570 | ConstantRange computeConstantRange(const Value *V, bool ForSigned, | |||
571 | bool UseInstrInfo = true, | |||
572 | AssumptionCache *AC = nullptr, | |||
573 | const Instruction *CtxI = nullptr, | |||
574 | const DominatorTree *DT = nullptr, | |||
575 | unsigned Depth = 0); | |||
576 | ||||
577 | /// Return true if this function can prove that the instruction I will | |||
578 | /// always transfer execution to one of its successors (including the next | |||
579 | /// instruction that follows within a basic block). E.g. this is not | |||
580 | /// guaranteed for function calls that could loop infinitely. | |||
581 | /// | |||
582 | /// In other words, this function returns false for instructions that may | |||
583 | /// transfer execution or fail to transfer execution in a way that is not | |||
584 | /// captured in the CFG nor in the sequence of instructions within a basic | |||
585 | /// block. | |||
586 | /// | |||
587 | /// Undefined behavior is assumed not to happen, so e.g. division is | |||
588 | /// guaranteed to transfer execution to the following instruction even | |||
589 | /// though division by zero might cause undefined behavior. | |||
590 | bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I); | |||
591 | ||||
592 | /// Returns true if this block does not contain a potential implicit exit. | |||
/// This is equivalent to saying that all instructions within the basic block
594 | /// are guaranteed to transfer execution to their successor within the basic | |||
595 | /// block. This has the same assumptions w.r.t. undefined behavior as the | |||
596 | /// instruction variant of this function. | |||
597 | bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB); | |||
598 | ||||
599 | /// Return true if every instruction in the range (Begin, End) is | |||
600 | /// guaranteed to transfer execution to its static successor. \p ScanLimit | |||
601 | /// bounds the search to avoid scanning huge blocks. | |||
602 | bool isGuaranteedToTransferExecutionToSuccessor( | |||
603 | BasicBlock::const_iterator Begin, BasicBlock::const_iterator End, | |||
604 | unsigned ScanLimit = 32); | |||
605 | ||||
606 | /// Same as previous, but with range expressed via iterator_range. | |||
607 | bool isGuaranteedToTransferExecutionToSuccessor( | |||
608 | iterator_range<BasicBlock::const_iterator> Range, | |||
609 | unsigned ScanLimit = 32); | |||
610 | ||||
611 | /// Return true if this function can prove that the instruction I | |||
612 | /// is executed for every iteration of the loop L. | |||
613 | /// | |||
614 | /// Note that this currently only considers the loop header. | |||
615 | bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, | |||
616 | const Loop *L); | |||
617 | ||||
618 | /// Return true if I yields poison or raises UB if any of its operands is | |||
619 | /// poison. | |||
620 | /// Formally, given I = `r = op v1 v2 .. vN`, propagatesPoison returns true | |||
621 | /// if, for all i, r is evaluated to poison or op raises UB if vi = poison. | |||
622 | /// If vi is a vector or an aggregate and r is a single value, any poison | |||
623 | /// element in vi should make r poison or raise UB. | |||
624 | /// To filter out operands that raise UB on poison, you can use | |||
625 | /// getGuaranteedNonPoisonOp. | |||
626 | bool propagatesPoison(const Operator *I); | |||
627 | ||||
628 | /// Insert operands of I into Ops such that I will trigger undefined behavior | |||
629 | /// if I is executed and that operand has a poison value. | |||
630 | void getGuaranteedNonPoisonOps(const Instruction *I, | |||
631 | SmallPtrSetImpl<const Value *> &Ops); | |||
632 | /// Insert operands of I into Ops such that I will trigger undefined behavior | |||
633 | /// if I is executed and that operand is not a well-defined value | |||
634 | /// (i.e. has undef bits or poison). | |||
635 | void getGuaranteedWellDefinedOps(const Instruction *I, | |||
636 | SmallPtrSetImpl<const Value *> &Ops); | |||
637 | ||||
638 | /// Return true if the given instruction must trigger undefined behavior | |||
639 | /// when I is executed with any operands which appear in KnownPoison holding | |||
640 | /// a poison value at the point of execution. | |||
641 | bool mustTriggerUB(const Instruction *I, | |||
642 | const SmallSet<const Value *, 16>& KnownPoison); | |||
643 | ||||
644 | /// Return true if this function can prove that if Inst is executed | |||
645 | /// and yields a poison value or undef bits, then that will trigger | |||
646 | /// undefined behavior. | |||
647 | /// | |||
648 | /// Note that this currently only considers the basic block that is | |||
649 | /// the parent of Inst. | |||
650 | bool programUndefinedIfUndefOrPoison(const Instruction *Inst); | |||
651 | bool programUndefinedIfPoison(const Instruction *Inst); | |||
652 | ||||
653 | /// canCreateUndefOrPoison returns true if Op can create undef or poison from | |||
654 | /// non-undef & non-poison operands. | |||
655 | /// For vectors, canCreateUndefOrPoison returns true if there is potential | |||
656 | /// poison or undef in any element of the result when vectors without | |||
/// undef/poison are given as operands.
658 | /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns | |||
659 | /// true. If Op raises immediate UB but never creates poison or undef | |||
660 | /// (e.g. sdiv I, 0), canCreatePoison returns false. | |||
661 | /// | |||
662 | /// \p ConsiderFlags controls whether poison producing flags on the | |||
663 | /// instruction are considered. This can be used to see if the instruction | |||
664 | /// could still introduce undef or poison even without poison generating flags | |||
665 | /// which might be on the instruction. (i.e. could the result of | |||
666 | /// Op->dropPoisonGeneratingFlags() still create poison or undef) | |||
667 | /// | |||
668 | /// canCreatePoison returns true if Op can create poison from non-poison | |||
669 | /// operands. | |||
670 | bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags = true); | |||
671 | bool canCreatePoison(const Operator *Op, bool ConsiderFlags = true); | |||
672 | ||||
673 | /// Return true if V is poison given that ValAssumedPoison is already poison. | |||
674 | /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`, | |||
675 | /// impliesPoison returns true. | |||
676 | bool impliesPoison(const Value *ValAssumedPoison, const Value *V); | |||
677 | ||||
678 | /// Return true if this function can prove that V does not have undef bits | |||
679 | /// and is never poison. If V is an aggregate value or vector, check whether | |||
680 | /// all elements (except padding) are not undef or poison. | |||
681 | /// Note that this is different from canCreateUndefOrPoison because the | |||
682 | /// function assumes Op's operands are not poison/undef. | |||
683 | /// | |||
684 | /// If CtxI and DT are specified this method performs flow-sensitive analysis | |||
685 | /// and returns true if it is guaranteed to be never undef or poison | |||
686 | /// immediately before the CtxI. | |||
687 | bool isGuaranteedNotToBeUndefOrPoison(const Value *V, | |||
688 | AssumptionCache *AC = nullptr, | |||
689 | const Instruction *CtxI = nullptr, | |||
690 | const DominatorTree *DT = nullptr, | |||
691 | unsigned Depth = 0); | |||
692 | bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr, | |||
693 | const Instruction *CtxI = nullptr, | |||
694 | const DominatorTree *DT = nullptr, | |||
695 | unsigned Depth = 0); | |||
696 | ||||
/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    ///< Signed minimum
  SPF_UMIN,    ///< Unsigned minimum
  SPF_SMAX,    ///< Signed maximum
  SPF_UMAX,    ///< Unsigned maximum
  SPF_FMINNUM, ///< Floating point minnum
  SPF_FMAXNUM, ///< Floating point maxnum
  SPF_ABS,     ///< Absolute value
  SPF_NABS     ///< Negated absolute value
};
709 | ||||
/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        ///< NaN behavior not applicable.
  SPNB_RETURNS_NAN,   ///< Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, ///< Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    ///< Given one NaN input, can return either (or
                      ///< it has been determined that no operands can
                      ///< be NaN).
};
720 | ||||
721 | struct SelectPatternResult { | |||
722 | SelectPatternFlavor Flavor; | |||
723 | SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is | |||
724 | /// SPF_FMINNUM or SPF_FMAXNUM. | |||
725 | bool Ordered; /// When implementing this min/max pattern as | |||
726 | /// fcmp; select, does the fcmp have to be | |||
727 | /// ordered? | |||
728 | ||||
729 | /// Return true if \p SPF is a min or a max pattern. | |||
730 | static bool isMinOrMax(SelectPatternFlavor SPF) { | |||
731 | return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS; | |||
732 | } | |||
733 | }; | |||
734 | ||||
735 | /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind | |||
736 | /// and providing the out parameter results if we successfully match. | |||
737 | /// | |||
738 | /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be | |||
739 | /// the negation instruction from the idiom. | |||
740 | /// | |||
741 | /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does | |||
742 | /// not match that of the original select. If this is the case, the cast | |||
743 | /// operation (one of Trunc,SExt,Zext) that must be done to transform the | |||
744 | /// type of LHS and RHS into the type of V is returned in CastOp. | |||
745 | /// | |||
746 | /// For example: | |||
747 | /// %1 = icmp slt i32 %a, i32 4 | |||
748 | /// %2 = sext i32 %a to i64 | |||
749 | /// %3 = select i1 %1, i64 %2, i64 4 | |||
750 | /// | |||
751 | /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt | |||
752 | /// | |||
753 | SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, | |||
754 | Instruction::CastOps *CastOp = nullptr, | |||
755 | unsigned Depth = 0); | |||
756 | ||||
757 | inline SelectPatternResult | |||
758 | matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) { | |||
759 | Value *L = const_cast<Value *>(LHS); | |||
760 | Value *R = const_cast<Value *>(RHS); | |||
761 | auto Result = matchSelectPattern(const_cast<Value *>(V), L, R); | |||
762 | LHS = L; | |||
763 | RHS = R; | |||
764 | return Result; | |||
765 | } | |||
766 | ||||
767 | /// Determine the pattern that a select with the given compare as its | |||
768 | /// predicate and given values as its true/false operands would match. | |||
769 | SelectPatternResult matchDecomposedSelectPattern( | |||
770 | CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, | |||
771 | Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0); | |||
772 | ||||
773 | /// Return the canonical comparison predicate for the specified | |||
774 | /// minimum/maximum flavor. | |||
775 | CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, | |||
776 | bool Ordered = false); | |||
777 | ||||
778 | /// Return the inverse minimum/maximum flavor of the specified flavor. | |||
779 | /// For example, signed minimum is the inverse of signed maximum. | |||
780 | SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF); | |||
781 | ||||
782 | Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID); | |||
783 | ||||
784 | /// Return the canonical inverse comparison predicate for the specified | |||
785 | /// minimum/maximum flavor. | |||
786 | CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF); | |||
787 | ||||
788 | /// Return the minimum or maximum constant value for the specified integer | |||
789 | /// min/max flavor and type. | |||
790 | APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth); | |||
791 | ||||
792 | /// Check if the values in \p VL are select instructions that can be converted | |||
793 | /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a | |||
794 | /// conversion is possible, together with a bool indicating whether all select | |||
795 | /// conditions are only used by the selects. Otherwise return | |||
796 | /// Intrinsic::not_intrinsic. | |||
797 | std::pair<Intrinsic::ID, bool> | |||
798 | canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL); | |||
799 | ||||
800 | /// Attempt to match a simple first order recurrence cycle of the form: | |||
801 | /// %iv = phi Ty [%Start, %Entry], [%Inc, %backedge] | |||
802 | /// %inc = binop %iv, %step | |||
803 | /// OR | |||
804 | /// %iv = phi Ty [%Start, %Entry], [%Inc, %backedge] | |||
805 | /// %inc = binop %step, %iv | |||
806 | /// | |||
807 | /// A first order recurrence is a formula with the form: X_n = f(X_(n-1)) | |||
808 | /// | |||
809 | /// A couple of notes on subtleties in that definition: | |||
810 | /// * The Step does not have to be loop invariant. In math terms, it can | |||
811 | /// be a free variable. We allow recurrences with both constant and | |||
812 | /// variable coefficients. Callers may wish to filter cases where Step | |||
813 | /// does not dominate P. | |||
814 | /// * For non-commutative operators, we will match both forms. This | |||
815 | /// results in some odd recurrence structures. Callers may wish to filter | |||
816 | /// out recurrences where the phi is not the LHS of the returned operator. | |||
817 | /// * Because of the structure matched, the caller can assume as a post | |||
/// condition of the match the presence of a Loop with P's parent as its
819 | /// header *except* in unreachable code. (Dominance decays in unreachable | |||
820 | /// code.) | |||
821 | /// | |||
/// NOTE: This is intentionally simple. If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
824 | bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, | |||
825 | Value *&Start, Value *&Step); | |||
826 | ||||
827 | /// Analogous to the above, but starting from the binary operator | |||
828 | bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, | |||
829 | Value *&Start, Value *&Step); | |||
830 | ||||
831 | /// Return true if RHS is known to be implied true by LHS. Return false if | |||
832 | /// RHS is known to be implied false by LHS. Otherwise, return None if no | |||
833 | /// implication can be made. | |||
834 | /// A & B must be i1 (boolean) values or a vector of such values. Note that | |||
835 | /// the truth table for implication is the same as <=u on i1 values (but not | |||
836 | /// <=s!). The truth table for both is: | |||
837 | /// | T | F (B) | |||
838 | /// T | T | F | |||
839 | /// F | T | T | |||
840 | /// (A) | |||
841 | Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS, | |||
842 | const DataLayout &DL, bool LHSIsTrue = true, | |||
843 | unsigned Depth = 0); | |||
844 | Optional<bool> isImpliedCondition(const Value *LHS, | |||
845 | CmpInst::Predicate RHSPred, | |||
846 | const Value *RHSOp0, const Value *RHSOp1, | |||
847 | const DataLayout &DL, bool LHSIsTrue = true, | |||
848 | unsigned Depth = 0); | |||
849 | ||||
850 | /// Return the boolean condition value in the context of the given instruction | |||
851 | /// if it is known based on dominating conditions. | |||
852 | Optional<bool> isImpliedByDomCondition(const Value *Cond, | |||
853 | const Instruction *ContextI, | |||
854 | const DataLayout &DL); | |||
855 | Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred, | |||
856 | const Value *LHS, const Value *RHS, | |||
857 | const Instruction *ContextI, | |||
858 | const DataLayout &DL); | |||
859 | ||||
860 | /// If Ptr1 is provably equal to Ptr2 plus a constant offset, return that | |||
861 | /// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In | |||
862 | /// this case offset would be -8. | |||
863 | Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2, | |||
864 | const DataLayout &DL); | |||
865 | } // end namespace llvm | |||
866 | ||||
867 | #endif // LLVM_ANALYSIS_VALUETRACKING_H |