File: lib/Transforms/IPO/GlobalOpt.cpp
Warning: line 1661, column 23: Value stored to 'E' during its initialization is never read
1 | //===- GlobalOpt.cpp - Optimize Global Variables --------------------------===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This pass transforms simple global variables that never have their address |
11 | // taken. If obviously true, it marks read/write globals as constant, deletes |
12 | // variables only stored to, etc. |
13 | // |
14 | //===----------------------------------------------------------------------===// |
15 | |
16 | #include "llvm/Transforms/IPO/GlobalOpt.h" |
17 | #include "llvm/ADT/DenseMap.h" |
18 | #include "llvm/ADT/STLExtras.h" |
19 | #include "llvm/ADT/SmallPtrSet.h" |
20 | #include "llvm/ADT/SmallSet.h" |
21 | #include "llvm/ADT/SmallVector.h" |
22 | #include "llvm/ADT/Statistic.h" |
23 | #include "llvm/ADT/Twine.h" |
24 | #include "llvm/ADT/iterator_range.h" |
25 | #include "llvm/Analysis/BlockFrequencyInfo.h" |
26 | #include "llvm/Analysis/ConstantFolding.h" |
27 | #include "llvm/Analysis/MemoryBuiltins.h" |
28 | #include "llvm/Analysis/TargetLibraryInfo.h" |
29 | #include "llvm/Analysis/TargetTransformInfo.h" |
30 | #include "llvm/Analysis/Utils/Local.h" |
31 | #include "llvm/BinaryFormat/Dwarf.h" |
32 | #include "llvm/IR/Attributes.h" |
33 | #include "llvm/IR/BasicBlock.h" |
34 | #include "llvm/IR/CallSite.h" |
35 | #include "llvm/IR/CallingConv.h" |
36 | #include "llvm/IR/Constant.h" |
37 | #include "llvm/IR/Constants.h" |
38 | #include "llvm/IR/DataLayout.h" |
39 | #include "llvm/IR/DebugInfoMetadata.h" |
40 | #include "llvm/IR/DerivedTypes.h" |
41 | #include "llvm/IR/Dominators.h" |
42 | #include "llvm/IR/Function.h" |
43 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
44 | #include "llvm/IR/GlobalAlias.h" |
45 | #include "llvm/IR/GlobalValue.h" |
46 | #include "llvm/IR/GlobalVariable.h" |
47 | #include "llvm/IR/InstrTypes.h" |
48 | #include "llvm/IR/Instruction.h" |
49 | #include "llvm/IR/Instructions.h" |
50 | #include "llvm/IR/IntrinsicInst.h" |
51 | #include "llvm/IR/Module.h" |
52 | #include "llvm/IR/Operator.h" |
53 | #include "llvm/IR/Type.h" |
54 | #include "llvm/IR/Use.h" |
55 | #include "llvm/IR/User.h" |
56 | #include "llvm/IR/Value.h" |
57 | #include "llvm/IR/ValueHandle.h" |
58 | #include "llvm/Pass.h" |
59 | #include "llvm/Support/AtomicOrdering.h" |
60 | #include "llvm/Support/Casting.h" |
61 | #include "llvm/Support/CommandLine.h" |
62 | #include "llvm/Support/Debug.h" |
63 | #include "llvm/Support/ErrorHandling.h" |
64 | #include "llvm/Support/MathExtras.h" |
65 | #include "llvm/Support/raw_ostream.h" |
66 | #include "llvm/Transforms/IPO.h" |
67 | #include "llvm/Transforms/Utils/CtorUtils.h" |
68 | #include "llvm/Transforms/Utils/Evaluator.h" |
69 | #include "llvm/Transforms/Utils/GlobalStatus.h" |
70 | #include <cassert> |
71 | #include <cstdint> |
72 | #include <utility> |
73 | #include <vector> |
74 | |
75 | using namespace llvm; |
76 | |
77 | #define DEBUG_TYPE "globalopt" |
78 | |
79 | STATISTIC(NumMarked , "Number of globals marked constant") |
80 | STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr") |
81 | STATISTIC(NumSRA , "Number of aggregate globals broken into scalars") |
82 | STATISTIC(NumHeapSRA , "Number of heap objects SRA'd") |
83 | STATISTIC(NumSubstitute,"Number of globals with initializers stored into them") |
84 | STATISTIC(NumDeleted , "Number of globals deleted") |
85 | STATISTIC(NumGlobUses , "Number of global uses devirtualized") |
86 | STATISTIC(NumLocalized , "Number of globals localized") |
87 | STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans") |
88 | STATISTIC(NumFastCallFns , "Number of functions converted to fastcc") |
89 | STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated") |
90 | STATISTIC(NumNestRemoved , "Number of nest attributes removed") |
91 | STATISTIC(NumAliasesResolved, "Number of global aliases resolved") |
92 | STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated") |
93 | STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed") |
94 | STATISTIC(NumInternalFunc, "Number of internal functions") |
95 | STATISTIC(NumColdCC, "Number of functions marked coldcc") |
96 | |
97 | static cl::opt<bool> |
98 | EnableColdCCStressTest("enable-coldcc-stress-test", |
99 | cl::desc("Enable stress test of coldcc by adding " |
100 | "calling conv to all internal functions."), |
101 | cl::init(false), cl::Hidden); |
102 | |
103 | static cl::opt<int> ColdCCRelFreq( |
104 | "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore, |
105 | cl::desc( |
106 | "Maximum block frequency, expressed as a percentage of caller's " |
107 | "entry frequency, for a call site to be considered cold for enabling" |
108 | "coldcc")); |
109 | |
110 | /// Is this global variable possibly used by a leak checker as a root? If so, |
111 | /// we might not really want to eliminate the stores to it. |
112 | static bool isLeakCheckerRoot(GlobalVariable *GV) { |
113 | // A global variable is a root if it is a pointer, or could plausibly contain |
114 | // a pointer. There are two challenges; one is that we could have a struct |
115 | // that has an inner member which is a pointer. We recurse through the type to |
116 | // detect these (up to a point). The other is that we may actually be a union |
117 | // of a pointer and another type, and so our LLVM type is an integer which |
118 | // gets converted into a pointer, or our type is an [i8 x #] with a pointer |
119 | // potentially contained here. |
120 | |
121 | if (GV->hasPrivateLinkage()) |
122 | return false; |
123 | |
124 | SmallVector<Type *, 4> Types; |
125 | Types.push_back(GV->getValueType()); |
126 | |
127 | unsigned Limit = 20; |
128 | do { |
129 | Type *Ty = Types.pop_back_val(); |
130 | switch (Ty->getTypeID()) { |
131 | default: break; |
132 | case Type::PointerTyID: return true; |
133 | case Type::ArrayTyID: |
134 | case Type::VectorTyID: { |
135 | SequentialType *STy = cast<SequentialType>(Ty); |
136 | Types.push_back(STy->getElementType()); |
137 | break; |
138 | } |
139 | case Type::StructTyID: { |
140 | StructType *STy = cast<StructType>(Ty); |
141 | if (STy->isOpaque()) return true; |
142 | for (StructType::element_iterator I = STy->element_begin(), |
143 | E = STy->element_end(); I != E; ++I) { |
144 | Type *InnerTy = *I; |
145 | if (isa<PointerType>(InnerTy)) return true; |
146 | if (isa<CompositeType>(InnerTy)) |
147 | Types.push_back(InnerTy); |
148 | } |
149 | break; |
150 | } |
151 | } |
152 | if (--Limit == 0) return true; |
153 | } while (!Types.empty()); |
154 | return false; |
155 | } |
156 | |
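A rough illustration of what the recursion above treats as a root, sketched at the C level (names are hypothetical, not from this file):

    static char *Buf;                     // pointer type: a root
    static struct { int N; char *P; } S;  // pointer nested in a struct: a root
    static double Table[16];              // no pointer reachable: not a root

The depth limit makes the walk conservative: once 20 nested types have been inspected, the global is assumed to be a root.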
157 | /// Given a value that is stored to a global but never read, determine whether |
158 | /// it's safe to remove the store and the chain of computation that feeds the |
159 | /// store. |
160 | static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) { |
161 | do { |
162 | if (isa<Constant>(V)) |
163 | return true; |
164 | if (!V->hasOneUse()) |
165 | return false; |
166 | if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) || |
167 | isa<GlobalValue>(V)) |
168 | return false; |
169 | if (isAllocationFn(V, TLI)) |
170 | return true; |
171 | |
172 | Instruction *I = cast<Instruction>(V); |
173 | if (I->mayHaveSideEffects()) |
174 | return false; |
175 | if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { |
176 | if (!GEP->hasAllConstantIndices()) |
177 | return false; |
178 | } else if (I->getNumOperands() != 1) { |
179 | return false; |
180 | } |
181 | |
182 | V = I->getOperand(0); |
183 | } while (true); |
184 | } |
185 | |
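For example, a chain this routine accepts, assuming a single-use malloc feeding the store (hypothetical C, not from this file):

    #include <cstdlib>
    static void *Root;
    // The malloc result reaches the store through single-use,
    // side-effect-free steps (a constant-offset GEP here), so the store,
    // the GEP, and the malloc itself can all be deleted if Root is never
    // read.
    void set() { Root = (char *)std::malloc(64) + 8; }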
186 | /// This GV is a pointer root. Loop over all users of the global and clean up |
187 | /// any stores whose value is obviously not dynamically allocated memory, or |
188 | /// whose computation chain (allocation included) can be deleted outright. |
189 | static bool CleanupPointerRootUsers(GlobalVariable *GV, |
190 | const TargetLibraryInfo *TLI) { |
191 | // A brief explanation of leak checkers. The goal is to find bugs where |
192 | // pointers are forgotten, causing an accumulating growth in memory |
193 | // usage over time. The common strategy for leak checkers is to whitelist the |
194 | // memory pointed to by globals at exit. This is popular because it also |
195 | // solves another problem where the main thread of a C++ program may shut down |
196 | // before other threads that are still expecting to use those globals. To |
197 | // handle that case, we expect the program may create a singleton and never |
198 | // destroy it. |
199 | |
200 | bool Changed = false; |
201 | |
202 | // If Dead[n].first is the only use of a malloc result, we can delete its |
203 | // chain of computation and the store to the global in Dead[n].second. |
204 | SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead; |
205 | |
206 | // Constants can't be pointers to dynamically allocated memory. |
207 | for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end(); |
208 | UI != E;) { |
209 | User *U = *UI++; |
210 | if (StoreInst *SI = dyn_cast<StoreInst>(U)) { |
211 | Value *V = SI->getValueOperand(); |
212 | if (isa<Constant>(V)) { |
213 | Changed = true; |
214 | SI->eraseFromParent(); |
215 | } else if (Instruction *I = dyn_cast<Instruction>(V)) { |
216 | if (I->hasOneUse()) |
217 | Dead.push_back(std::make_pair(I, SI)); |
218 | } |
219 | } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) { |
220 | if (isa<Constant>(MSI->getValue())) { |
221 | Changed = true; |
222 | MSI->eraseFromParent(); |
223 | } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) { |
224 | if (I->hasOneUse()) |
225 | Dead.push_back(std::make_pair(I, MSI)); |
226 | } |
227 | } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) { |
228 | GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource()); |
229 | if (MemSrc && MemSrc->isConstant()) { |
230 | Changed = true; |
231 | MTI->eraseFromParent(); |
232 | } else if (Instruction *I = dyn_cast_or_null<Instruction>(MemSrc)) { |
233 | if (I->hasOneUse()) |
234 | Dead.push_back(std::make_pair(I, MTI)); |
235 | } |
236 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { |
237 | if (CE->use_empty()) { |
238 | CE->destroyConstant(); |
239 | Changed = true; |
240 | } |
241 | } else if (Constant *C = dyn_cast<Constant>(U)) { |
242 | if (isSafeToDestroyConstant(C)) { |
243 | C->destroyConstant(); |
244 | // This could have invalidated UI, start over from scratch. |
245 | Dead.clear(); |
246 | CleanupPointerRootUsers(GV, TLI); |
247 | return true; |
248 | } |
249 | } |
250 | } |
251 | |
252 | for (int i = 0, e = Dead.size(); i != e; ++i) { |
253 | if (IsSafeComputationToRemove(Dead[i].first, TLI)) { |
254 | Dead[i].second->eraseFromParent(); |
255 | Instruction *I = Dead[i].first; |
256 | do { |
257 | if (isAllocationFn(I, TLI)) |
258 | break; |
259 | Instruction *J = dyn_cast<Instruction>(I->getOperand(0)); |
260 | if (!J) |
261 | break; |
262 | I->eraseFromParent(); |
263 | I = J; |
264 | } while (true); |
265 | I->eraseFromParent(); |
266 | } |
267 | } |
268 | |
269 | return Changed; |
270 | } |
271 | |
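A sketch of the two deletable-store cases above, assuming Root is never read (hypothetical C):

    #include <cstdlib>
    static char *Root;                            // a leak-checker root
    void a() { Root = (char *)"str"; }            // constant store: erased
    void b() { Root = (char *)std::malloc(16); }  // single-use malloc:
                                                  // store and allocation
                                                  // are both erased

Deleting the allocation together with its store is what keeps the leak checker happy: no memory is left allocated but unreferenced.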
272 | /// We just marked GV constant. Loop over all users of the global, cleaning up |
273 | /// the obvious ones. This is largely just a quick scan over the use list to |
274 | /// clean up the easy and obvious cruft. This returns true if it made a change. |
275 | static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, |
276 | const DataLayout &DL, |
277 | TargetLibraryInfo *TLI) { |
278 | bool Changed = false; |
279 | // Note that we need to use a weak value handle for the worklist items. When |
280 | // we delete a constant array, we may also be holding pointer to one of its |
281 | // elements (or an element of one of its elements if we're dealing with an |
282 | // array of arrays) in the worklist. |
283 | SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end()); |
284 | while (!WorkList.empty()) { |
285 | Value *UV = WorkList.pop_back_val(); |
286 | if (!UV) |
287 | continue; |
288 | |
289 | User *U = cast<User>(UV); |
290 | |
291 | if (LoadInst *LI = dyn_cast<LoadInst>(U)) { |
292 | if (Init) { |
293 | // Replace the load with the initializer. |
294 | LI->replaceAllUsesWith(Init); |
295 | LI->eraseFromParent(); |
296 | Changed = true; |
297 | } |
298 | } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { |
299 | // Store must be unreachable or storing Init into the global. |
300 | SI->eraseFromParent(); |
301 | Changed = true; |
302 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { |
303 | if (CE->getOpcode() == Instruction::GetElementPtr) { |
304 | Constant *SubInit = nullptr; |
305 | if (Init) |
306 | SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); |
307 | Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI); |
308 | } else if ((CE->getOpcode() == Instruction::BitCast && |
309 | CE->getType()->isPointerTy()) || |
310 | CE->getOpcode() == Instruction::AddrSpaceCast) { |
311 | // Pointer cast, delete any stores and memsets to the global. |
312 | Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI); |
313 | } |
314 | |
315 | if (CE->use_empty()) { |
316 | CE->destroyConstant(); |
317 | Changed = true; |
318 | } |
319 | } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { |
320 | // Do not transform "gepinst (gep constexpr (GV))" here, because forming |
321 | // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold |
322 | // and will invalidate our notion of what Init is. |
323 | Constant *SubInit = nullptr; |
324 | if (!isa<ConstantExpr>(GEP->getOperand(0))) { |
325 | ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>( |
326 | ConstantFoldInstruction(GEP, DL, TLI)); |
327 | if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr) |
328 | SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); |
329 | |
330 | // If the initializer is an all-null value and we have an inbounds GEP, |
331 | // we already know what the result of any load from that GEP is. |
332 | // TODO: Handle splats. |
333 | if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds()) |
334 | SubInit = Constant::getNullValue(GEP->getResultElementType()); |
335 | } |
336 | Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI); |
337 | |
338 | if (GEP->use_empty()) { |
339 | GEP->eraseFromParent(); |
340 | Changed = true; |
341 | } |
342 | } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv |
343 | if (MI->getRawDest() == V) { |
344 | MI->eraseFromParent(); |
345 | Changed = true; |
346 | } |
347 | |
348 | } else if (Constant *C = dyn_cast<Constant>(U)) { |
349 | // If we have a chain of dead constantexprs or other things dangling from |
350 | // us, and if they are all dead, nuke them without remorse. |
351 | if (isSafeToDestroyConstant(C)) { |
352 | C->destroyConstant(); |
353 | CleanupConstantGlobalUsers(V, Init, DL, TLI); |
354 | return true; |
355 | } |
356 | } |
357 | } |
358 | return Changed; |
359 | } |
360 | |
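For instance, once globalopt has proven a global constant, loads through constant GEPs fold to the initializer (hypothetical C):

    static int G[4] = {1, 2, 3, 4};  // never written, so marked constant
    int get() { return G[2]; }       // load replaced by the initializer
                                     // element, i.e. the constant 3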
361 | /// Return true if the specified instruction is a safe user of a derived |
362 | /// expression from a global that we want to SROA. |
363 | static bool isSafeSROAElementUse(Value *V) { |
364 | // We might have a dead and dangling constant hanging off of here. |
365 | if (Constant *C = dyn_cast<Constant>(V)) |
366 | return isSafeToDestroyConstant(C); |
367 | |
368 | Instruction *I = dyn_cast<Instruction>(V); |
369 | if (!I) return false; |
370 | |
371 | // Loads are ok. |
372 | if (isa<LoadInst>(I)) return true; |
373 | |
374 | // Stores *to* the pointer are ok. |
375 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) |
376 | return SI->getOperand(0) != V; |
377 | |
378 | // Otherwise, it must be a GEP. |
379 | GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I); |
380 | if (!GEPI) return false; |
381 | |
382 | if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) || |
383 | !cast<Constant>(GEPI->getOperand(1))->isNullValue()) |
384 | return false; |
385 | |
386 | for (User *U : GEPI->users()) |
387 | if (!isSafeSROAElementUse(U)) |
388 | return false; |
389 | return true; |
390 | } |
391 | |
392 | /// U is a direct user of the specified global value. Look at it and its uses |
393 | /// and decide whether it is safe to SROA this global. |
394 | static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) { |
395 | // The user of the global must be a GEP Inst or a ConstantExpr GEP. |
396 | if (!isa<GetElementPtrInst>(U) && |
397 | (!isa<ConstantExpr>(U) || |
398 | cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr)) |
399 | return false; |
400 | |
401 | // Check to see if this ConstantExpr GEP is SRA'able. In particular, we |
402 | // don't like < 3 operand CE's, and we don't like non-constant integer |
403 | // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some |
404 | // value of C. |
405 | if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) || |
406 | !cast<Constant>(U->getOperand(1))->isNullValue() || |
407 | !isa<ConstantInt>(U->getOperand(2))) |
408 | return false; |
409 | |
410 | gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U); |
411 | ++GEPI; // Skip over the pointer index. |
412 | |
413 | // If this is a use of an array allocation, do a bit more checking for sanity. |
414 | if (GEPI.isSequential()) { |
415 | ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2)); |
416 | |
417 | // Check to make sure that index falls within the array. If not, |
418 | // something funny is going on, so we won't do the optimization. |
419 | // |
420 | if (GEPI.isBoundedSequential() && |
421 | Idx->getZExtValue() >= GEPI.getSequentialNumElements()) |
422 | return false; |
423 | |
424 | // We cannot scalar repl this level of the array unless any array |
425 | // sub-indices are in-range constants. In particular, consider: |
426 | // A[0][i]. We cannot know that the user isn't doing invalid things like |
427 | // allowing i to index an out-of-range subscript that accesses A[1]. |
428 | // |
429 | // Scalar replacing *just* the outer index of the array is probably not |
430 | // going to be a win anyway, so just give up. |
431 | for (++GEPI; // Skip array index. |
432 | GEPI != E; |
433 | ++GEPI) { |
434 | if (GEPI.isStruct()) |
435 | continue; |
436 | |
437 | ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand()); |
438 | if (!IdxVal || |
439 | (GEPI.isBoundedSequential() && |
440 | IdxVal->getZExtValue() >= GEPI.getSequentialNumElements())) |
441 | return false; |
442 | } |
443 | } |
444 | |
445 | return llvm::all_of(U->users(), |
446 | [](User *UU) { return isSafeSROAElementUse(UU); }); |
447 | } |
448 | |
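Concretely, the accepted and rejected shapes look like this when sketched in C (hypothetical globals):

    static struct { int A; int B[4]; } S;
    int f() { return S.A; }            // gep S, 0, 0: constant index, OK
    static int A2[4][4];
    int g(int i) { return A2[0][i]; }  // gep A2, 0, 0, i: non-constant
                                       // array sub-index, rejected (the
                                       // A[0][i] case described above)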
449 | /// Look at all uses of the global and decide whether it is safe for us to |
450 | /// perform this transformation. |
451 | static bool GlobalUsersSafeToSRA(GlobalValue *GV) { |
452 | for (User *U : GV->users()) |
453 | if (!IsUserOfGlobalSafeForSRA(U, GV)) |
454 | return false; |
455 | |
456 | return true; |
457 | } |
458 | |
459 | /// Copy over the debug info for a variable to its SRA replacements. |
460 | static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV, |
461 | uint64_t FragmentOffsetInBits, |
462 | uint64_t FragmentSizeInBits, |
463 | unsigned NumElements) { |
464 | SmallVector<DIGlobalVariableExpression *, 1> GVs; |
465 | GV->getDebugInfo(GVs); |
466 | for (auto *GVE : GVs) { |
467 | DIVariable *Var = GVE->getVariable(); |
468 | DIExpression *Expr = GVE->getExpression(); |
469 | if (NumElements > 1) { |
470 | if (auto E = DIExpression::createFragmentExpression( |
471 | Expr, FragmentOffsetInBits, FragmentSizeInBits)) |
472 | Expr = *E; |
473 | else |
474 | return; |
475 | } |
476 | auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr); |
477 | NGV->addDebugInfo(NGVE); |
478 | } |
479 | } |
480 | |
481 | /// Perform scalar replacement of aggregates on the specified global variable. |
482 | /// This opens the door for other optimizations by exposing the behavior of the |
483 | /// program in a more fine-grained way. We have determined that this |
484 | /// transformation is safe already. We return the first global variable we |
485 | /// insert so that the caller can reprocess it. |
486 | static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) { |
487 | // Make sure this global only has simple uses that we can SRA. |
488 | if (!GlobalUsersSafeToSRA(GV)) |
489 | return nullptr; |
490 | |
491 | assert(GV->hasLocalLinkage()); |
492 | Constant *Init = GV->getInitializer(); |
493 | Type *Ty = Init->getType(); |
494 | |
495 | std::vector<GlobalVariable *> NewGlobals; |
496 | Module::GlobalListType &Globals = GV->getParent()->getGlobalList(); |
497 | |
498 | // Get the alignment of the global, either explicit or target-specific. |
499 | unsigned StartAlignment = GV->getAlignment(); |
500 | if (StartAlignment == 0) |
501 | StartAlignment = DL.getABITypeAlignment(GV->getType()); |
502 | |
503 | if (StructType *STy = dyn_cast<StructType>(Ty)) { |
504 | unsigned NumElements = STy->getNumElements(); |
505 | NewGlobals.reserve(NumElements); |
506 | const StructLayout &Layout = *DL.getStructLayout(STy); |
507 | for (unsigned i = 0, e = NumElements; i != e; ++i) { |
508 | Constant *In = Init->getAggregateElement(i); |
509 | assert(In && "Couldn't get element of initializer?"); |
510 | GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false, |
511 | GlobalVariable::InternalLinkage, |
512 | In, GV->getName()+"."+Twine(i), |
513 | GV->getThreadLocalMode(), |
514 | GV->getType()->getAddressSpace()); |
515 | NGV->setExternallyInitialized(GV->isExternallyInitialized()); |
516 | NGV->copyAttributesFrom(GV); |
517 | Globals.push_back(NGV); |
518 | NewGlobals.push_back(NGV); |
519 | |
520 | // Calculate the known alignment of the field. If the original aggregate |
521 | // had 256 byte alignment for example, something might depend on that: |
522 | // propagate info to each field. |
523 | uint64_t FieldOffset = Layout.getElementOffset(i); |
524 | unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset); |
525 | if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i))) |
526 | NGV->setAlignment(NewAlign); |
527 | |
528 | // Copy over the debug info for the variable. |
529 | uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType()); |
530 | uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(i); |
531 | transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size, NumElements); |
532 | } |
533 | } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) { |
534 | unsigned NumElements = STy->getNumElements(); |
535 | if (NumElements > 16 && GV->hasNUsesOrMore(16)) |
536 | return nullptr; // It's not worth it. |
537 | NewGlobals.reserve(NumElements); |
538 | auto ElTy = STy->getElementType(); |
539 | uint64_t EltSize = DL.getTypeAllocSize(ElTy); |
540 | unsigned EltAlign = DL.getABITypeAlignment(ElTy); |
541 | uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy); |
542 | for (unsigned i = 0, e = NumElements; i != e; ++i) { |
543 | Constant *In = Init->getAggregateElement(i); |
544 | assert(In && "Couldn't get element of initializer?"); |
545 | |
546 | GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false, |
547 | GlobalVariable::InternalLinkage, |
548 | In, GV->getName()+"."+Twine(i), |
549 | GV->getThreadLocalMode(), |
550 | GV->getType()->getAddressSpace()); |
551 | NGV->setExternallyInitialized(GV->isExternallyInitialized()); |
552 | NGV->copyAttributesFrom(GV); |
553 | Globals.push_back(NGV); |
554 | NewGlobals.push_back(NGV); |
555 | |
556 | // Calculate the known alignment of the field. If the original aggregate |
557 | // had 256 byte alignment for example, something might depend on that: |
558 | // propagate info to each field. |
559 | unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i); |
560 | if (NewAlign > EltAlign) |
561 | NGV->setAlignment(NewAlign); |
562 | transferSRADebugInfo(GV, NGV, FragmentSizeInBits * i, FragmentSizeInBits, |
563 | NumElements); |
564 | } |
565 | } |
566 | |
567 | if (NewGlobals.empty()) |
568 | return nullptr; |
569 | |
570 | DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n"); |
571 | |
572 | Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext())); |
573 | |
574 | // Loop over all of the uses of the global, replacing the constantexpr geps, |
575 | // with smaller constantexpr geps or direct references. |
576 | while (!GV->use_empty()) { |
577 | User *GEP = GV->user_back(); |
578 | assert(((isa<ConstantExpr>(GEP) && |
579 |         cast<ConstantExpr>(GEP)->getOpcode() == Instruction::GetElementPtr) || |
580 |         isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!"); |
581 | |
582 | // Ignore the first operand, which has to be zero or else the program is |
583 | // quite broken (undefined). Get the second operand, which is the structure |
584 | // or array index. |
585 | unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); |
586 | if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access. |
587 | |
588 | Value *NewPtr = NewGlobals[Val]; |
589 | Type *NewTy = NewGlobals[Val]->getValueType(); |
590 | |
591 | // Form a shorter GEP if needed. |
592 | if (GEP->getNumOperands() > 3) { |
593 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) { |
594 | SmallVector<Constant*, 8> Idxs; |
595 | Idxs.push_back(NullInt); |
596 | for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i) |
597 | Idxs.push_back(CE->getOperand(i)); |
598 | NewPtr = |
599 | ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs); |
600 | } else { |
601 | GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP); |
602 | SmallVector<Value*, 8> Idxs; |
603 | Idxs.push_back(NullInt); |
604 | for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) |
605 | Idxs.push_back(GEPI->getOperand(i)); |
606 | NewPtr = GetElementPtrInst::Create( |
607 | NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI); |
608 | } |
609 | } |
610 | GEP->replaceAllUsesWith(NewPtr); |
611 | |
612 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP)) |
613 | GEPI->eraseFromParent(); |
614 | else |
615 | cast<ConstantExpr>(GEP)->destroyConstant(); |
616 | } |
617 | |
618 | // Delete the old global, now that it is dead. |
619 | Globals.erase(GV); |
620 | ++NumSRA; |
621 | |
622 | // Loop over the new globals array deleting any globals that are obviously |
623 | // dead. This can arise due to scalarization of a structure or an array that |
624 | // has elements that are dead. |
625 | unsigned FirstGlobal = 0; |
626 | for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i) |
627 | if (NewGlobals[i]->use_empty()) { |
628 | Globals.erase(NewGlobals[i]); |
629 | if (FirstGlobal == i) ++FirstGlobal; |
630 | } |
631 | |
632 | return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr; |
633 | } |
634 | |
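Roughly, for a two-field internal global the rewrite is (hypothetical C-level sketch):

    static struct { int X; double Y; } S;
    // S is replaced by two independent internal globals, S.0 (an int)
    // and S.1 (a double), each taking the corresponding element of S's
    // initializer; every GEP that selected field i of S is redirected to
    // the new global S.i, with any deeper indices preserved.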
635 | /// Return true if all users of the specified value will trap if the value is |
636 | /// dynamically null. PHIs keeps track of any phi nodes we've seen to avoid |
637 | /// reprocessing them. |
638 | static bool AllUsesOfValueWillTrapIfNull(const Value *V, |
639 | SmallPtrSetImpl<const PHINode*> &PHIs) { |
640 | for (const User *U : V->users()) |
641 | if (isa<LoadInst>(U)) { |
642 | // Will trap. |
643 | } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) { |
644 | if (SI->getOperand(0) == V) { |
645 | //cerr << "NONTRAPPING USE: " << *U; |
646 | return false; // Storing the value. |
647 | } |
648 | } else if (const CallInst *CI = dyn_cast<CallInst>(U)) { |
649 | if (CI->getCalledValue() != V) { |
650 | //cerr << "NONTRAPPING USE: " << *U; |
651 | return false; // Not calling the ptr |
652 | } |
653 | } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) { |
654 | if (II->getCalledValue() != V) { |
655 | //cerr << "NONTRAPPING USE: " << *U; |
656 | return false; // Not calling the ptr |
657 | } |
658 | } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) { |
659 | if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false; |
660 | } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { |
661 | if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false; |
662 | } else if (const PHINode *PN = dyn_cast<PHINode>(U)) { |
663 | // If we've already seen this phi node, ignore it, it has already been |
664 | // checked. |
665 | if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs)) |
666 | return false; |
667 | } else if (isa<ICmpInst>(U) && |
668 | isa<ConstantPointerNull>(U->getOperand(1))) { |
669 | // Ignore icmp X, null |
670 | } else { |
671 | //cerr << "NONTRAPPING USE: " << *U; |
672 | return false; |
673 | } |
674 | |
675 | return true; |
676 | } |
677 | |
678 | /// Return true if all uses of any loads from GV will trap if the loaded value |
679 | /// is null. Note that this also permits comparisons of the loaded value |
680 | /// against null, as a special case. |
681 | static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) { |
682 | for (const User *U : GV->users()) |
683 | if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { |
684 | SmallPtrSet<const PHINode*, 8> PHIs; |
685 | if (!AllUsesOfValueWillTrapIfNull(LI, PHIs)) |
686 | return false; |
687 | } else if (isa<StoreInst>(U)) { |
688 | // Ignore stores to the global. |
689 | } else { |
690 | // We don't know or understand this user, bail out. |
691 | //cerr << "UNKNOWN USER OF GLOBAL!: " << *U; |
692 | return false; |
693 | } |
694 | return true; |
695 | } |
696 | |
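For example, with P an internal global stored exactly once (hypothetical C):

    static int *P;
    int use() { return *P; }      // dereferencing the loaded pointer
                                  // would trap if it were null: fine
    int chk() { return P == 0; }  // icmp of the loaded value against
                                  // null: permitted as a special case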
697 | static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) { |
698 | bool Changed = false; |
699 | for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) { |
700 | Instruction *I = cast<Instruction>(*UI++); |
701 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { |
702 | LI->setOperand(0, NewV); |
703 | Changed = true; |
704 | } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { |
705 | if (SI->getOperand(1) == V) { |
706 | SI->setOperand(1, NewV); |
707 | Changed = true; |
708 | } |
709 | } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) { |
710 | CallSite CS(I); |
711 | if (CS.getCalledValue() == V) { |
712 | // Calling through the pointer! Turn into a direct call, but be careful |
713 | // that the pointer is not also being passed as an argument. |
714 | CS.setCalledFunction(NewV); |
715 | Changed = true; |
716 | bool PassedAsArg = false; |
717 | for (unsigned i = 0, e = CS.arg_size(); i != e; ++i) |
718 | if (CS.getArgument(i) == V) { |
719 | PassedAsArg = true; |
720 | CS.setArgument(i, NewV); |
721 | } |
722 | |
723 | if (PassedAsArg) { |
724 | // Being passed as an argument also. Be careful to not invalidate UI! |
725 | UI = V->user_begin(); |
726 | } |
727 | } |
728 | } else if (CastInst *CI = dyn_cast<CastInst>(I)) { |
729 | Changed |= OptimizeAwayTrappingUsesOfValue(CI, |
730 | ConstantExpr::getCast(CI->getOpcode(), |
731 | NewV, CI->getType())); |
732 | if (CI->use_empty()) { |
733 | Changed = true; |
734 | CI->eraseFromParent(); |
735 | } |
736 | } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { |
737 | // Should handle GEP here. |
738 | SmallVector<Constant*, 8> Idxs; |
739 | Idxs.reserve(GEPI->getNumOperands()-1); |
740 | for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end(); |
741 | i != e; ++i) |
742 | if (Constant *C = dyn_cast<Constant>(*i)) |
743 | Idxs.push_back(C); |
744 | else |
745 | break; |
746 | if (Idxs.size() == GEPI->getNumOperands()-1) |
747 | Changed |= OptimizeAwayTrappingUsesOfValue( |
748 | GEPI, ConstantExpr::getGetElementPtr(nullptr, NewV, Idxs)); |
749 | if (GEPI->use_empty()) { |
750 | Changed = true; |
751 | GEPI->eraseFromParent(); |
752 | } |
753 | } |
754 | } |
755 | |
756 | return Changed; |
757 | } |
758 | |
759 | /// The specified global has only one non-null value stored into it. If there |
760 | /// are uses of the loaded value that would trap if the loaded value is |
761 | /// dynamically null, then we know that they cannot be reachable with a null |
762 | /// value, so we can optimize away the load. |
763 | static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, |
764 | const DataLayout &DL, |
765 | TargetLibraryInfo *TLI) { |
766 | bool Changed = false; |
767 | |
768 | // Keep track of whether we are able to remove all the uses of the global |
769 | // other than the store that defines it. |
770 | bool AllNonStoreUsesGone = true; |
771 | |
772 | // Replace all uses of loads with uses of uses of the stored value. |
773 | for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){ |
774 | User *GlobalUser = *GUI++; |
775 | if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) { |
776 | Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV); |
777 | // If we were able to delete all uses of the loads |
778 | if (LI->use_empty()) { |
779 | LI->eraseFromParent(); |
780 | Changed = true; |
781 | } else { |
782 | AllNonStoreUsesGone = false; |
783 | } |
784 | } else if (isa<StoreInst>(GlobalUser)) { |
785 | // Ignore the store that stores "LV" to the global. |
786 | assert(GlobalUser->getOperand(1) == GV && |
787 |        "Must be storing *to* the global"); |
788 | } else { |
789 | AllNonStoreUsesGone = false; |
790 | |
791 | // If we get here we could have other crazy uses that are transitively |
792 | // loaded. |
793 | assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) || |
794 |         isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) || |
795 |         isa<BitCastInst>(GlobalUser) || |
796 |         isa<GetElementPtrInst>(GlobalUser)) && |
797 |        "Only expect load and stores!"); |
798 | } |
799 | } |
800 | |
801 | if (Changed) { |
802 | DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV << "\n"); |
803 | ++NumGlobUses; |
804 | } |
805 | |
806 | // If we nuked all of the loads, then none of the stores are needed either, |
807 | // nor is the global. |
808 | if (AllNonStoreUsesGone) { |
809 | if (isLeakCheckerRoot(GV)) { |
810 | Changed |= CleanupPointerRootUsers(GV, TLI); |
811 | } else { |
812 | Changed = true; |
813 | CleanupConstantGlobalUsers(GV, nullptr, DL, TLI); |
814 | } |
815 | if (GV->use_empty()) { |
816 | DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n"); |
817 | Changed = true; |
818 | GV->eraseFromParent(); |
819 | ++NumDeleted; |
820 | } |
821 | } |
822 | return Changed; |
823 | } |
824 | |
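Putting the pieces together, a sketch of the net effect (hypothetical C):

    static int Obj;
    static int *P;             // the one non-null store: P = &Obj
    void init() { P = &Obj; }
    int get() { return *P; }   // rewritten to read Obj directly; once
                               // every load is gone, the store and P
                               // itself are deleted as well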
825 | /// Walk the use list of V, constant folding all of the instructions that are |
826 | /// foldable. |
827 | static void ConstantPropUsersOf(Value *V, const DataLayout &DL, |
828 | TargetLibraryInfo *TLI) { |
829 | for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; ) |
830 | if (Instruction *I = dyn_cast<Instruction>(*UI++)) |
831 | if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) { |
832 | I->replaceAllUsesWith(NewC); |
833 | |
834 | // Advance UI to the next non-I use to avoid invalidating it! |
835 | // Instructions could multiply use V. |
836 | while (UI != E && *UI == I) |
837 | ++UI; |
838 | if (isInstructionTriviallyDead(I, TLI)) |
839 | I->eraseFromParent(); |
840 | } |
841 | } |
842 | |
843 | /// This function takes the specified global variable, and transforms the |
844 | /// program as if it always contained the result of the specified malloc. |
845 | /// Because it is always the result of the specified malloc, there is no reason |
846 | /// to actually DO the malloc. Instead, turn the malloc into a global, and any |
847 | /// loads of GV as uses of the new global. |
848 | static GlobalVariable * |
849 | OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy, |
850 | ConstantInt *NElements, const DataLayout &DL, |
851 | TargetLibraryInfo *TLI) { |
852 | DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n'); |
853 | |
854 | Type *GlobalType; |
855 | if (NElements->getZExtValue() == 1) |
856 | GlobalType = AllocTy; |
857 | else |
858 | // If we have an array allocation, the global variable is of an array. |
859 | GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue()); |
860 | |
861 | // Create the new global variable. The contents of the malloc'd memory is |
862 | // undefined, so initialize with an undef value. |
863 | GlobalVariable *NewGV = new GlobalVariable( |
864 | *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage, |
865 | UndefValue::get(GlobalType), GV->getName() + ".body", nullptr, |
866 | GV->getThreadLocalMode()); |
867 | |
868 | // If there are bitcast users of the malloc (which is typical, usually we have |
869 | // a malloc + bitcast) then replace them with uses of the new global. Update |
870 | // other users to use the global as well. |
871 | BitCastInst *TheBC = nullptr; |
872 | while (!CI->use_empty()) { |
873 | Instruction *User = cast<Instruction>(CI->user_back()); |
874 | if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { |
875 | if (BCI->getType() == NewGV->getType()) { |
876 | BCI->replaceAllUsesWith(NewGV); |
877 | BCI->eraseFromParent(); |
878 | } else { |
879 | BCI->setOperand(0, NewGV); |
880 | } |
881 | } else { |
882 | if (!TheBC) |
883 | TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI); |
884 | User->replaceUsesOfWith(CI, TheBC); |
885 | } |
886 | } |
887 | |
888 | Constant *RepValue = NewGV; |
889 | if (NewGV->getType() != GV->getValueType()) |
890 | RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType()); |
891 | |
892 | // If there is a comparison against null, we will insert a global bool to |
893 | // keep track of whether the global was initialized yet or not. |
894 | GlobalVariable *InitBool = |
895 | new GlobalVariable(Type::getInt1Ty(GV->getContext()), false, |
896 | GlobalValue::InternalLinkage, |
897 | ConstantInt::getFalse(GV->getContext()), |
898 | GV->getName()+".init", GV->getThreadLocalMode()); |
899 | bool InitBoolUsed = false; |
900 | |
901 | // Loop over all uses of GV, processing them in turn. |
902 | while (!GV->use_empty()) { |
903 | if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) { |
904 | // The global is initialized when the store to it occurs. |
905 | new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0, |
906 | SI->getOrdering(), SI->getSyncScopeID(), SI); |
907 | SI->eraseFromParent(); |
908 | continue; |
909 | } |
910 | |
911 | LoadInst *LI = cast<LoadInst>(GV->user_back()); |
912 | while (!LI->use_empty()) { |
913 | Use &LoadUse = *LI->use_begin(); |
914 | ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser()); |
915 | if (!ICI) { |
916 | LoadUse = RepValue; |
917 | continue; |
918 | } |
919 | |
920 | // Replace the cmp X, 0 with a use of the bool value. |
921 | // Sink the load to where the compare was, if atomic rules allow us to. |
922 | Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0, |
923 | LI->getOrdering(), LI->getSyncScopeID(), |
924 | LI->isUnordered() ? (Instruction*)ICI : LI); |
925 | InitBoolUsed = true; |
926 | switch (ICI->getPredicate()) { |
927 | default: llvm_unreachable("Unknown ICmp Predicate!"); |
928 | case ICmpInst::ICMP_ULT: |
929 | case ICmpInst::ICMP_SLT: // X < null -> always false |
930 | LV = ConstantInt::getFalse(GV->getContext()); |
931 | break; |
932 | case ICmpInst::ICMP_ULE: |
933 | case ICmpInst::ICMP_SLE: |
934 | case ICmpInst::ICMP_EQ: |
935 | LV = BinaryOperator::CreateNot(LV, "notinit", ICI); |
936 | break; |
937 | case ICmpInst::ICMP_NE: |
938 | case ICmpInst::ICMP_UGE: |
939 | case ICmpInst::ICMP_SGE: |
940 | case ICmpInst::ICMP_UGT: |
941 | case ICmpInst::ICMP_SGT: |
942 | break; // no change. |
943 | } |
944 | ICI->replaceAllUsesWith(LV); |
945 | ICI->eraseFromParent(); |
946 | } |
947 | LI->eraseFromParent(); |
948 | } |
949 | |
950 | // If the initialization boolean was used, insert it, otherwise delete it. |
951 | if (!InitBoolUsed) { |
952 | while (!InitBool->use_empty()) // Delete initializations |
953 | cast<StoreInst>(InitBool->user_back())->eraseFromParent(); |
954 | delete InitBool; |
955 | } else |
956 | GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool); |
957 | |
958 | // Now the GV is dead, nuke it and the malloc.. |
959 | GV->eraseFromParent(); |
960 | CI->eraseFromParent(); |
961 | |
962 | // To further other optimizations, loop over all users of NewGV and try to |
963 | // constant prop them. This will promote GEP instructions with constant |
964 | // indices into GEP constant-exprs, which will allow global-opt to hack on it. |
965 | ConstantPropUsersOf(NewGV, DL, TLI); |
966 | if (RepValue != NewGV) |
967 | ConstantPropUsersOf(RepValue, DL, TLI); |
968 | |
969 | return NewGV; |
970 | } |
971 | |
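The shape this transformation targets, sketched at the C level (hypothetical names):

    #include <cstdlib>
    static int *G;             // written exactly once, from malloc
    void init() { G = (int *)std::malloc(sizeof(int)); }
    int get() { return *G; }
    // Afterwards the malloc is gone: an internal global G.body provides
    // the storage, and an i1 global G.init (tracking whether init() has
    // run) is kept only if some comparison of G against null needs it.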
972 | /// Scan the use-list of V checking to make sure that there are no complex uses |
973 | /// of V. We permit simple things like dereferencing the pointer, but not |
974 | /// storing through the address, unless it is to the specified global. |
975 | static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V, |
976 | const GlobalVariable *GV, |
977 | SmallPtrSetImpl<const PHINode*> &PHIs) { |
978 | for (const User *U : V->users()) { |
979 | const Instruction *Inst = cast<Instruction>(U); |
980 | |
981 | if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) { |
982 | continue; // Fine, ignore. |
983 | } |
984 | |
985 | if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { |
986 | if (SI->getOperand(0) == V && SI->getOperand(1) != GV) |
987 | return false; // Storing the pointer itself... bad. |
988 | continue; // Otherwise, storing through it, or storing into GV... fine. |
989 | } |
990 | |
991 | // Must index into the array and into the struct. |
992 | if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) { |
993 | if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs)) |
994 | return false; |
995 | continue; |
996 | } |
997 | |
998 | if (const PHINode *PN = dyn_cast<PHINode>(Inst)) { |
999 | // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI |
1000 | // cycles. |
1001 | if (PHIs.insert(PN).second) |
1002 | if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs)) |
1003 | return false; |
1004 | continue; |
1005 | } |
1006 | |
1007 | if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) { |
1008 | if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs)) |
1009 | return false; |
1010 | continue; |
1011 | } |
1012 | |
1013 | return false; |
1014 | } |
1015 | return true; |
1016 | } |
1017 | |
1018 | /// The Alloc pointer is stored into GV somewhere. Transform all uses of the |
1019 | /// allocation into loads from the global and uses of the resultant pointer. |
1020 | /// Further, delete the store into GV. This assumes that these values pass the |
1021 | /// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate. |
1022 | static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc, |
1023 | GlobalVariable *GV) { |
1024 | while (!Alloc->use_empty()) { |
1025 | Instruction *U = cast<Instruction>(*Alloc->user_begin()); |
1026 | Instruction *InsertPt = U; |
1027 | if (StoreInst *SI = dyn_cast<StoreInst>(U)) { |
1028 | // If this is the store of the allocation into the global, remove it. |
1029 | if (SI->getOperand(1) == GV) { |
1030 | SI->eraseFromParent(); |
1031 | continue; |
1032 | } |
1033 | } else if (PHINode *PN = dyn_cast<PHINode>(U)) { |
1034 | // Insert the load in the corresponding predecessor, not right before the |
1035 | // PHI. |
1036 | InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator(); |
1037 | } else if (isa<BitCastInst>(U)) { |
1038 | // Must be bitcast between the malloc and store to initialize the global. |
1039 | ReplaceUsesOfMallocWithGlobal(U, GV); |
1040 | U->eraseFromParent(); |
1041 | continue; |
1042 | } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { |
1043 | // If this is a "GEP bitcast" and the user is a store to the global, then |
1044 | // just process it as a bitcast. |
1045 | if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse()) |
1046 | if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back())) |
1047 | if (SI->getOperand(1) == GV) { |
1048 | // Must be bitcast GEP between the malloc and store to initialize |
1049 | // the global. |
1050 | ReplaceUsesOfMallocWithGlobal(GEPI, GV); |
1051 | GEPI->eraseFromParent(); |
1052 | continue; |
1053 | } |
1054 | } |
1055 | |
1056 | // Insert a load from the global, and use it instead of the malloc. |
1057 | Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt); |
1058 | U->replaceUsesOfWith(Alloc, NL); |
1059 | } |
1060 | } |
1061 | |
1062 | /// Verify that all uses of V (a load, or a phi of a load) are simple enough to |
1063 | /// perform heap SRA on. This permits GEP's that index through the array and |
1064 | /// struct field, icmps of null, and PHIs. |
1065 | static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V, |
1066 | SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs, |
1067 | SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) { |
1068 | // We permit two users of the load: setcc comparing against the null |
1069 | // pointer, and a getelementptr of a specific form. |
1070 | for (const User *U : V->users()) { |
1071 | const Instruction *UI = cast<Instruction>(U); |
1072 | |
1073 | // Comparison against null is ok. |
1074 | if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) { |
1075 | if (!isa<ConstantPointerNull>(ICI->getOperand(1))) |
1076 | return false; |
1077 | continue; |
1078 | } |
1079 | |
1080 | // getelementptr is also ok, but only a simple form. |
1081 | if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) { |
1082 | // Must index into the array and into the struct. |
1083 | if (GEPI->getNumOperands() < 3) |
1084 | return false; |
1085 | |
1086 | // Otherwise the GEP is ok. |
1087 | continue; |
1088 | } |
1089 | |
1090 | if (const PHINode *PN = dyn_cast<PHINode>(UI)) { |
1091 | if (!LoadUsingPHIsPerLoad.insert(PN).second) |
1092 | // This means some phi nodes are dependent on each other. |
1093 | // Avoid infinite looping! |
1094 | return false; |
1095 | if (!LoadUsingPHIs.insert(PN).second) |
1096 | // If we have already analyzed this PHI, then it is safe. |
1097 | continue; |
1098 | |
1099 | // Make sure all uses of the PHI are simple enough to transform. |
1100 | if (!LoadUsesSimpleEnoughForHeapSRA(PN, |
1101 | LoadUsingPHIs, LoadUsingPHIsPerLoad)) |
1102 | return false; |
1103 | |
1104 | continue; |
1105 | } |
1106 | |
1107 | // Otherwise we don't know what this is, not ok. |
1108 | return false; |
1109 | } |
1110 | |
1111 | return true; |
1112 | } |
1113 | |
1114 | /// If all users of values loaded from GV are simple enough to perform HeapSRA, |
1115 | /// return true. |
1116 | static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV, |
1117 | Instruction *StoredVal) { |
1118 | SmallPtrSet<const PHINode*, 32> LoadUsingPHIs; |
1119 | SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad; |
1120 | for (const User *U : GV->users()) |
1121 | if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { |
1122 | if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs, |
1123 | LoadUsingPHIsPerLoad)) |
1124 | return false; |
1125 | LoadUsingPHIsPerLoad.clear(); |
1126 | } |
1127 | |
1128 | // If we reach here, we know that all uses of the loads and transitive uses |
1129 | // (through PHI nodes) are simple enough to transform. However, we don't know |
1130 | // that all inputs to the PHI nodes are in the same equivalence sets. |
1131 | // Check to verify that all operands of the PHIs are either PHIS that can be |
1132 | // transformed, loads from GV, or MI itself. |
1133 | for (const PHINode *PN : LoadUsingPHIs) { |
1134 | for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) { |
1135 | Value *InVal = PN->getIncomingValue(op); |
1136 | |
1137 | // PHI of the stored value itself is ok. |
1138 | if (InVal == StoredVal) continue; |
1139 | |
1140 | if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) { |
1141 | // One of the PHIs in our set is (optimistically) ok. |
1142 | if (LoadUsingPHIs.count(InPN)) |
1143 | continue; |
1144 | return false; |
1145 | } |
1146 | |
1147 | // Load from GV is ok. |
1148 | if (const LoadInst *LI = dyn_cast<LoadInst>(InVal)) |
1149 | if (LI->getOperand(0) == GV) |
1150 | continue; |
1151 | |
1152 | // FIXME: Should undef or null incoming values be accepted here? |
1153 | |
1154 | // Anything else is rejected. |
1155 | return false; |
1156 | } |
1157 | } |
1158 | |
1159 | return true; |
1160 | } |
1161 | |
1162 | static Value *GetHeapSROAValue(Value *V, unsigned FieldNo, |
1163 | DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues, |
1164 | std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) { |
1165 | std::vector<Value *> &FieldVals = InsertedScalarizedValues[V]; |
1166 | |
1167 | if (FieldNo >= FieldVals.size()) |
1168 | FieldVals.resize(FieldNo+1); |
1169 | |
1170 | // If we already have this value, just reuse the previously scalarized |
1171 | // version. |
1172 | if (Value *FieldVal = FieldVals[FieldNo]) |
1173 | return FieldVal; |
1174 | |
1175 | // Depending on what instruction this is, we have several cases. |
1176 | Value *Result; |
1177 | if (LoadInst *LI = dyn_cast<LoadInst>(V)) { |
1178 | // This is a scalarized version of the load from the global. Just create |
1179 | // a new Load of the scalarized global. |
1180 | Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo, |
1181 | InsertedScalarizedValues, |
1182 | PHIsToRewrite), |
1183 | LI->getName()+".f"+Twine(FieldNo), LI); |
1184 | } else { |
1185 | PHINode *PN = cast<PHINode>(V); |
1186 | // PN's type is pointer to struct. Make a new PHI of pointer to struct |
1187 | // field. |
1188 | |
1189 | PointerType *PTy = cast<PointerType>(PN->getType()); |
1190 | StructType *ST = cast<StructType>(PTy->getElementType()); |
1191 | |
1192 | unsigned AS = PTy->getAddressSpace(); |
1193 | PHINode *NewPN = |
1194 | PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS), |
1195 | PN->getNumIncomingValues(), |
1196 | PN->getName()+".f"+Twine(FieldNo), PN); |
1197 | Result = NewPN; |
1198 | PHIsToRewrite.push_back(std::make_pair(PN, FieldNo)); |
1199 | } |
1200 | |
1201 | return FieldVals[FieldNo] = Result; |
1202 | } |
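// [Annotation, not in the original source] Sketch of the lazy PHI |
// scalarization above: for a PHI %pn of type %pair* (with %pair = |
// type { i32, i32 }) and FieldNo == 1, GetHeapSROAValue creates |
//   %pn.f1 = phi i32* ... |
// with no incoming values yet, records (%pn, 1) in PHIsToRewrite, and |
// memoizes the result in InsertedScalarizedValues so that cyclic PHIs |
// terminate; the incoming values are filled in later by the caller's |
// PHIsToRewrite loop. |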
1203 | |
1204 | /// Given a load instruction and a value derived from the load, rewrite the |
1205 | /// derived value to use the HeapSRoA'd load. |
1206 | static void RewriteHeapSROALoadUser(Instruction *LoadUser, |
1207 | DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues, |
1208 | std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) { |
1209 | // If this is a comparison against null, handle it. |
1210 | if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) { |
1211 | assert(isa<ConstantPointerNull>(SCI->getOperand(1))); |
1212 | // If we have a setcc of the loaded pointer, we can use a setcc of any |
1213 | // field. |
1214 | Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0, |
1215 | InsertedScalarizedValues, PHIsToRewrite); |
1216 | |
1217 | Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr, |
1218 | Constant::getNullValue(NPtr->getType()), |
1219 | SCI->getName()); |
1220 | SCI->replaceAllUsesWith(New); |
1221 | SCI->eraseFromParent(); |
1222 | return; |
1223 | } |
1224 | |
1225 | // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...' |
1226 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) { |
1227 | assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2)) |
1228 |        && "Unexpected GEPI!"); |
1229 | |
1230 | // Load the pointer for this field. |
1231 | unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); |
1232 | Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo, |
1233 | InsertedScalarizedValues, PHIsToRewrite); |
1234 | |
1235 | // Create the new GEP idx vector. |
1236 | SmallVector<Value*, 8> GEPIdx; |
1237 | GEPIdx.push_back(GEPI->getOperand(1)); |
1238 | GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end()); |
1239 | |
1240 | Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(), NewPtr, GEPIdx, |
1241 | GEPI->getName(), GEPI); |
1242 | GEPI->replaceAllUsesWith(NGEPI); |
1243 | GEPI->eraseFromParent(); |
1244 | return; |
1245 | } |
1246 | |
1247 | // Recursively transform the users of PHI nodes. This will lazily create the |
1248 | // PHIs that are needed for individual elements. Keep track of what PHIs we |
1249 | // see in InsertedScalarizedValues so that we don't get infinite loops (very |
1250 | // antisocial). If the PHI is already in InsertedScalarizedValues, it has |
1251 | // already been seen first by another load, so its uses have already been |
1252 | // processed. |
1253 | PHINode *PN = cast<PHINode>(LoadUser); |
1254 | if (!InsertedScalarizedValues.insert(std::make_pair(PN, |
1255 | std::vector<Value *>())).second) |
1256 | return; |
1257 | |
1258 | // If this is the first time we've seen this PHI, recursively process all |
1259 | // users. |
1260 | for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) { |
1261 | Instruction *User = cast<Instruction>(*UI++); |
1262 | RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); |
1263 | } |
1264 | } |
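// [Annotation, not in the original source] Example of the icmp rewrite |
// above on a hypothetical load %p from @G: |
//   %c = icmp eq %pair* %p, null |
// becomes a test of the scalarized field-0 pointer instead, |
//   %p.f0 = load i32*, i32** @G.f0 |
//   %c    = icmp eq i32* %p.f0, null |
// which is equivalent because the failure path emitted by |
// PerformHeapAllocSRoA frees and nulls all field pointers together. |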
1265 | |
1266 | /// We are performing Heap SRoA on a global. Ptr is a value loaded from the |
1267 | /// global. Eliminate all uses of Ptr, making them use FieldGlobals instead. |
1268 | /// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA. |
1269 | static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, |
1270 | DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues, |
1271 | std::vector<std::pair<PHINode *, unsigned> > &PHIsToRewrite) { |
1272 | for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) { |
1273 | Instruction *User = cast<Instruction>(*UI++); |
1274 | RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); |
1275 | } |
1276 | |
1277 | if (Load->use_empty()) { |
1278 | Load->eraseFromParent(); |
1279 | InsertedScalarizedValues.erase(Load); |
1280 | } |
1281 | } |
1282 | |
1283 | /// CI is an allocation of an array of structures. Break it up into multiple |
1284 | /// allocations of arrays of the fields. |
1285 | static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, |
1286 | Value *NElems, const DataLayout &DL, |
1287 | const TargetLibraryInfo *TLI) { |
1288 | DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); |
1289 | Type *MAT = getMallocAllocatedType(CI, TLI); |
1290 | StructType *STy = cast<StructType>(MAT); |
1291 | |
1292 | // There is guaranteed to be at least one use of the malloc (storing |
1293 | // it into GV). If there are other uses, change them to be uses of |
1294 | // the global to simplify later code. This also deletes the store |
1295 | // into GV. |
1296 | ReplaceUsesOfMallocWithGlobal(CI, GV); |
1297 | |
1298 | // Okay, at this point, there are no users of the malloc. Insert N |
1299 | // new mallocs at the same place as CI, and N globals. |
1300 | std::vector<Value *> FieldGlobals; |
1301 | std::vector<Value *> FieldMallocs; |
1302 | |
1303 | SmallVector<OperandBundleDef, 1> OpBundles; |
1304 | CI->getOperandBundlesAsDefs(OpBundles); |
1305 | |
1306 | unsigned AS = GV->getType()->getPointerAddressSpace(); |
1307 | for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ |
1308 | Type *FieldTy = STy->getElementType(FieldNo); |
1309 | PointerType *PFieldTy = PointerType::get(FieldTy, AS); |
1310 | |
1311 | GlobalVariable *NGV = new GlobalVariable( |
1312 | *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage, |
1313 | Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo), |
1314 | nullptr, GV->getThreadLocalMode()); |
1315 | NGV->copyAttributesFrom(GV); |
1316 | FieldGlobals.push_back(NGV); |
1317 | |
1318 | unsigned TypeSize = DL.getTypeAllocSize(FieldTy); |
1319 | if (StructType *ST = dyn_cast<StructType>(FieldTy)) |
1320 | TypeSize = DL.getStructLayout(ST)->getSizeInBytes(); |
1321 | Type *IntPtrTy = DL.getIntPtrType(CI->getType()); |
1322 | Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy, |
1323 | ConstantInt::get(IntPtrTy, TypeSize), |
1324 | NElems, OpBundles, nullptr, |
1325 | CI->getName() + ".f" + Twine(FieldNo)); |
1326 | FieldMallocs.push_back(NMI); |
1327 | new StoreInst(NMI, NGV, CI); |
1328 | } |
1329 | |
1330 | // The tricky aspect of this transformation is handling the case when malloc |
1331 | // fails. In the original code, malloc failing would set the result pointer |
1332 | // of malloc to null. In this case, some mallocs could succeed and others |
1333 | // could fail. As such, we emit code that looks like this: |
1334 | // F0 = malloc(field0) |
1335 | // F1 = malloc(field1) |
1336 | // F2 = malloc(field2) |
1337 | // if (F0 == 0 || F1 == 0 || F2 == 0) { |
1338 | // if (F0) { free(F0); F0 = 0; } |
1339 | // if (F1) { free(F1); F1 = 0; } |
1340 | // if (F2) { free(F2); F2 = 0; } |
1341 | // } |
1342 | // The malloc can also fail if its argument is too large. |
1343 | Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0); |
1344 | Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0), |
1345 | ConstantZero, "isneg"); |
1346 | for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { |
1347 | Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i], |
1348 | Constant::getNullValue(FieldMallocs[i]->getType()), |
1349 | "isnull"); |
1350 | RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI); |
1351 | } |
1352 | |
1353 | // Split the basic block at the old malloc. |
1354 | BasicBlock *OrigBB = CI->getParent(); |
1355 | BasicBlock *ContBB = |
1356 | OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont"); |
1357 | |
1358 | // Create the block to check the first condition. Put all these blocks at the |
1359 | // end of the function as they are unlikely to be executed. |
1360 | BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(), |
1361 | "malloc_ret_null", |
1362 | OrigBB->getParent()); |
1363 | |
1364 | // Remove the uncond branch from OrigBB to ContBB, turning it into a cond |
1365 | // branch on RunningOr. |
1366 | OrigBB->getTerminator()->eraseFromParent(); |
1367 | BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB); |
1368 | |
1369 | // Within the NullPtrBlock, we need to emit a comparison and branch for each |
1370 | // pointer, because some may be null while others are not. |
1371 | for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { |
1372 | Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock); |
1373 | Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal, |
1374 | Constant::getNullValue(GVVal->getType())); |
1375 | BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it", |
1376 | OrigBB->getParent()); |
1377 | BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next", |
1378 | OrigBB->getParent()); |
1379 | Instruction *BI = BranchInst::Create(FreeBlock, NextBlock, |
1380 | Cmp, NullPtrBlock); |
1381 | |
1382 | // Fill in FreeBlock. |
1383 | CallInst::CreateFree(GVVal, OpBundles, BI); |
1384 | new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i], |
1385 | FreeBlock); |
1386 | BranchInst::Create(NextBlock, FreeBlock); |
1387 | |
1388 | NullPtrBlock = NextBlock; |
1389 | } |
1390 | |
1391 | BranchInst::Create(ContBB, NullPtrBlock); |
1392 | |
1393 | // CI is no longer needed, remove it. |
1394 | CI->eraseFromParent(); |
1395 | |
1396 | /// As we process loads, if we can't immediately update all uses of the load, |
1397 | /// keep track of what scalarized loads are inserted for a given load. |
1398 | DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues; |
1399 | InsertedScalarizedValues[GV] = FieldGlobals; |
1400 | |
1401 | std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite; |
1402 | |
1403 | // Okay, the malloc site is completely handled. All of the uses of GV are now |
1404 | // loads, and all uses of those loads are simple. Rewrite them to use loads |
1405 | // of the per-field globals instead. |
1406 | for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) { |
1407 | Instruction *User = cast<Instruction>(*UI++); |
1408 | |
1409 | if (LoadInst *LI = dyn_cast<LoadInst>(User)) { |
1410 | RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite); |
1411 | continue; |
1412 | } |
1413 | |
1414 | // Must be a store of null. |
1415 | StoreInst *SI = cast<StoreInst>(User); |
1416 | assert(isa<ConstantPointerNull>(SI->getOperand(0)) && |
1417 |        "Unexpected heap-sra user!"); |
1418 | |
1419 | // Insert a store of null into each global. |
1420 | for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { |
1421 | Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType(); |
1422 | Constant *Null = Constant::getNullValue(ValTy); |
1423 | new StoreInst(Null, FieldGlobals[i], SI); |
1424 | } |
1425 | // Erase the original store. |
1426 | SI->eraseFromParent(); |
1427 | } |
1428 | |
1429 | // While we have PHIs that are interesting to rewrite, do it. |
1430 | while (!PHIsToRewrite.empty()) { |
1431 | PHINode *PN = PHIsToRewrite.back().first; |
1432 | unsigned FieldNo = PHIsToRewrite.back().second; |
1433 | PHIsToRewrite.pop_back(); |
1434 | PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); |
1435 | assert(FieldPN->getNumIncomingValues() == 0 && "Already processed this phi"); |
1436 | |
1437 | // Add all the incoming values. This can materialize more phis. |
1438 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
1439 | Value *InVal = PN->getIncomingValue(i); |
1440 | InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, |
1441 | PHIsToRewrite); |
1442 | FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); |
1443 | } |
1444 | } |
1445 | |
1446 | // Drop all inter-phi links and any loads that made it this far. |
1447 | for (DenseMap<Value *, std::vector<Value *>>::iterator |
1448 | I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); |
1449 | I != E; ++I) { |
1450 | if (PHINode *PN = dyn_cast<PHINode>(I->first)) |
1451 | PN->dropAllReferences(); |
1452 | else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) |
1453 | LI->dropAllReferences(); |
1454 | } |
1455 | |
1456 | // Delete all the phis and loads now that inter-references are dead. |
1457 | for (DenseMap<Value *, std::vector<Value *>>::iterator |
1458 | I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); |
1459 | I != E; ++I) { |
1460 | if (PHINode *PN = dyn_cast<PHINode>(I->first)) |
1461 | PN->eraseFromParent(); |
1462 | else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) |
1463 | LI->eraseFromParent(); |
1464 | } |
1465 | |
1466 | // The old global is now dead, remove it. |
1467 | GV->eraseFromParent(); |
1468 | |
1469 | ++NumHeapSRA; |
1470 | return cast<GlobalVariable>(FieldGlobals[0]); |
1471 | } |
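// [Annotation, not in the original source] End-to-end sketch of |
// PerformHeapAllocSRoA on a hypothetical module: |
//   %pair = type { i32, i32 } |
//   @G = internal global %pair* null |
//   ; %m = malloc(array of %pair); store %pair* %m, %pair** @G |
// becomes |
//   @G.f0 = internal global i32* null |
//   @G.f1 = internal global i32* null |
// with one malloc per field, the null-check/free blocks shown above, and |
// every access through a load of @G rewritten to load @G.f0 or @G.f1. |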
1472 | |
1473 | /// This function is called when we see a pointer global variable with a single |
1474 | /// value stored to it that is a malloc or a cast of a malloc. |
1475 | static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI, |
1476 | Type *AllocTy, |
1477 | AtomicOrdering Ordering, |
1478 | const DataLayout &DL, |
1479 | TargetLibraryInfo *TLI) { |
1480 | // If this is a malloc of an abstract type, don't touch it. |
1481 | if (!AllocTy->isSized()) |
1482 | return false; |
1483 | |
1484 | // We can't optimize this global unless all uses of it are *known* to be |
1485 | // of the malloc value, not of the null initializer value (consider a use |
1486 | // that compares the global's value against zero to see if the malloc has |
1487 | // been reached). To do this, we check to see if all uses of the global |
1488 | // would trap if the global were null: this proves that they must all |
1489 | // happen after the malloc. |
1490 | if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) |
1491 | return false; |
1492 | |
1493 | // We can't optimize this if the malloc itself is used in a complex way, |
1494 | // for example, being stored into multiple globals. We only allow the |
1495 | // malloc to be stored into the specified global, loaded, icmp'd, and |
1496 | // GEP'd; these are all uses we know how to rewrite in terms of the |
1497 | // global. |
1498 | SmallPtrSet<const PHINode*, 8> PHIs; |
1499 | if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs)) |
1500 | return false; |
1501 | |
1502 | // If we have a global that is only initialized with a fixed size malloc, |
1503 | // transform the program to use global memory instead of malloc'd memory. |
1504 | // This eliminates dynamic allocation, avoids an indirection accessing the |
1505 | // data, and exposes the resultant global to further GlobalOpt. |
1506 | // We cannot optimize the malloc if we cannot determine malloc array size. |
1507 | Value *NElems = getMallocArraySize(CI, DL, TLI, true); |
1508 | if (!NElems) |
1509 | return false; |
1510 | |
1511 | if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems)) |
1512 | // Restrict this transformation to only working on small allocations |
1513 | // (2048 bytes currently), as we don't want to introduce a 16M global or |
1514 | // something. |
1515 | if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) { |
1516 | OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI); |
1517 | return true; |
1518 | } |
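// [Annotation, worked example, not in the original source] For |
// %pair = type { i32, i32 } (8 bytes under a typical layout), an |
// allocation of 100 elements gives 100 * 8 = 800 < 2048, so the malloc |
// is folded into a global array here; 300 elements (2400 bytes) would |
// skip this fast path and fall through to the heap-SRA attempt below. |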
1519 | |
1520 | // If the allocation is an array of structures, consider transforming this |
1521 | // into multiple malloc'd arrays, one for each field. This is basically |
1522 | // SRoA for malloc'd memory. |
1523 | |
1524 | if (Ordering != AtomicOrdering::NotAtomic) |
1525 | return false; |
1526 | |
1527 | // If this is an allocation of a fixed size array of structs, analyze as a |
1528 | // variable size array. malloc [100 x struct],1 -> malloc struct, 100 |
1529 | if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) |
1530 | if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) |
1531 | AllocTy = AT->getElementType(); |
1532 | |
1533 | StructType *AllocSTy = dyn_cast<StructType>(AllocTy); |
1534 | if (!AllocSTy) |
1535 | return false; |
1536 | |
1537 | // If the structure has an unreasonable number of fields, leave it |
1538 | // alone. |
1539 | if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && |
1540 | AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) { |
1541 | |
1542 | // If this is a fixed size array, transform the Malloc to be an alloc of |
1543 | // structs. malloc [100 x struct],1 -> malloc struct, 100 |
1544 | if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) { |
1545 | Type *IntPtrTy = DL.getIntPtrType(CI->getType()); |
1546 | unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes(); |
1547 | Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); |
1548 | Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements()); |
1549 | SmallVector<OperandBundleDef, 1> OpBundles; |
1550 | CI->getOperandBundlesAsDefs(OpBundles); |
1551 | Instruction *Malloc = |
1552 | CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements, |
1553 | OpBundles, nullptr, CI->getName()); |
1554 | Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI); |
1555 | CI->replaceAllUsesWith(Cast); |
1556 | CI->eraseFromParent(); |
1557 | if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc)) |
1558 | CI = cast<CallInst>(BCI->getOperand(0)); |
1559 | else |
1560 | CI = cast<CallInst>(Malloc); |
1561 | } |
1562 | |
1563 | PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL, |
1564 | TLI); |
1565 | return true; |
1566 | } |
1567 | |
1568 | return false; |
1569 | } |
1570 | |
1571 | // Try to optimize globals based on the knowledge that only one value (besides |
1572 | // its initializer) is ever stored to the global. |
1573 | static bool optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, |
1574 | AtomicOrdering Ordering, |
1575 | const DataLayout &DL, |
1576 | TargetLibraryInfo *TLI) { |
1577 | // Ignore no-op GEPs and bitcasts. |
1578 | StoredOnceVal = StoredOnceVal->stripPointerCasts(); |
1579 | |
1580 | // If we are dealing with a pointer global that is initialized to null and |
1581 | // only has one (non-null) value stored into it, then we can optimize any |
1582 | // users of the loaded value (often calls and loads) that would trap if the |
1583 | // value was null. |
1584 | if (GV->getInitializer()->getType()->isPointerTy() && |
1585 | GV->getInitializer()->isNullValue()) { |
1586 | if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { |
1587 | if (GV->getInitializer()->getType() != SOVC->getType()) |
1588 | SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType()); |
1589 | |
1590 | // Optimize away any trapping uses of the loaded value. |
1591 | if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI)) |
1592 | return true; |
1593 | } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) { |
1594 | Type *MallocType = getMallocAllocatedType(CI, TLI); |
1595 | if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, |
1596 | Ordering, DL, TLI)) |
1597 | return true; |
1598 | } |
1599 | } |
1600 | |
1601 | return false; |
1602 | } |
1603 | |
1604 | /// At this point, we have learned that the only two values ever stored into GV |
1605 | /// are its initializer and OtherVal. See if we can shrink the global into a |
1606 | /// boolean and select between the two values whenever it is used. This exposes |
1607 | /// the values to other scalar optimizations. |
1608 | static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { |
1609 | Type *GVElType = GV->getValueType(); |
1610 | |
1611 | // If GVElType is already i1, it is already shrunk. If the type of the GV is |
1612 | // an FP value, pointer or vector, don't do this optimization because a select |
1613 | // between them is very expensive and unlikely to lead to later |
1614 | // simplification. In these cases, we typically end up with "cond ? v1 : v2" |
1615 | // where v1 and v2 both require constant pool loads, a big loss. |
1616 | if (GVElType == Type::getInt1Ty(GV->getContext()) || |
1617 | GVElType->isFloatingPointTy() || |
1618 | GVElType->isPointerTy() || GVElType->isVectorTy()) |
1619 | return false; |
1620 | |
1621 | // Walk the use list of the global seeing if all the uses are load or store. |
1622 | // If there is anything else, bail out. |
1623 | for (User *U : GV->users()) |
1624 | if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) |
1625 | return false; |
1626 | |
1627 | DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n"); |
1628 | |
1629 | // Create the new global, initializing it to false. |
1630 | GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), |
1631 | false, |
1632 | GlobalValue::InternalLinkage, |
1633 | ConstantInt::getFalse(GV->getContext()), |
1634 | GV->getName()+".b", |
1635 | GV->getThreadLocalMode(), |
1636 | GV->getType()->getAddressSpace()); |
1637 | NewGV->copyAttributesFrom(GV); |
1638 | GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV); |
1639 | |
1640 | Constant *InitVal = GV->getInitializer(); |
1641 | assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && |
1642 |        "No reason to shrink to bool!"); |
1643 | |
1644 | SmallVector<DIGlobalVariableExpression *, 1> GVs; |
1645 | GV->getDebugInfo(GVs); |
1646 | |
1647 | // If initialized to zero and storing one into the global, we can use a cast |
1648 | // instead of a select to synthesize the desired value. |
1649 | bool IsOneZero = false; |
1650 | bool EmitOneOrZero = true; |
1651 | if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)){ |
1652 | IsOneZero = InitVal->isNullValue() && CI->isOne(); |
1653 | |
1654 | if (ConstantInt *CIInit = dyn_cast<ConstantInt>(GV->getInitializer())){ |
1655 | uint64_t ValInit = CIInit->getZExtValue(); |
1656 | uint64_t ValOther = CI->getZExtValue(); |
1657 | uint64_t ValMinus = ValOther - ValInit; |
1658 | |
1659 | for(auto *GVe : GVs){ |
1660 | DIGlobalVariable *DGV = GVe->getVariable(); |
1661 | DIExpression *E = GVe->getExpression(); |
Value stored to 'E' during its initialization is never read | |
1662 | |
1663 | // It is expected that the address of the optimized global variable is |
1664 | // on top of the DWARF expression stack. After the optimization, the |
1665 | // variable's value will be either 0 (the initial value) or 1 (the |
1666 | // other value). The following expression yields a constant integer |
1667 | // depending on the value at the global object's address: |
1668 | // val * (ValOther - ValInit) + ValInit: |
1669 | // DW_OP_deref DW_OP_constu <ValMinus> |
1670 | // DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value |
1671 | E = DIExpression::get(NewGV->getContext(), |
1672 | {dwarf::DW_OP_deref, |
1673 | dwarf::DW_OP_constu, |
1674 | ValMinus, |
1675 | dwarf::DW_OP_mul, |
1676 | dwarf::DW_OP_constu, |
1677 | ValInit, |
1678 | dwarf::DW_OP_plus, |
1679 | dwarf::DW_OP_stack_value}); |
1680 | DIGlobalVariableExpression *DGVE = |
1681 | DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E); |
1682 | NewGV->addDebugInfo(DGVE); |
1683 | } |
1684 | EmitOneOrZero = false; |
1685 | } |
1686 | } |
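// [Annotation, worked example, not in the original source] With |
// ValInit = 5 and ValOther = 12, ValMinus = 7 and the expression reads |
//   DW_OP_deref DW_OP_constu 7 DW_OP_mul DW_OP_constu 5 DW_OP_plus |
//   DW_OP_stack_value |
// so a stored i1 of 0 is shown to the debugger as 0*7+5 = 5 and a |
// stored 1 as 1*7+5 = 12, recovering the original values. |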
1687 | |
1688 | if (EmitOneOrZero) { |
1689 | // FIXME: This only emits the global's address for the debugger; the |
1690 | // value written there will only ever be 0 or 1. |
1691 | for(auto *GV : GVs) |
1692 | NewGV->addDebugInfo(GV); |
1693 | } |
1694 | |
1695 | while (!GV->use_empty()) { |
1696 | Instruction *UI = cast<Instruction>(GV->user_back()); |
1697 | if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { |
1698 | // Change the store into a boolean store. |
1699 | bool StoringOther = SI->getOperand(0) == OtherVal; |
1700 | // Only do this if we weren't storing a loaded value. |
1701 | Value *StoreVal; |
1702 | if (StoringOther || SI->getOperand(0) == InitVal) { |
1703 | StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()), |
1704 | StoringOther); |
1705 | } else { |
1706 | // Otherwise, we are storing a previously loaded copy. To do this, |
1707 | // change the copy from copying the original value to just copying the |
1708 | // bool. |
1709 | Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); |
1710 | |
1711 | // If we've already replaced the input, StoredVal will be a cast or |
1712 | // select instruction. If not, it will be a load of the original |
1713 | // global. |
1714 | if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { |
1715 | assert(LI->getOperand(0) == GV && "Not a copy!"); |
1716 | // Insert a new load, to preserve the saved value. |
1717 | StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0, |
1718 | LI->getOrdering(), LI->getSyncScopeID(), LI); |
1719 | } else { |
1720 | assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && |
1721 |        "This is not a form that we understand!"); |
1722 | StoreVal = StoredVal->getOperand(0); |
1723 | assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); |
1724 | } |
1725 | } |
1726 | new StoreInst(StoreVal, NewGV, false, 0, |
1727 | SI->getOrdering(), SI->getSyncScopeID(), SI); |
1728 | } else { |
1729 | // Change the load into a load of bool then a select. |
1730 | LoadInst *LI = cast<LoadInst>(UI); |
1731 | LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0, |
1732 | LI->getOrdering(), LI->getSyncScopeID(), LI); |
1733 | Value *NSI; |
1734 | if (IsOneZero) |
1735 | NSI = new ZExtInst(NLI, LI->getType(), "", LI); |
1736 | else |
1737 | NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); |
1738 | NSI->takeName(LI); |
1739 | LI->replaceAllUsesWith(NSI); |
1740 | } |
1741 | UI->eraseFromParent(); |
1742 | } |
1743 | |
1744 | // Retain the name of the old global variable. People who are debugging their |
1745 | // programs may expect these variables to be named the same. |
1746 | NewGV->takeName(GV); |
1747 | GV->eraseFromParent(); |
1748 | return true; |
1749 | } |
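// [Annotation, not in the original source] Sketch of the shrink-to-bool |
// rewrite for a hypothetical @flag = internal global i32 0 whose only |
// other stored value is 1 (the IsOneZero case): |
//   @flag.b = internal global i1 false |
//   store i32 1, i32* @flag     -->  store i1 true, i1* @flag.b |
//   %v = load i32, i32* @flag   -->  %b = load i1, i1* @flag.b |
//                                    %v = zext i1 %b to i32 |
// For value pairs other than 0/1, a select between InitVal and OtherVal |
// is emitted instead of the zext. |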
1750 | |
1751 | static bool deleteIfDead(GlobalValue &GV, |
1752 | SmallSet<const Comdat *, 8> &NotDiscardableComdats) { |
1753 | GV.removeDeadConstantUsers(); |
1754 | |
1755 | if (!GV.isDiscardableIfUnused() && !GV.isDeclaration()) |
1756 | return false; |
1757 | |
1758 | if (const Comdat *C = GV.getComdat()) |
1759 | if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C)) |
1760 | return false; |
1761 | |
1762 | bool Dead; |
1763 | if (auto *F = dyn_cast<Function>(&GV)) |
1764 | Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead(); |
1765 | else |
1766 | Dead = GV.use_empty(); |
1767 | if (!Dead) |
1768 | return false; |
1769 | |
1770 | DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n"); |
1771 | GV.eraseFromParent(); |
1772 | ++NumDeleted; |
1773 | return true; |
1774 | } |
1775 | |
1776 | static bool isPointerValueDeadOnEntryToFunction( |
1777 | const Function *F, GlobalValue *GV, |
1778 | function_ref<DominatorTree &(Function &)> LookupDomTree) { |
1779 | // Find all uses of GV. We expect them all to be in F, and if we can't |
1780 | // identify any of the uses we bail out. |
1781 | // |
1782 | // On each of these uses, identify if the memory that GV points to is |
1783 | // used/required/live at the start of the function. If it is not, for example |
1784 | // if the first thing the function does is store to the GV, the GV can |
1785 | // possibly be demoted. |
1786 | // |
1787 | // We don't do an exhaustive search for memory operations - simply look |
1788 | // through bitcasts as they're quite common and benign. |
1789 | const DataLayout &DL = GV->getParent()->getDataLayout(); |
1790 | SmallVector<LoadInst *, 4> Loads; |
1791 | SmallVector<StoreInst *, 4> Stores; |
1792 | for (auto *U : GV->users()) { |
1793 | if (Operator::getOpcode(U) == Instruction::BitCast) { |
1794 | for (auto *UU : U->users()) { |
1795 | if (auto *LI = dyn_cast<LoadInst>(UU)) |
1796 | Loads.push_back(LI); |
1797 | else if (auto *SI = dyn_cast<StoreInst>(UU)) |
1798 | Stores.push_back(SI); |
1799 | else |
1800 | return false; |
1801 | } |
1802 | continue; |
1803 | } |
1804 | |
1805 | Instruction *I = dyn_cast<Instruction>(U); |
1806 | if (!I) |
1807 | return false; |
1808 | assert(I->getParent()->getParent() == F); |
1809 | |
1810 | if (auto *LI = dyn_cast<LoadInst>(I)) |
1811 | Loads.push_back(LI); |
1812 | else if (auto *SI = dyn_cast<StoreInst>(I)) |
1813 | Stores.push_back(SI); |
1814 | else |
1815 | return false; |
1816 | } |
1817 | |
1818 | // We have identified all uses of GV into loads and stores. Now check if all |
1819 | // of them are known not to depend on the value of the global at the function |
1820 | // entry point. We do this by ensuring that every load is dominated by at |
1821 | // least one store. |
1822 | auto &DT = LookupDomTree(*const_cast<Function *>(F)); |
1823 | |
1824 | // The below check is quadratic. Check we're not going to do too many tests. |
1825 | // FIXME: Even though this will always have worst-case quadratic time, we |
1826 | // could put effort into minimizing the average time by putting stores that |
1827 | // have been shown to dominate at least one load at the beginning of the |
1828 | // Stores array, making subsequent dominance checks more likely to succeed |
1829 | // early. |
1830 | // |
1831 | // The threshold here is fairly large because global->local demotion is a |
1832 | // very powerful optimization should it fire. |
1833 | const unsigned Threshold = 100; |
1834 | if (Loads.size() * Stores.size() > Threshold) |
1835 | return false; |
1836 | |
1837 | for (auto *L : Loads) { |
1838 | auto *LTy = L->getType(); |
1839 | if (none_of(Stores, [&](const StoreInst *S) { |
1840 | auto *STy = S->getValueOperand()->getType(); |
1841 | // The load is only dominated by the store if DomTree says so |
1842 | // and the number of bits loaded in L is less than or equal to |
1843 | // the number of bits stored in S. |
1844 | return DT.dominates(S, L) && |
1845 | DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy); |
1846 | })) |
1847 | return false; |
1848 | } |
1849 | // All loads have known dependences inside F, so the global can be localized. |
1850 | return true; |
1851 | } |
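// [Annotation, not in the original source] Example of the dominance |
// criterion above: in |
//   define void @f() { |
//     store i32 1, i32* @G |
//     %v = load i32, i32* @G |
//     ret void |
//   } |
// the only load is dominated by a store of at least as many bits, so the |
// value of @G on entry to @f is dead and the global is a candidate for |
// demotion to an alloca. |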
1852 | |
1853 | /// C may have non-instruction users. Can all of those users be turned into |
1854 | /// instructions? |
1855 | static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) { |
1856 | // We don't do this exhaustively. The most common pattern that we really need |
1857 | // to care about is a constant GEP or constant bitcast - so just looking |
1858 | // through one single ConstantExpr. |
1859 | // |
1860 | // The set of constants that this function returns true for must be able to be |
1861 | // handled by makeAllConstantUsesInstructions. |
1862 | for (auto *U : C->users()) { |
1863 | if (isa<Instruction>(U)) |
1864 | continue; |
1865 | if (!isa<ConstantExpr>(U)) |
1866 | // Non instruction, non-constantexpr user; cannot convert this. |
1867 | return false; |
1868 | for (auto *UU : U->users()) |
1869 | if (!isa<Instruction>(UU)) |
1870 | // A constantexpr used by another constant. We don't try and recurse any |
1871 | // further but just bail out at this point. |
1872 | return false; |
1873 | } |
1874 | |
1875 | return true; |
1876 | } |
1877 | |
1878 | /// C may have non-instruction users, and |
1879 | /// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the |
1880 | /// non-instruction users to instructions. |
1881 | static void makeAllConstantUsesInstructions(Constant *C) { |
1882 | SmallVector<ConstantExpr*,4> Users; |
1883 | for (auto *U : C->users()) { |
1884 | if (isa<ConstantExpr>(U)) |
1885 | Users.push_back(cast<ConstantExpr>(U)); |
1886 | else |
1887 | // We should never get here; allNonInstructionUsersCanBeMadeInstructions |
1888 | // should not have returned true for C. |
1889 | assert( |
1890 |     isa<Instruction>(U) && |
1891 |     "Can't transform non-constantexpr non-instruction to instruction!"); |
1892 | } |
1893 | |
1894 | SmallVector<Value*,4> UUsers; |
1895 | for (auto *U : Users) { |
1896 | UUsers.clear(); |
1897 | for (auto *UU : U->users()) |
1898 | UUsers.push_back(UU); |
1899 | for (auto *UU : UUsers) { |
1900 | Instruction *UI = cast<Instruction>(UU); |
1901 | Instruction *NewU = U->getAsInstruction(); |
1902 | NewU->insertBefore(UI); |
1903 | UI->replaceUsesOfWith(U, NewU); |
1904 | } |
1905 | // We've replaced all the uses, so destroy the constant. (destroyConstant |
1906 | // will update value handles and metadata.) |
1907 | U->destroyConstant(); |
1908 | } |
1909 | } |
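// [Annotation, not in the original source] Example: an instruction I |
// using the ConstantExpr "getelementptr (%T, %T* @G, i64 0, i32 1)" gets |
// a real GetElementPtrInst materialized right before it via |
// getAsInstruction(), after which the ConstantExpr is destroyed. This |
// matters for the localization below: an alloca, unlike a global, cannot |
// appear inside a ConstantExpr, so GV->replaceAllUsesWith(Alloca) would |
// otherwise be invalid. |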
1910 | |
1911 | /// Analyze the specified global variable and optimize |
1912 | /// it if possible. If we make a change, return true. |
1913 | static bool processInternalGlobal( |
1914 | GlobalVariable *GV, const GlobalStatus &GS, TargetLibraryInfo *TLI, |
1915 | function_ref<DominatorTree &(Function &)> LookupDomTree) { |
1916 | auto &DL = GV->getParent()->getDataLayout(); |
1917 | // If this is a first class global and has only one accessing function and |
1918 | // this function is non-recursive, we replace the global with a local alloca |
1919 | // in this function. |
1920 | // |
1921 | // NOTE: It doesn't make sense to promote non-single-value types since we |
1922 | // are just replacing static memory with stack memory. |
1923 | // |
1924 | // If the global is in different address space, don't bring it to stack. |
1925 | if (!GS.HasMultipleAccessingFunctions && |
1926 | GS.AccessingFunction && |
1927 | GV->getValueType()->isSingleValueType() && |
1928 | GV->getType()->getAddressSpace() == 0 && |
1929 | !GV->isExternallyInitialized() && |
1930 | allNonInstructionUsersCanBeMadeInstructions(GV) && |
1931 | GS.AccessingFunction->doesNotRecurse() && |
1932 | isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV, |
1933 | LookupDomTree)) { |
1934 | const DataLayout &DL = GV->getParent()->getDataLayout(); |
1935 | |
1936 | DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n"); |
1937 | Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction |
1938 | ->getEntryBlock().begin()); |
1939 | Type *ElemTy = GV->getValueType(); |
1940 | // FIXME: Pass Global's alignment when globals have alignment |
1941 | AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr, |
1942 | GV->getName(), &FirstI); |
1943 | if (!isa<UndefValue>(GV->getInitializer())) |
1944 | new StoreInst(GV->getInitializer(), Alloca, &FirstI); |
1945 | |
1946 | makeAllConstantUsesInstructions(GV); |
1947 | |
1948 | GV->replaceAllUsesWith(Alloca); |
1949 | GV->eraseFromParent(); |
1950 | ++NumLocalized; |
1951 | return true; |
1952 | } |
1953 | |
1954 | // If the global is never loaded (but may be stored to), it is dead. |
1955 | // Delete it now. |
1956 | if (!GS.IsLoaded) { |
1957 | DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n"); |
1958 | |
1959 | bool Changed; |
1960 | if (isLeakCheckerRoot(GV)) { |
1961 | // Delete any constant stores to the global. |
1962 | Changed = CleanupPointerRootUsers(GV, TLI); |
1963 | } else { |
1964 | // Delete any stores we can find to the global. We may not be able to |
1965 | // make it completely dead though. |
1966 | Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); |
1967 | } |
1968 | |
1969 | // If the global is dead now, delete it. |
1970 | if (GV->use_empty()) { |
1971 | GV->eraseFromParent(); |
1972 | ++NumDeleted; |
1973 | Changed = true; |
1974 | } |
1975 | return Changed; |
1976 | |
1977 | } |
1978 | if (GS.StoredType <= GlobalStatus::InitializerStored) { |
1979 | DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); |
1980 | GV->setConstant(true); |
1981 | |
1982 | // Clean up any obviously simplifiable users now. |
1983 | CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); |
1984 | |
1985 | // If the global is dead now, just nuke it. |
1986 | if (GV->use_empty()) { |
1987 | DEBUG(dbgs() << " *** Marking constant allowed us to simplify " |
1988 |              << "all users and delete global!\n"); |
1989 | GV->eraseFromParent(); |
1990 | ++NumDeleted; |
1991 | return true; |
1992 | } |
1993 | |
1994 | // Fall through to the next check; see if we can optimize further. |
1995 | ++NumMarked; |
1996 | } |
1997 | if (!GV->getInitializer()->getType()->isSingleValueType()) { |
1998 | const DataLayout &DL = GV->getParent()->getDataLayout(); |
1999 | if (SRAGlobal(GV, DL)) |
2000 | return true; |
2001 | } |
2002 | if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) { |
2003 | // If the initial value for the global was an undef value, and if only |
2004 | // one other value was stored into it, we can just change the |
2005 | // initializer to be the stored value, then delete all stores to the |
2006 | // global. This allows us to mark it constant. |
2007 | if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) |
2008 | if (isa<UndefValue>(GV->getInitializer())) { |
2009 | // Change the initial value here. |
2010 | GV->setInitializer(SOVConstant); |
2011 | |
2012 | // Clean up any obviously simplifiable users now. |
2013 | CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); |
2014 | |
2015 | if (GV->use_empty()) { |
2016 | DEBUG(dbgs() << " *** Substituting initializer allowed us to " |
2017 |              << "simplify all users and delete global!\n"); |
2018 | GV->eraseFromParent(); |
2019 | ++NumDeleted; |
2020 | } |
2021 | ++NumSubstitute; |
2022 | return true; |
2023 | } |
2024 | |
2025 | // Try to optimize globals based on the knowledge that only one value |
2026 | // (besides its initializer) is ever stored to the global. |
2027 | if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, TLI)) |
2028 | return true; |
2029 | |
2030 | // Otherwise, if the global was not a boolean, we can shrink it to be a |
2031 | // boolean. |
2032 | if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { |
2033 | if (GS.Ordering == AtomicOrdering::NotAtomic) { |
2034 | if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { |
2035 | ++NumShrunkToBool; |
2036 | return true; |
2037 | } |
2038 | } |
2039 | } |
2040 | } |
2041 | |
2042 | return false; |
2043 | } |
2044 | |
2045 | /// Analyze the specified global variable and optimize it if possible. If we |
2046 | /// make a change, return true. |
2047 | static bool |
2048 | processGlobal(GlobalValue &GV, TargetLibraryInfo *TLI, |
2049 | function_ref<DominatorTree &(Function &)> LookupDomTree) { |
2050 | if (GV.getName().startswith("llvm.")) |
2051 | return false; |
2052 | |
2053 | GlobalStatus GS; |
2054 | |
2055 | if (GlobalStatus::analyzeGlobal(&GV, GS)) |
2056 | return false; |
2057 | |
2058 | bool Changed = false; |
2059 | if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) { |
2060 | auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global |
2061 | : GlobalValue::UnnamedAddr::Local; |
2062 | if (NewUnnamedAddr != GV.getUnnamedAddr()) { |
2063 | GV.setUnnamedAddr(NewUnnamedAddr); |
2064 | NumUnnamed++; |
2065 | Changed = true; |
2066 | } |
2067 | } |
2068 | |
2069 | // Do more involved optimizations if the global is internal. |
2070 | if (!GV.hasLocalLinkage()) |
2071 | return Changed; |
2072 | |
2073 | auto *GVar = dyn_cast<GlobalVariable>(&GV); |
2074 | if (!GVar) |
2075 | return Changed; |
2076 | |
2077 | if (GVar->isConstant() || !GVar->hasInitializer()) |
2078 | return Changed; |
2079 | |
2080 | return processInternalGlobal(GVar, GS, TLI, LookupDomTree) || Changed; |
2081 | } |
2082 | |
2083 | /// Walk all of the direct calls of the specified function, changing them to |
2084 | /// FastCC. |
2085 | static void ChangeCalleesToFastCall(Function *F) { |
2086 | for (User *U : F->users()) { |
2087 | if (isa<BlockAddress>(U)) |
2088 | continue; |
2089 | CallSite CS(cast<Instruction>(U)); |
2090 | CS.setCallingConv(CallingConv::Fast); |
2091 | } |
2092 | } |
2093 | |
2094 | static AttributeList StripNest(LLVMContext &C, AttributeList Attrs) { |
2095 | // There can be at most one attribute set with a nest attribute. |
2096 | unsigned NestIndex; |
2097 | if (Attrs.hasAttrSomewhere(Attribute::Nest, &NestIndex)) |
2098 | return Attrs.removeAttribute(C, NestIndex, Attribute::Nest); |
2099 | return Attrs; |
2100 | } |
2101 | |
2102 | static void RemoveNestAttribute(Function *F) { |
2103 | F->setAttributes(StripNest(F->getContext(), F->getAttributes())); |
2104 | for (User *U : F->users()) { |
2105 | if (isa<BlockAddress>(U)) |
2106 | continue; |
2107 | CallSite CS(cast<Instruction>(U)); |
2108 | CS.setAttributes(StripNest(F->getContext(), CS.getAttributes())); |
2109 | } |
2110 | } |
2111 | |
2112 | /// Return true if this is a calling convention that we'd like to change. The |
2113 | /// idea here is that we don't want to mess with the convention if the user |
2114 | /// explicitly requested something with performance implications like coldcc, |
2115 | /// GHC, or anyregcc. |
2116 | static bool hasChangeableCC(Function *F) { |
2117 | CallingConv::ID CC = F->getCallingConv(); |
2118 | |
2119 | // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc? |
2120 | if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall) |
2121 | return false; |
2122 | |
2123 | // FIXME: Change CC for the whole chain of musttail calls when possible. |
2124 | // |
2125 | // Can't change the CC of a function that either contains musttail calls |
2126 | // or is itself a musttail callee. |
2127 | for (User *U : F->users()) { |
2128 | if (isa<BlockAddress>(U)) |
2129 | continue; |
2130 | CallInst* CI = dyn_cast<CallInst>(U); |
2131 | if (!CI) |
2132 | continue; |
2133 | |
2134 | if (CI->isMustTailCall()) |
2135 | return false; |
2136 | } |
2137 | |
2138 | for (BasicBlock &BB : *F) |
2139 | if (BB.getTerminatingMustTailCall()) |
2140 | return false; |
2141 | |
2142 | return true; |
2143 | } |
2144 | |
2145 | /// Return true if the block containing the call site has a BlockFrequency of |
2146 | /// less than ColdCCRelFreq% of the entry block. |
2147 | static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) { |
2148 | const BranchProbability ColdProb(ColdCCRelFreq, 100); |
2149 | auto CallSiteBB = CS.getInstruction()->getParent(); |
2150 | auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB); |
2151 | auto CallerEntryFreq = |
2152 | CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock())); |
2153 | return CallSiteFreq < CallerEntryFreq * ColdProb; |
2154 | } |
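// [Annotation, worked example, not in the original source] If |
// ColdCCRelFreq is 2, a call site counts as cold when its block frequency |
// is below 2% of the caller's entry frequency: with an entry frequency of |
// 1000, the threshold is 1000 * 2/100 = 20. |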
2155 | |
2156 | // This function checks if the input function F is cold at all call sites. It |
2157 | // also looks at each call site's containing function, returning false if the |
2158 | // caller function contains other non-cold calls. The input vector AllCallsCold |
2159 | // contains a list of functions that only have call sites in cold blocks. |
2160 | static bool |
2161 | isValidCandidateForColdCC(Function &F, |
2162 | function_ref<BlockFrequencyInfo &(Function &)> GetBFI, |
2163 | const std::vector<Function *> &AllCallsCold) { |
2164 | |
2165 | if (F.user_empty()) |
2166 | return false; |
2167 | |
2168 | for (User *U : F.users()) { |
2169 | if (isa<BlockAddress>(U)) |
2170 | continue; |
2171 | |
2172 | CallSite CS(cast<Instruction>(U)); |
2173 | Function *CallerFunc = CS.getInstruction()->getParent()->getParent(); |
2174 | BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc); |
2175 | if (!isColdCallSite(CS, CallerBFI)) |
2176 | return false; |
2177 | auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc); |
2178 | if (It == AllCallsCold.end()) |
2179 | return false; |
2180 | } |
2181 | return true; |
2182 | } |
2183 | |
2184 | static void changeCallSitesToColdCC(Function *F) { |
2185 | for (User *U : F->users()) { |
2186 | if (isa<BlockAddress>(U)) |
2187 | continue; |
2188 | CallSite CS(cast<Instruction>(U)); |
2189 | CS.setCallingConv(CallingConv::Cold); |
2190 | } |
2191 | } |
2192 | |
2193 | // This function iterates over all the call instructions in the input Function |
2194 | // and checks that all call sites are in cold blocks and are allowed to use the |
2195 | // coldcc calling convention. |
2196 | static bool |
2197 | hasOnlyColdCalls(Function &F, |
2198 | function_ref<BlockFrequencyInfo &(Function &)> GetBFI) { |
2199 | for (BasicBlock &BB : F) { |
2200 | for (Instruction &I : BB) { |
2201 | if (CallInst *CI = dyn_cast<CallInst>(&I)) { |
2202 | CallSite CS(cast<Instruction>(CI)); |
2203 | // Skip over inline asm instructions since they aren't function calls. |
2204 | if (CI->isInlineAsm()) |
2205 | continue; |
2206 | Function *CalledFn = CI->getCalledFunction(); |
2207 | if (!CalledFn) |
2208 | return false; |
2209 | if (!CalledFn->hasLocalLinkage()) |
2210 | return false; |
2211 | // Skip over intrinsics since they won't remain as function calls. |
2212 | if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic) |
2213 | continue; |
2214 | // Check if it's valid to use coldcc calling convention. |
2215 | if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() || |
2216 | CalledFn->hasAddressTaken()) |
2217 | return false; |
2218 | BlockFrequencyInfo &CallerBFI = GetBFI(F); |
2219 | if (!isColdCallSite(CS, CallerBFI)) |
2220 | return false; |
2221 | } |
2222 | } |
2223 | } |
2224 | return true; |
2225 | } |
2226 | |
2227 | static bool |
2228 | OptimizeFunctions(Module &M, TargetLibraryInfo *TLI, |
2229 | function_ref<TargetTransformInfo &(Function &)> GetTTI, |
2230 | function_ref<BlockFrequencyInfo &(Function &)> GetBFI, |
2231 | function_ref<DominatorTree &(Function &)> LookupDomTree, |
2232 | SmallSet<const Comdat *, 8> &NotDiscardableComdats) { |
2233 | |
2234 | bool Changed = false; |
2235 | |
2236 | std::vector<Function *> AllCallsCold; |
2237 | for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) { |
2238 | Function *F = &*FI++; |
2239 | if (hasOnlyColdCalls(*F, GetBFI)) |
2240 | AllCallsCold.push_back(F); |
2241 | } |
2242 | |
2243 | // Optimize functions. |
2244 | for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { |
2245 | Function *F = &*FI++; |
2246 | |
2247 | // Don't perform global opt pass on naked functions; we don't want fast |
2248 | // calling conventions for naked functions. |
2249 | if (F->hasFnAttribute(Attribute::Naked)) |
2250 | continue; |
2251 | |
2252 | // Functions without names cannot be referenced outside this module. |
2253 | if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage()) |
2254 | F->setLinkage(GlobalValue::InternalLinkage); |
2255 | |
2256 | if (deleteIfDead(*F, NotDiscardableComdats)) { |
2257 | Changed = true; |
2258 | continue; |
2259 | } |
2260 | |
2261 | // LLVM's definition of dominance allows instructions that are cyclic |
2262 | // in unreachable blocks, e.g.: |
2263 | // %pat = select i1 %condition, @global, i16* %pat |
2264 | // because any instruction dominates an instruction in a block that's |
2265 | // not reachable from entry. |
2266 | // So, remove unreachable blocks from the function, because a) there's |
2267 | // no point in analyzing them and b) GlobalOpt should otherwise grow |
2268 | // some more complicated logic to break these cycles. |
2269 | // Removing unreachable blocks might invalidate the dominator so we |
2270 | // recalculate it. |
2271 | if (!F->isDeclaration()) { |
2272 | if (removeUnreachableBlocks(*F)) { |
2273 | auto &DT = LookupDomTree(*F); |
2274 | DT.recalculate(*F); |
2275 | Changed = true; |
2276 | } |
2277 | } |
2278 | |
2279 | Changed |= processGlobal(*F, TLI, LookupDomTree); |
2280 | |
2281 | if (!F->hasLocalLinkage()) |
2282 | continue; |
2283 | |
2284 | if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) { |
2285 | NumInternalFunc++; |
2286 | TargetTransformInfo &TTI = GetTTI(*F); |
2287 | // Change the calling convention to coldcc if either stress testing is |
2288 | // enabled or the target would like to use coldcc on functions which are |
2289 | // cold at all call sites and the callers contain no other non coldcc |
2290 | // calls. |
2291 | if (EnableColdCCStressTest || |
2292 | (isValidCandidateForColdCC(*F, GetBFI, AllCallsCold) && |
2293 | TTI.useColdCCForColdCall(*F))) { |
2294 | F->setCallingConv(CallingConv::Cold); |
2295 | changeCallSitesToColdCC(F); |
2296 | Changed = true; |
2297 | NumColdCC++; |
2298 | } |
2299 | } |
2300 | |
2301 | if (hasChangeableCC(F) && !F->isVarArg() && |
2302 | !F->hasAddressTaken()) { |
2303 | // If this function has a calling convention worth changing, is not a |
2304 | // varargs function, and is only called directly, promote it to use the |
2305 | // Fast calling convention. |
2306 | F->setCallingConv(CallingConv::Fast); |
2307 | ChangeCalleesToFastCall(F); |
2308 | ++NumFastCallFns; |
2309 | Changed = true; |
2310 | } |
2311 | |
2312 | if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && |
2313 | !F->hasAddressTaken()) { |
2314 | // The function is not used by a trampoline intrinsic, so it is safe |
2315 | // to remove the 'nest' attribute. |
2316 | RemoveNestAttribute(F); |
2317 | ++NumNestRemoved; |
2318 | Changed = true; |
2319 | } |
2320 | } |
2321 | return Changed; |
2322 | } |
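
// A rough before/after for the fastcc promotion above (illustrative IR only):
// for a function with local linkage, no varargs, and no taken address,
//
//   define internal i32 @f(i32 %x) { ... }
//   %r = call i32 @f(i32 1)
//
// becomes
//
//   define internal fastcc i32 @f(i32 %x) { ... }
//   %r = call fastcc i32 @f(i32 1)
//
// The definition and every direct call site must agree on the convention,
// which is why setting the callee's CC is paired with ChangeCalleesToFastCall.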
2323 | |
2324 | static bool |
2325 | OptimizeGlobalVars(Module &M, TargetLibraryInfo *TLI, |
2326 | function_ref<DominatorTree &(Function &)> LookupDomTree, |
2327 | SmallSet<const Comdat *, 8> &NotDiscardableComdats) { |
2328 | bool Changed = false; |
2329 | |
2330 | for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); |
2331 | GVI != E; ) { |
2332 | GlobalVariable *GV = &*GVI++; |
2333 | // Global variables without names cannot be referenced outside this module. |
2334 | if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage()) |
2335 | GV->setLinkage(GlobalValue::InternalLinkage); |
2336 | // Simplify the initializer. |
2337 | if (GV->hasInitializer()) |
2338 | if (auto *C = dyn_cast<Constant>(GV->getInitializer())) { |
2339 | auto &DL = M.getDataLayout(); |
2340 | Constant *New = ConstantFoldConstant(C, DL, TLI); |
2341 | if (New && New != C) |
2342 | GV->setInitializer(New); |
2343 | } |
2344 | |
2345 | if (deleteIfDead(*GV, NotDiscardableComdats)) { |
2346 | Changed = true; |
2347 | continue; |
2348 | } |
2349 | |
2350 | Changed |= processGlobal(*GV, TLI, LookupDomTree); |
2351 | } |
2352 | return Changed; |
2353 | } |
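
// As an example of the initializer simplification above (a sketch): given
//
//   @a = global i32 7
//   @p = global i64 ptrtoint (i32* getelementptr (i32, i32* @a, i64 0) to i64)
//
// ConstantFoldConstant can fold the all-zero-index getelementptr away, so the
// initializer committed back to @p is the simpler ptrtoint (i32* @a to i64).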
2354 | |
2355 | /// Evaluate a piece of a constantexpr store into a global initializer. This |
2356 | /// returns 'Init' modified to reflect 'Val' stored into it. At this point, the |
2357 | /// GEP operands of Addr [0, OpNo) have been stepped into. |
2358 | static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, |
2359 | ConstantExpr *Addr, unsigned OpNo) { |
2360 | // Base case of the recursion. |
2361 | if (OpNo == Addr->getNumOperands()) { |
2362 | assert(Val->getType() == Init->getType() && "Type mismatch!")(static_cast <bool> (Val->getType() == Init->getType () && "Type mismatch!") ? void (0) : __assert_fail ("Val->getType() == Init->getType() && \"Type mismatch!\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2362, __extension__ __PRETTY_FUNCTION__)); |
2363 | return Val; |
2364 | } |
2365 | |
2366 | SmallVector<Constant*, 32> Elts; |
2367 | if (StructType *STy = dyn_cast<StructType>(Init->getType())) { |
2368 | // Break up the constant into its elements. |
2369 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) |
2370 | Elts.push_back(Init->getAggregateElement(i)); |
2371 | |
2372 | // Replace the element that we are supposed to. |
2373 | ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); |
2374 | unsigned Idx = CU->getZExtValue(); |
2375 | assert(Idx < STy->getNumElements() && "Struct index out of range!")(static_cast <bool> (Idx < STy->getNumElements() && "Struct index out of range!") ? void (0) : __assert_fail ("Idx < STy->getNumElements() && \"Struct index out of range!\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2375, __extension__ __PRETTY_FUNCTION__)); |
2376 | Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); |
2377 | |
2378 | // Return the modified struct. |
2379 | return ConstantStruct::get(STy, Elts); |
2380 | } |
2381 | |
2382 | ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); |
2383 | SequentialType *InitTy = cast<SequentialType>(Init->getType()); |
2384 | uint64_t NumElts = InitTy->getNumElements(); |
2385 | |
2386 | // Break up the array into elements. |
2387 | for (uint64_t i = 0, e = NumElts; i != e; ++i) |
2388 | Elts.push_back(Init->getAggregateElement(i)); |
2389 | |
2390 | assert(CI->getZExtValue() < NumElts)(static_cast <bool> (CI->getZExtValue() < NumElts ) ? void (0) : __assert_fail ("CI->getZExtValue() < NumElts" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2390, __extension__ __PRETTY_FUNCTION__)); |
2391 | Elts[CI->getZExtValue()] = |
2392 | EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); |
2393 | |
2394 | if (Init->getType()->isArrayTy()) |
2395 | return ConstantArray::get(cast<ArrayType>(InitTy), Elts); |
2396 | return ConstantVector::get(Elts); |
2397 | } |
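
// A worked example of the recursion (hypothetical store): committing i32 5 to
//
//   getelementptr ({ i32, [2 x i32] }, { i32, [2 x i32] }* @g, i64 0, i32 1, i64 0)
//
// starts with OpNo == 2. The first level copies both struct elements, recurses
// into element 1 (the array), and rebuilds the struct; the second level copies
// both array elements, replaces element 0 with i32 5, and rebuilds the array.
// Every level copies all sibling elements, which is exactly the cost that the
// batched path in BatchCommitValueTo below is designed to amortize.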
2398 | |
2399 | /// We have decided that Addr (which satisfies the predicate |
2400 | /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. |
2401 | static void CommitValueTo(Constant *Val, Constant *Addr) { |
2402 | if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { |
2403 | assert(GV->hasInitializer())(static_cast <bool> (GV->hasInitializer()) ? void (0 ) : __assert_fail ("GV->hasInitializer()", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2403, __extension__ __PRETTY_FUNCTION__)); |
2404 | GV->setInitializer(Val); |
2405 | return; |
2406 | } |
2407 | |
2408 | ConstantExpr *CE = cast<ConstantExpr>(Addr); |
2409 | GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); |
2410 | GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); |
2411 | } |
2412 | |
2413 | /// Given a map of address -> value, where each address is expected to be either |
2414 | /// a global or a constant GEP, set the initializer for the address to be the |
2415 | /// value. This performs mostly the same function as CommitValueTo() and |
2416 | /// EvaluateStoreInto() but is optimized to be more efficient for the common |
2417 | /// case where the addresses are GEPs sharing the same underlying global, |
2418 | /// processing the GEPs in batches rather than individually. |
2419 | /// |
2420 | /// To give an example, consider the following C++ code adapted from the clang |
2421 | /// regression tests: |
2422 | /// struct S { |
2423 | /// int n = 10; |
2424 | /// int m = 2 * n; |
2425 | /// S(int a) : n(a) {} |
2426 | /// }; |
2427 | /// |
2428 | /// template<typename T> |
2429 | /// struct U { |
2430 | /// T *r = &q; |
2431 | /// T q = 42; |
2432 | /// U *p = this; |
2433 | /// }; |
2434 | /// |
2435 | /// U<S> e; |
2436 | /// |
2437 | /// The global static constructor for 'e' will need to initialize 'r' and 'p' of |
2438 | /// the outer struct, while also initializing the inner 'q' struct's 'n' and 'm' |
2439 | /// members. This batch algorithm will simply use the general CommitValueTo() |
2440 | /// method to handle the complex nested S struct initialization of 'q', before |
2441 | /// processing the outermost members in a single batch. Using CommitValueTo() to |
2442 | /// handle each member of the outer struct is inefficient when the struct/array |
2443 | /// is very large, as we end up creating and destroying constant arrays for each |
2444 | /// initialization. |
2445 | /// For the above case, we expect the following IR to be generated: |
2446 | /// |
2447 | /// %struct.U = type { %struct.S*, %struct.S, %struct.U* } |
2448 | /// %struct.S = type { i32, i32 } |
2449 | /// @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e, |
2450 | /// i64 0, i32 1), |
2451 | /// %struct.S { i32 42, i32 84 }, %struct.U* @e } |
2452 | /// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex |
2453 | /// constant expression, while the other two elements of @e are "simple". |
2454 | static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) { |
2455 | SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs; |
2456 | SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs; |
2457 | SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs; |
2458 | SimpleCEs.reserve(Mem.size()); |
2459 | |
2460 | for (const auto &I : Mem) { |
2461 | if (auto *GV = dyn_cast<GlobalVariable>(I.first)) { |
2462 | GVs.push_back(std::make_pair(GV, I.second)); |
2463 | } else { |
2464 | ConstantExpr *GEP = cast<ConstantExpr>(I.first); |
2465 | // We don't handle the deeply recursive case using the batch method. |
2466 | if (GEP->getNumOperands() > 3) |
2467 | ComplexCEs.push_back(std::make_pair(GEP, I.second)); |
2468 | else |
2469 | SimpleCEs.push_back(std::make_pair(GEP, I.second)); |
2470 | } |
2471 | } |
2472 | |
2473 | // The algorithm below doesn't handle cases like nested structs, so use the |
2474 | // slower fully general method if we have to. |
2475 | for (auto ComplexCE : ComplexCEs) |
2476 | CommitValueTo(ComplexCE.second, ComplexCE.first); |
2477 | |
2478 | for (auto GVPair : GVs) { |
2479 | assert(GVPair.first->hasInitializer())(static_cast <bool> (GVPair.first->hasInitializer()) ? void (0) : __assert_fail ("GVPair.first->hasInitializer()" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2479, __extension__ __PRETTY_FUNCTION__)); |
2480 | GVPair.first->setInitializer(GVPair.second); |
2481 | } |
2482 | |
2483 | if (SimpleCEs.empty()) |
2484 | return; |
2485 | |
2486 | // We cache a single global's initializer elements in the case where the |
2487 | // subsequent address/val pair uses the same one. This avoids throwing away and |
2488 | // rebuilding the constant struct/vector/array just because one element is |
2489 | // modified at a time. |
2490 | SmallVector<Constant *, 32> Elts; |
2491 | Elts.reserve(SimpleCEs.size()); |
2492 | GlobalVariable *CurrentGV = nullptr; |
2493 | |
2494 | auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) { |
2495 | Constant *Init = GV->getInitializer(); |
2496 | Type *Ty = Init->getType(); |
2497 | if (Update) { |
2498 | if (CurrentGV) { |
2499 | assert(CurrentGV && "Expected a GV to commit to!")(static_cast <bool> (CurrentGV && "Expected a GV to commit to!" ) ? void (0) : __assert_fail ("CurrentGV && \"Expected a GV to commit to!\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2499, __extension__ __PRETTY_FUNCTION__)); |
2500 | Type *CurrentInitTy = CurrentGV->getInitializer()->getType(); |
2501 | // We have a valid cache that needs to be committed. |
2502 | if (StructType *STy = dyn_cast<StructType>(CurrentInitTy)) |
2503 | CurrentGV->setInitializer(ConstantStruct::get(STy, Elts)); |
2504 | else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy)) |
2505 | CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts)); |
2506 | else |
2507 | CurrentGV->setInitializer(ConstantVector::get(Elts)); |
2508 | } |
2509 | if (CurrentGV == GV) |
2510 | return; |
2511 | // Need to clear and set up cache for new initializer. |
2512 | CurrentGV = GV; |
2513 | Elts.clear(); |
2514 | unsigned NumElts; |
2515 | if (auto *STy = dyn_cast<StructType>(Ty)) |
2516 | NumElts = STy->getNumElements(); |
2517 | else |
2518 | NumElts = cast<SequentialType>(Ty)->getNumElements(); |
2519 | for (unsigned i = 0, e = NumElts; i != e; ++i) |
2520 | Elts.push_back(Init->getAggregateElement(i)); |
2521 | } |
2522 | }; |
2523 | |
2524 | for (auto CEPair : SimpleCEs) { |
2525 | ConstantExpr *GEP = CEPair.first; |
2526 | Constant *Val = CEPair.second; |
2527 | |
2528 | GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0)); |
2529 | commitAndSetupCache(GV, GV != CurrentGV); |
2530 | ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2)); |
2531 | Elts[CI->getZExtValue()] = Val; |
2532 | } |
2533 | // The last cached initializer needs to be committed explicitly; the others |
2534 | // were committed whenever the cache switched to a new global. |
2535 | commitAndSetupCache(CurrentGV, true); |
2536 | } |
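
// Informal example of the batching above (hypothetical map contents): with
//
//   Mem = { gep(@g, 0, 0) -> i32 1, gep(@g, 0, 1) -> i32 2, @h -> i32 9 }
//
// @h is committed directly, and both GEP stores land in the Elts cache for
// @g, which is rebuilt once at the end instead of once per store. DenseMap
// iteration order is unspecified, but that is harmless here because each
// distinct address appears in Mem exactly once.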
2537 | |
2538 | /// Evaluate static constructors in the function, if we can. Return true if we |
2539 | /// can, false otherwise. |
2540 | static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL, |
2541 | TargetLibraryInfo *TLI) { |
2542 | // Call the function. |
2543 | Evaluator Eval(DL, TLI); |
2544 | Constant *RetValDummy; |
2545 | bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, |
2546 | SmallVector<Constant*, 0>()); |
2547 | |
2548 | if (EvalSuccess) { |
2549 | ++NumCtorsEvaluated; |
2550 | |
2551 | // We succeeded at evaluation: commit the result. |
2552 | DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("globalopt")) { dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" << F->getName() << "' to " << Eval.getMutatedMemory ().size() << " stores.\n"; } } while (false) |
2553 | << F->getName() << "' to " << Eval.getMutatedMemory().size()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("globalopt")) { dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" << F->getName() << "' to " << Eval.getMutatedMemory ().size() << " stores.\n"; } } while (false) |
2554 | << " stores.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("globalopt")) { dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" << F->getName() << "' to " << Eval.getMutatedMemory ().size() << " stores.\n"; } } while (false); |
2555 | BatchCommitValueTo(Eval.getMutatedMemory()); |
2556 | for (GlobalVariable *GV : Eval.getInvariants()) |
2557 | GV->setConstant(true); |
2558 | } |
2559 | |
2560 | return EvalSuccess; |
2561 | } |
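
// As an example of what the evaluator can fold here (a sketch assuming the
// usual clang lowering), a dynamic initializer such as
//
//   static int x;
//   struct Init { Init() { x = 42; } } i;
//
// compiles to a _GLOBAL__sub_I_* constructor whose only effect is storing
// i32 42 to @x. EvaluateFunction records that store as mutated memory,
// BatchCommitValueTo bakes it into @x's initializer, and the now-redundant
// entry can be dropped from llvm.global_ctors by optimizeGlobalCtorsList.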
2562 | |
2563 | static int compareNames(Constant *const *A, Constant *const *B) { |
2564 | Value *AStripped = (*A)->stripPointerCastsNoFollowAliases(); |
2565 | Value *BStripped = (*B)->stripPointerCastsNoFollowAliases(); |
2566 | return AStripped->getName().compare(BStripped->getName()); |
2567 | } |
2568 | |
2569 | static void setUsedInitializer(GlobalVariable &V, |
2570 | const SmallPtrSet<GlobalValue *, 8> &Init) { |
2571 | if (Init.empty()) { |
2572 | V.eraseFromParent(); |
2573 | return; |
2574 | } |
2575 | |
2576 | // Type of pointer to the array of pointers. |
2577 | PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); |
2578 | |
2579 | SmallVector<Constant *, 8> UsedArray; |
2580 | for (GlobalValue *GV : Init) { |
2581 | Constant *Cast |
2582 | = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy); |
2583 | UsedArray.push_back(Cast); |
2584 | } |
2585 | // Sort to get deterministic order. |
2586 | array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); |
2587 | ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); |
2588 | |
2589 | Module *M = V.getParent(); |
2590 | V.removeFromParent(); |
2591 | GlobalVariable *NV = |
2592 | new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage, |
2593 | ConstantArray::get(ATy, UsedArray), ""); |
2594 | NV->takeName(&V); |
2595 | NV->setSection("llvm.metadata"); |
2596 | delete &V; |
2597 | } |
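
// Shape of the rebuilt array (illustrative): for Init = {@f, @g} this creates
//
//   @llvm.used = appending global [2 x i8*]
//     [i8* bitcast (void ()* @f to i8*), i8* bitcast (void ()* @g to i8*)],
//     section "llvm.metadata"
//
// The variable is recreated rather than mutated because the array type encodes
// its length, so shrinking the set necessarily changes the variable's type.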
2598 | |
2599 | namespace { |
2600 | |
2601 | /// An easy to access representation of llvm.used and llvm.compiler.used. |
2602 | class LLVMUsed { |
2603 | SmallPtrSet<GlobalValue *, 8> Used; |
2604 | SmallPtrSet<GlobalValue *, 8> CompilerUsed; |
2605 | GlobalVariable *UsedV; |
2606 | GlobalVariable *CompilerUsedV; |
2607 | |
2608 | public: |
2609 | LLVMUsed(Module &M) { |
2610 | UsedV = collectUsedGlobalVariables(M, Used, false); |
2611 | CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true); |
2612 | } |
2613 | |
2614 | using iterator = SmallPtrSet<GlobalValue *, 8>::iterator; |
2615 | using used_iterator_range = iterator_range<iterator>; |
2616 | |
2617 | iterator usedBegin() { return Used.begin(); } |
2618 | iterator usedEnd() { return Used.end(); } |
2619 | |
2620 | used_iterator_range used() { |
2621 | return used_iterator_range(usedBegin(), usedEnd()); |
2622 | } |
2623 | |
2624 | iterator compilerUsedBegin() { return CompilerUsed.begin(); } |
2625 | iterator compilerUsedEnd() { return CompilerUsed.end(); } |
2626 | |
2627 | used_iterator_range compilerUsed() { |
2628 | return used_iterator_range(compilerUsedBegin(), compilerUsedEnd()); |
2629 | } |
2630 | |
2631 | bool usedCount(GlobalValue *GV) const { return Used.count(GV); } |
2632 | |
2633 | bool compilerUsedCount(GlobalValue *GV) const { |
2634 | return CompilerUsed.count(GV); |
2635 | } |
2636 | |
2637 | bool usedErase(GlobalValue *GV) { return Used.erase(GV); } |
2638 | bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } |
2639 | bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; } |
2640 | |
2641 | bool compilerUsedInsert(GlobalValue *GV) { |
2642 | return CompilerUsed.insert(GV).second; |
2643 | } |
2644 | |
2645 | void syncVariablesAndSets() { |
2646 | if (UsedV) |
2647 | setUsedInitializer(*UsedV, Used); |
2648 | if (CompilerUsedV) |
2649 | setUsedInitializer(*CompilerUsedV, CompilerUsed); |
2650 | } |
2651 | }; |
2652 | |
2653 | } // end anonymous namespace |
2654 | |
2655 | static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { |
2656 | if (GA.use_empty()) // No use at all. |
2657 | return false; |
2658 | |
2659 | assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&(static_cast <bool> ((!U.usedCount(&GA) || !U.compilerUsedCount (&GA)) && "We should have removed the duplicated " "element from llvm.compiler.used") ? void (0) : __assert_fail ("(!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && \"We should have removed the duplicated \" \"element from llvm.compiler.used\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2661, __extension__ __PRETTY_FUNCTION__)) |
2660 | "We should have removed the duplicated "(static_cast <bool> ((!U.usedCount(&GA) || !U.compilerUsedCount (&GA)) && "We should have removed the duplicated " "element from llvm.compiler.used") ? void (0) : __assert_fail ("(!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && \"We should have removed the duplicated \" \"element from llvm.compiler.used\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2661, __extension__ __PRETTY_FUNCTION__)) |
2661 | "element from llvm.compiler.used")(static_cast <bool> ((!U.usedCount(&GA) || !U.compilerUsedCount (&GA)) && "We should have removed the duplicated " "element from llvm.compiler.used") ? void (0) : __assert_fail ("(!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && \"We should have removed the duplicated \" \"element from llvm.compiler.used\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2661, __extension__ __PRETTY_FUNCTION__)); |
2662 | if (!GA.hasOneUse()) |
2663 | // Strictly more than one use. So at least one use is outside llvm.used |
2664 | // and llvm.compiler.used. |
2665 | return true; |
2666 | |
2667 | // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. |
2668 | return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); |
2669 | } |
2670 | |
2671 | static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, |
2672 | const LLVMUsed &U) { |
2673 | unsigned N = 2; |
2674 | assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&(static_cast <bool> ((!U.usedCount(&V) || !U.compilerUsedCount (&V)) && "We should have removed the duplicated " "element from llvm.compiler.used") ? void (0) : __assert_fail ("(!U.usedCount(&V) || !U.compilerUsedCount(&V)) && \"We should have removed the duplicated \" \"element from llvm.compiler.used\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2676, __extension__ __PRETTY_FUNCTION__)) |
2675 | "We should have removed the duplicated "(static_cast <bool> ((!U.usedCount(&V) || !U.compilerUsedCount (&V)) && "We should have removed the duplicated " "element from llvm.compiler.used") ? void (0) : __assert_fail ("(!U.usedCount(&V) || !U.compilerUsedCount(&V)) && \"We should have removed the duplicated \" \"element from llvm.compiler.used\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2676, __extension__ __PRETTY_FUNCTION__)) |
2676 | "element from llvm.compiler.used")(static_cast <bool> ((!U.usedCount(&V) || !U.compilerUsedCount (&V)) && "We should have removed the duplicated " "element from llvm.compiler.used") ? void (0) : __assert_fail ("(!U.usedCount(&V) || !U.compilerUsedCount(&V)) && \"We should have removed the duplicated \" \"element from llvm.compiler.used\"" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/IPO/GlobalOpt.cpp" , 2676, __extension__ __PRETTY_FUNCTION__)); |
2677 | if (U.usedCount(&V) || U.compilerUsedCount(&V)) |
2678 | ++N; |
2679 | return V.hasNUsesOrMore(N); |
2680 | } |
2681 | |
2682 | static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { |
2683 | if (!GA.hasLocalLinkage()) |
2684 | return true; |
2685 | |
2686 | return U.usedCount(&GA) || U.compilerUsedCount(&GA); |
2687 | } |
2688 | |
2689 | static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U, |
2690 | bool &RenameTarget) { |
2691 | RenameTarget = false; |
2692 | bool Ret = false; |
2693 | if (hasUseOtherThanLLVMUsed(GA, U)) |
2694 | Ret = true; |
2695 | |
2696 | // If the alias is externally visible, we may still be able to simplify it. |
2697 | if (!mayHaveOtherReferences(GA, U)) |
2698 | return Ret; |
2699 | |
2700 | // If the aliasee has internal linkage, give it the name and linkage |
2701 | // of the alias, and delete the alias. This turns: |
2702 | // define internal ... @f(...) |
2703 | // @a = alias ... @f |
2704 | // into: |
2705 | // define ... @a(...) |
2706 | Constant *Aliasee = GA.getAliasee(); |
2707 | GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); |
2708 | if (!Target->hasLocalLinkage()) |
2709 | return Ret; |
2710 | |
2711 | // Do not perform the transform if multiple aliases potentially target the |
2712 | // aliasee. This check also ensures that it is safe to replace the section |
2713 | // and other attributes of the aliasee with those of the alias. |
2714 | if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) |
2715 | return Ret; |
2716 | |
2717 | RenameTarget = true; |
2718 | return true; |
2719 | } |
2720 | |
2721 | static bool |
2722 | OptimizeGlobalAliases(Module &M, |
2723 | SmallSet<const Comdat *, 8> &NotDiscardableComdats) { |
2724 | bool Changed = false; |
2725 | LLVMUsed Used(M); |
2726 | |
2727 | for (GlobalValue *GV : Used.used()) |
2728 | Used.compilerUsedErase(GV); |
2729 | |
2730 | for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); |
2731 | I != E;) { |
2732 | GlobalAlias *J = &*I++; |
2733 | |
2734 | // Aliases without names cannot be referenced outside this module. |
2735 | if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage()) |
2736 | J->setLinkage(GlobalValue::InternalLinkage); |
2737 | |
2738 | if (deleteIfDead(*J, NotDiscardableComdats)) { |
2739 | Changed = true; |
2740 | continue; |
2741 | } |
2742 | |
2743 | // If the alias can change at link time, nothing can be done - bail out. |
2744 | if (J->isInterposable()) |
2745 | continue; |
2746 | |
2747 | Constant *Aliasee = J->getAliasee(); |
2748 | GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts()); |
2749 | // We can't trivially replace the alias with the aliasee if the aliasee is |
2750 | // non-trivial in some way. |
2751 | // TODO: Try to handle non-zero GEPs of local aliasees. |
2752 | if (!Target) |
2753 | continue; |
2754 | Target->removeDeadConstantUsers(); |
2755 | |
2756 | // Make all users of the alias use the aliasee instead. |
2757 | bool RenameTarget; |
2758 | if (!hasUsesToReplace(*J, Used, RenameTarget)) |
2759 | continue; |
2760 | |
2761 | J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType())); |
2762 | ++NumAliasesResolved; |
2763 | Changed = true; |
2764 | |
2765 | if (RenameTarget) { |
2766 | // Give the aliasee the name, linkage and other attributes of the alias. |
2767 | Target->takeName(&*J); |
2768 | Target->setLinkage(J->getLinkage()); |
2769 | Target->setDSOLocal(J->isDSOLocal()); |
2770 | Target->setVisibility(J->getVisibility()); |
2771 | Target->setDLLStorageClass(J->getDLLStorageClass()); |
2772 | |
2773 | if (Used.usedErase(&*J)) |
2774 | Used.usedInsert(Target); |
2775 | |
2776 | if (Used.compilerUsedErase(&*J)) |
2777 | Used.compilerUsedInsert(Target); |
2778 | } else if (mayHaveOtherReferences(*J, Used)) |
2779 | continue; |
2780 | |
2781 | // Delete the alias. |
2782 | M.getAliasList().erase(J); |
2783 | ++NumAliasesRemoved; |
2784 | Changed = true; |
2785 | } |
2786 | |
2787 | Used.syncVariablesAndSets(); |
2788 | |
2789 | return Changed; |
2790 | } |
2791 | |
2792 | static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) { |
2793 | LibFunc F = LibFunc_cxa_atexit; |
2794 | if (!TLI->has(F)) |
2795 | return nullptr; |
2796 | |
2797 | Function *Fn = M.getFunction(TLI->getName(F)); |
2798 | if (!Fn) |
2799 | return nullptr; |
2800 | |
2801 | // Make sure that the function has the correct prototype. |
2802 | if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit) |
2803 | return nullptr; |
2804 | |
2805 | return Fn; |
2806 | } |
2807 | |
2808 | /// Returns whether the given function is an empty C++ destructor and can |
2809 | /// therefore be eliminated. |
2810 | /// Note that we assume that other optimization passes have already simplified |
2811 | /// the code so we only look for a function with a single basic block, where |
2812 | /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor, and |
2813 | /// other side-effect-free instructions. |
2814 | static bool cxxDtorIsEmpty(const Function &Fn, |
2815 | SmallPtrSet<const Function *, 8> &CalledFunctions) { |
2816 | // FIXME: We could eliminate C++ destructors if they're readonly/readnone and |
2817 | // nounwind, but that doesn't seem worth doing. |
2818 | if (Fn.isDeclaration()) |
2819 | return false; |
2820 | |
2821 | if (++Fn.begin() != Fn.end()) |
2822 | return false; |
2823 | |
2824 | const BasicBlock &EntryBlock = Fn.getEntryBlock(); |
2825 | for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end(); |
2826 | I != E; ++I) { |
2827 | if (const CallInst *CI = dyn_cast<CallInst>(I)) { |
2828 | // Ignore debug intrinsics. |
2829 | if (isa<DbgInfoIntrinsic>(CI)) |
2830 | continue; |
2831 | |
2832 | const Function *CalledFn = CI->getCalledFunction(); |
2833 | |
2834 | if (!CalledFn) |
2835 | return false; |
2836 | |
2837 | SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions); |
2838 | |
2839 | // Don't treat recursive functions as empty. |
2840 | if (!NewCalledFunctions.insert(CalledFn).second) |
2841 | return false; |
2842 | |
2843 | if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions)) |
2844 | return false; |
2845 | } else if (isa<ReturnInst>(*I)) |
2846 | return true; // We're done. |
2847 | else if (I->mayHaveSideEffects()) |
2848 | return false; // Destructor with side effects, bail. |
2849 | } |
2850 | |
2851 | return false; |
2852 | } |
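
// For instance (a sketch), cxxDtorIsEmpty accepts the destructor clang emits
// for
//
//   struct S { ~S() {} };
//
// whose body is a single basic block containing only 'ret void', possibly
// alongside debug intrinsics or calls to other provably-empty destructors.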
2853 | |
2854 | static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { |
2855 | /// Itanium C++ ABI p3.3.5: |
2856 | /// |
2857 | /// After constructing a global (or local static) object, that will require |
2858 | /// destruction on exit, a termination function is registered as follows: |
2859 | /// |
2860 | /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); |
2861 | /// |
2862 | /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the |
2863 | /// call f(p) when DSO d is unloaded, before all such termination calls |
2864 | /// registered before this one. It returns zero if registration is |
2865 | /// successful, nonzero on failure. |
2866 | |
2867 | // This pass will look for calls to __cxa_atexit where the registered |
2868 | // function is trivially empty, and remove them. |
2869 | bool Changed = false; |
2870 | |
2871 | for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end(); |
2872 | I != E;) { |
2873 | // We're only interested in calls. Theoretically, we could handle invoke |
2874 | // instructions as well, but neither llvm-gcc nor clang generates invokes |
2875 | // to __cxa_atexit. |
2876 | CallInst *CI = dyn_cast<CallInst>(*I++); |
2877 | if (!CI) |
2878 | continue; |
2879 | |
2880 | Function *DtorFn = |
2881 | dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); |
2882 | if (!DtorFn) |
2883 | continue; |
2884 | |
2885 | SmallPtrSet<const Function *, 8> CalledFunctions; |
2886 | if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions)) |
2887 | continue; |
2888 | |
2889 | // Just remove the call. |
2890 | CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); |
2891 | CI->eraseFromParent(); |
2892 | |
2893 | ++NumCXXDtorsRemoved; |
2894 | |
2895 | Changed |= true; |
2896 | } |
2897 | |
2898 | return Changed; |
2899 | } |
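
// Concretely (hypothetical IR), a registration such as
//
//   %0 = call i32 @__cxa_atexit(void (i8*)* @_ZN1SD2Ev, i8* @s,
//                               i8* @__dso_handle)
//
// is deleted outright once @_ZN1SD2Ev is known to be empty; any use of the
// returned i32 is replaced with 0, i.e. a successful (but now unnecessary)
// registration.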
2900 | |
2901 | static bool optimizeGlobalsInModule( |
2902 | Module &M, const DataLayout &DL, TargetLibraryInfo *TLI, |
2903 | function_ref<TargetTransformInfo &(Function &)> GetTTI, |
2904 | function_ref<BlockFrequencyInfo &(Function &)> GetBFI, |
2905 | function_ref<DominatorTree &(Function &)> LookupDomTree) { |
2906 | SmallSet<const Comdat *, 8> NotDiscardableComdats; |
2907 | bool Changed = false; |
2908 | bool LocalChange = true; |
2909 | while (LocalChange) { |
2910 | LocalChange = false; |
2911 | |
2912 | NotDiscardableComdats.clear(); |
2913 | for (const GlobalVariable &GV : M.globals()) |
2914 | if (const Comdat *C = GV.getComdat()) |
2915 | if (!GV.isDiscardableIfUnused() || !GV.use_empty()) |
2916 | NotDiscardableComdats.insert(C); |
2917 | for (Function &F : M) |
2918 | if (const Comdat *C = F.getComdat()) |
2919 | if (!F.isDefTriviallyDead()) |
2920 | NotDiscardableComdats.insert(C); |
2921 | for (GlobalAlias &GA : M.aliases()) |
2922 | if (const Comdat *C = GA.getComdat()) |
2923 | if (!GA.isDiscardableIfUnused() || !GA.use_empty()) |
2924 | NotDiscardableComdats.insert(C); |
2925 | |
2926 | // Delete functions that are trivially dead, ccc -> fastcc |
2927 | LocalChange |= OptimizeFunctions(M, TLI, GetTTI, GetBFI, LookupDomTree, |
2928 | NotDiscardableComdats); |
2929 | |
2930 | // Optimize global_ctors list. |
2931 | LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) { |
2932 | return EvaluateStaticConstructor(F, DL, TLI); |
2933 | }); |
2934 | |
2935 | // Optimize non-address-taken globals. |
2936 | LocalChange |= OptimizeGlobalVars(M, TLI, LookupDomTree, |
2937 | NotDiscardableComdats); |
2938 | |
2939 | // Resolve aliases, when possible. |
2940 | LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats); |
2941 | |
2942 | // Try to remove trivial global destructors if they are not removed |
2943 | // already. |
2944 | Function *CXAAtExitFn = FindCXAAtExit(M, TLI); |
2945 | if (CXAAtExitFn) |
2946 | LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); |
2947 | |
2948 | Changed |= LocalChange; |
2949 | } |
2950 | |
2951 | // TODO: Move all global ctors functions to the end of the module for code |
2952 | // layout. |
2953 | |
2954 | return Changed; |
2955 | } |
2956 | |
2957 | PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) { |
2958 | auto &DL = M.getDataLayout(); |
2959 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(M); |
2960 | auto &FAM = |
2961 | AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); |
2962 | auto LookupDomTree = [&FAM](Function &F) -> DominatorTree &{ |
2963 | return FAM.getResult<DominatorTreeAnalysis>(F); |
2964 | }; |
2965 | auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & { |
2966 | return FAM.getResult<TargetIRAnalysis>(F); |
2967 | }; |
2968 | |
2969 | auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & { |
2970 | return FAM.getResult<BlockFrequencyAnalysis>(F); |
2971 | }; |
2972 | |
2973 | if (!optimizeGlobalsInModule(M, DL, &TLI, GetTTI, GetBFI, LookupDomTree)) |
2974 | return PreservedAnalyses::all(); |
2975 | return PreservedAnalyses::none(); |
2976 | } |
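
// Usage sketch: the new-PM pass above can be exercised standalone with
//
//   opt -passes=globalopt input.ll -S -o output.ll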
2977 | |
2978 | namespace { |
2979 | |
2980 | struct GlobalOptLegacyPass : public ModulePass { |
2981 | static char ID; // Pass identification, replacement for typeid |
2982 | |
2983 | GlobalOptLegacyPass() : ModulePass(ID) { |
2984 | initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry()); |
2985 | } |
2986 | |
2987 | bool runOnModule(Module &M) override { |
2988 | if (skipModule(M)) |
2989 | return false; |
2990 | |
2991 | auto &DL = M.getDataLayout(); |
2992 | auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); |
2993 | auto LookupDomTree = [this](Function &F) -> DominatorTree & { |
2994 | return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); |
2995 | }; |
2996 | auto GetTTI = [this](Function &F) -> TargetTransformInfo & { |
2997 | return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); |
2998 | }; |
2999 | |
3000 | auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & { |
3001 | return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI(); |
3002 | }; |
3003 | |
3004 | return optimizeGlobalsInModule(M, DL, TLI, GetTTI, GetBFI, LookupDomTree); |
3005 | } |
3006 | |
3007 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
3008 | AU.addRequired<TargetLibraryInfoWrapperPass>(); |
3009 | AU.addRequired<TargetTransformInfoWrapperPass>(); |
3010 | AU.addRequired<DominatorTreeWrapperPass>(); |
3011 | AU.addRequired<BlockFrequencyInfoWrapperPass>(); |
3012 | } |
3013 | }; |
3014 | |
3015 | } // end anonymous namespace |
3016 | |
3017 | char GlobalOptLegacyPass::ID = 0; |
3018 | |
3019 | INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",static void *initializeGlobalOptLegacyPassPassOnce(PassRegistry &Registry) { |
3020 | "Global Variable Optimizer", false, false)static void *initializeGlobalOptLegacyPassPassOnce(PassRegistry &Registry) { |
3021 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry); |
3022 | INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)initializeTargetTransformInfoWrapperPassPass(Registry); |
3023 | INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)initializeBlockFrequencyInfoWrapperPassPass(Registry); |
3024 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)initializeDominatorTreeWrapperPassPass(Registry); |
3025 | INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",PassInfo *PI = new PassInfo( "Global Variable Optimizer", "globalopt" , &GlobalOptLegacyPass::ID, PassInfo::NormalCtor_t(callDefaultCtor <GlobalOptLegacyPass>), false, false); Registry.registerPass (*PI, true); return PI; } static llvm::once_flag InitializeGlobalOptLegacyPassPassFlag ; void llvm::initializeGlobalOptLegacyPassPass(PassRegistry & Registry) { llvm::call_once(InitializeGlobalOptLegacyPassPassFlag , initializeGlobalOptLegacyPassPassOnce, std::ref(Registry)); } |
3026 | "Global Variable Optimizer", false, false)PassInfo *PI = new PassInfo( "Global Variable Optimizer", "globalopt" , &GlobalOptLegacyPass::ID, PassInfo::NormalCtor_t(callDefaultCtor <GlobalOptLegacyPass>), false, false); Registry.registerPass (*PI, true); return PI; } static llvm::once_flag InitializeGlobalOptLegacyPassPassFlag ; void llvm::initializeGlobalOptLegacyPassPass(PassRegistry & Registry) { llvm::call_once(InitializeGlobalOptLegacyPassPassFlag , initializeGlobalOptLegacyPassPassOnce, std::ref(Registry)); } |
3027 | |
3028 | ModulePass *llvm::createGlobalOptimizerPass() { |
3029 | return new GlobalOptLegacyPass(); |
3030 | } |