Bug Summary

File: llvm/lib/Transforms/IPO/GlobalOpt.cpp
Warning: line 2538, column 22
Called C++ object pointer is null
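For reference, a minimal, self-contained example of this defect class (illustrative only; hypothetical names, not the code at GlobalOpt.cpp:2538): a member function is called through an object pointer on a path where that pointer can be null.

    // Hypothetical illustration of "Called C++ object pointer is null".
    struct Widget {
      int size() const { return N; }
      int N = 0;
    };

    int length(const Widget *W) {
      if (W == nullptr) {
        // error handling elided; execution falls through
      }
      return W->size();   // still reached when W is null: the analyzer reports
                          // "Called C++ object pointer is null" on this call
    }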

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name GlobalOpt.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/include -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Transforms/IPO -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-01-13-084841-49055-1 -x c++ /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/IPO/GlobalOpt.cpp

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/IPO/GlobalOpt.cpp

1//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass transforms simple global variables that never have their address
10// taken. If obviously true, it marks read/write globals as constant, deletes
11// variables only stored to, etc.
12//
13//===----------------------------------------------------------------------===//
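// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp.] Source-level
// globals of the kind this pass typically rewrites, assuming internal linkage
// and no address-taking uses beyond those shown:
//
//   static int OnlyWritten;            // only ever stored to: the stores and the
//                                      // variable itself can be deleted
//   static int EffectivelyConst = 42;  // never modified: marked constant, and
//                                      // loads fold to 42
//   static struct { int A; float B; } Pair;  // accessed only element-wise:
//                                      // split into scalar globals (SRA)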
14
15#include "llvm/Transforms/IPO/GlobalOpt.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/Statistic.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Analysis/BlockFrequencyInfo.h"
24#include "llvm/Analysis/ConstantFolding.h"
25#include "llvm/Analysis/MemoryBuiltins.h"
26#include "llvm/Analysis/TargetLibraryInfo.h"
27#include "llvm/Analysis/TargetTransformInfo.h"
28#include "llvm/BinaryFormat/Dwarf.h"
29#include "llvm/IR/Attributes.h"
30#include "llvm/IR/BasicBlock.h"
31#include "llvm/IR/CallSite.h"
32#include "llvm/IR/CallingConv.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DebugInfoMetadata.h"
37#include "llvm/IR/DerivedTypes.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/GetElementPtrTypeIterator.h"
41#include "llvm/IR/GlobalAlias.h"
42#include "llvm/IR/GlobalValue.h"
43#include "llvm/IR/GlobalVariable.h"
44#include "llvm/IR/InstrTypes.h"
45#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Instructions.h"
47#include "llvm/IR/IntrinsicInst.h"
48#include "llvm/IR/Module.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Use.h"
52#include "llvm/IR/User.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
55#include "llvm/InitializePasses.h"
56#include "llvm/Pass.h"
57#include "llvm/Support/AtomicOrdering.h"
58#include "llvm/Support/Casting.h"
59#include "llvm/Support/CommandLine.h"
60#include "llvm/Support/Debug.h"
61#include "llvm/Support/ErrorHandling.h"
62#include "llvm/Support/MathExtras.h"
63#include "llvm/Support/raw_ostream.h"
64#include "llvm/Transforms/IPO.h"
65#include "llvm/Transforms/Utils/CtorUtils.h"
66#include "llvm/Transforms/Utils/Evaluator.h"
67#include "llvm/Transforms/Utils/GlobalStatus.h"
68#include "llvm/Transforms/Utils/Local.h"
69#include <cassert>
70#include <cstdint>
71#include <utility>
72#include <vector>
73
74using namespace llvm;
75
76#define DEBUG_TYPE "globalopt"
77
78STATISTIC(NumMarked , "Number of globals marked constant");
79STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr");
80STATISTIC(NumSRA , "Number of aggregate globals broken into scalars");
81STATISTIC(NumHeapSRA , "Number of heap objects SRA'd");
82STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
83STATISTIC(NumDeleted , "Number of globals deleted");
84STATISTIC(NumGlobUses , "Number of global uses devirtualized");
85STATISTIC(NumLocalized , "Number of globals localized");
86STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans");
87STATISTIC(NumFastCallFns , "Number of functions converted to fastcc");
88STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
89STATISTIC(NumNestRemoved , "Number of nest attributes removed");
90STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
91STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
92STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
93STATISTIC(NumInternalFunc, "Number of internal functions");
94STATISTIC(NumColdCC, "Number of functions marked coldcc");
95
96static cl::opt<bool>
97 EnableColdCCStressTest("enable-coldcc-stress-test",
98 cl::desc("Enable stress test of coldcc by adding "
99 "calling conv to all internal functions."),
100 cl::init(false), cl::Hidden);
101
102static cl::opt<int> ColdCCRelFreq(
103 "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
104 cl::desc(
105 "Maximum block frequency, expressed as a percentage of caller's "
106 "entry frequency, for a call site to be considered cold for enabling"
107 "coldcc"));
108
109/// Is this global variable possibly used by a leak checker as a root? If so,
110/// we might not really want to eliminate the stores to it.
111static bool isLeakCheckerRoot(GlobalVariable *GV) {
112 // A global variable is a root if it is a pointer, or could plausibly contain
113 // a pointer. There are two challenges; one is that we could have a struct
114 // the has an inner member which is a pointer. We recurse through the type to
115 // detect these (up to a point). The other is that we may actually be a union
116 // of a pointer and another type, and so our LLVM type is an integer which
117 // gets converted into a pointer, or our type is an [i8 x #] with a pointer
118 // potentially contained here.
119
120 if (GV->hasPrivateLinkage())
121 return false;
122
123 SmallVector<Type *, 4> Types;
124 Types.push_back(GV->getValueType());
125
126 unsigned Limit = 20;
127 do {
128 Type *Ty = Types.pop_back_val();
129 switch (Ty->getTypeID()) {
130 default: break;
131 case Type::PointerTyID: return true;
132 case Type::ArrayTyID:
133 case Type::VectorTyID: {
134 SequentialType *STy = cast<SequentialType>(Ty);
135 Types.push_back(STy->getElementType());
136 break;
137 }
138 case Type::StructTyID: {
139 StructType *STy = cast<StructType>(Ty);
140 if (STy->isOpaque()) return true;
141 for (StructType::element_iterator I = STy->element_begin(),
142 E = STy->element_end(); I != E; ++I) {
143 Type *InnerTy = *I;
144 if (isa<PointerType>(InnerTy)) return true;
145 if (isa<CompositeType>(InnerTy))
146 Types.push_back(InnerTy);
147 }
148 break;
149 }
150 }
151 if (--Limit == 0) return true;
152 } while (!Types.empty());
153 return false;
154}
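// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp.] Intuition for
// the walk above: a global whose value type can hold a pointer, directly or
// nested inside an aggregate, may be the only thing keeping heap memory
// reachable, so a leak checker may treat it as a root.
//
//   static char *Buf;                        // pointer type: a root
//   static struct { int N; char *P; } Tab;   // struct containing a pointer: a root
//   static int Counter;                      // cannot hold a pointer: not a root
//
// The function answers true for the first two value types (recursing through
// struct/array/vector element types up to a fixed depth limit) and false for the last.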
155
156/// Given a value that is stored to a global but never read, determine whether
157/// it's safe to remove the store and the chain of computation that feeds the
158/// store.
159static bool IsSafeComputationToRemove(
160 Value *V, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
161 do {
162 if (isa<Constant>(V))
163 return true;
164 if (!V->hasOneUse())
165 return false;
166 if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
167 isa<GlobalValue>(V))
168 return false;
169 if (isAllocationFn(V, GetTLI))
170 return true;
171
172 Instruction *I = cast<Instruction>(V);
173 if (I->mayHaveSideEffects())
174 return false;
175 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
176 if (!GEP->hasAllConstantIndices())
177 return false;
178 } else if (I->getNumOperands() != 1) {
179 return false;
180 }
181
182 V = I->getOperand(0);
183 } while (true);
184}
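// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp.] The loop above
// walks single-use operand chains backwards from a value stored to a never-read
// global, e.g. (schematic LLVM IR):
//
//   %m = call i8* @malloc(i64 64)          ; allocation: chain is removable
//   %p = getelementptr i8, i8* %m, i64 8   ; all-constant GEP with a single use
//   store i8* %p, i8** @G                  ; @G is never read
//
// Each step is a constant, an allocation, or a side-effect-free instruction with
// one use and either a single operand or all-constant GEP indices, so the store
// and the entire chain can be deleted without changing observable behaviour.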
185
186/// This GV is a pointer root. Loop over all users of the global and clean up
187/// any that obviously don't assign the global a value that isn't dynamically
188/// allocated.
189static bool
190CleanupPointerRootUsers(GlobalVariable *GV,
191 function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
192 // A brief explanation of leak checkers. The goal is to find bugs where
193 // pointers are forgotten, causing an accumulating growth in memory
194 // usage over time. The common strategy for leak checkers is to whitelist the
195 // memory pointed to by globals at exit. This is popular because it also
196 // solves another problem where the main thread of a C++ program may shut down
197 // before other threads that are still expecting to use those globals. To
198 // handle that case, we expect the program may create a singleton and never
199 // destroy it.
200
201 bool Changed = false;
202
203 // If Dead[n].first is the only use of a malloc result, we can delete its
204 // chain of computation and the store to the global in Dead[n].second.
205 SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;
206
207 // Constants can't be pointers to dynamically allocated memory.
208 for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
209 UI != E;) {
210 User *U = *UI++;
211 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
212 Value *V = SI->getValueOperand();
213 if (isa<Constant>(V)) {
214 Changed = true;
215 SI->eraseFromParent();
216 } else if (Instruction *I = dyn_cast<Instruction>(V)) {
217 if (I->hasOneUse())
218 Dead.push_back(std::make_pair(I, SI));
219 }
220 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
221 if (isa<Constant>(MSI->getValue())) {
222 Changed = true;
223 MSI->eraseFromParent();
224 } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
225 if (I->hasOneUse())
226 Dead.push_back(std::make_pair(I, MSI));
227 }
228 } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
229 GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
230 if (MemSrc && MemSrc->isConstant()) {
231 Changed = true;
232 MTI->eraseFromParent();
233 } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
234 if (I->hasOneUse())
235 Dead.push_back(std::make_pair(I, MTI));
236 }
237 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
238 if (CE->use_empty()) {
239 CE->destroyConstant();
240 Changed = true;
241 }
242 } else if (Constant *C = dyn_cast<Constant>(U)) {
243 if (isSafeToDestroyConstant(C)) {
244 C->destroyConstant();
245 // This could have invalidated UI, start over from scratch.
246 Dead.clear();
247 CleanupPointerRootUsers(GV, GetTLI);
248 return true;
249 }
250 }
251 }
252
253 for (int i = 0, e = Dead.size(); i != e; ++i) {
254 if (IsSafeComputationToRemove(Dead[i].first, GetTLI)) {
255 Dead[i].second->eraseFromParent();
256 Instruction *I = Dead[i].first;
257 do {
258 if (isAllocationFn(I, GetTLI))
259 break;
260 Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
261 if (!J)
262 break;
263 I->eraseFromParent();
264 I = J;
265 } while (true);
266 I->eraseFromParent();
267 }
268 }
269
270 return Changed;
271}
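// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp.] Because the
// global may be a leak-checker root, the routine above only removes a store when
// doing so cannot hide a leak (schematic LLVM IR):
//
//   store i8* null, i8** @G        ; constant stored: store deleted
//   %m = call i8* @malloc(i64 16)  ; %m's only use is the next store:
//   store i8* %m, i8** @G          ;   store *and* allocation deleted together
//   store i8* %arg, i8** @G        ; value comes from elsewhere (an argument):
//                                  ;   kept, so the memory stays reachable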
272
273/// We just marked GV constant. Loop over all users of the global, cleaning up
274/// the obvious ones. This is largely just a quick scan over the use list to
275/// clean up the easy and obvious cruft. This returns true if it made a change.
276static bool CleanupConstantGlobalUsers(
277 Value *V, Constant *Init, const DataLayout &DL,
278 function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
279 bool Changed = false;
280 // Note that we need to use a weak value handle for the worklist items. When
281 // we delete a constant array, we may also be holding pointer to one of its
282 // elements (or an element of one of its elements if we're dealing with an
283 // array of arrays) in the worklist.
284 SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
285 while (!WorkList.empty()) {
286 Value *UV = WorkList.pop_back_val();
287 if (!UV)
288 continue;
289
290 User *U = cast<User>(UV);
291
292 if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
293 if (Init) {
294 // Replace the load with the initializer.
295 LI->replaceAllUsesWith(Init);
296 LI->eraseFromParent();
297 Changed = true;
298 }
299 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
300 // Store must be unreachable or storing Init into the global.
301 SI->eraseFromParent();
302 Changed = true;
303 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
304 if (CE->getOpcode() == Instruction::GetElementPtr) {
305 Constant *SubInit = nullptr;
306 if (Init)
307 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
308 Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, GetTLI);
309 } else if ((CE->getOpcode() == Instruction::BitCast &&
310 CE->getType()->isPointerTy()) ||
311 CE->getOpcode() == Instruction::AddrSpaceCast) {
312 // Pointer cast, delete any stores and memsets to the global.
313 Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, GetTLI);
314 }
315
316 if (CE->use_empty()) {
317 CE->destroyConstant();
318 Changed = true;
319 }
320 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
321 // Do not transform "gepinst (gep constexpr (GV))" here, because forming
322 // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
323 // and will invalidate our notion of what Init is.
324 Constant *SubInit = nullptr;
325 if (!isa<ConstantExpr>(GEP->getOperand(0))) {
326 ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
327 ConstantFoldInstruction(GEP, DL, &GetTLI(*GEP->getFunction())));
328 if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
329 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
330
331 // If the initializer is an all-null value and we have an inbounds GEP,
332 // we already know what the result of any load from that GEP is.
333 // TODO: Handle splats.
334 if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
335 SubInit = Constant::getNullValue(GEP->getResultElementType());
336 }
337 Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, GetTLI);
338
339 if (GEP->use_empty()) {
340 GEP->eraseFromParent();
341 Changed = true;
342 }
343 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
344 if (MI->getRawDest() == V) {
345 MI->eraseFromParent();
346 Changed = true;
347 }
348
349 } else if (Constant *C = dyn_cast<Constant>(U)) {
350 // If we have a chain of dead constantexprs or other things dangling from
351 // us, and if they are all dead, nuke them without remorse.
352 if (isSafeToDestroyConstant(C)) {
353 C->destroyConstant();
354 CleanupConstantGlobalUsers(V, Init, DL, GetTLI);
355 return true;
356 }
357 }
358 }
359 return Changed;
360}
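// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp.] Once @G is
// known to always hold its initializer, the cleanup above simplifies its users
// (schematic LLVM IR):
//
//   @G = internal global i32 7
//   %v = load i32, i32* @G   ; every use of %v is replaced by the constant 7
//   store i32 7, i32* @G     ; must be dead or redundant: deleted
//
// GEP constant expressions and GEP instructions are recursed into with the
// matching element of the initializer, so element loads fold the same way.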
361
362static bool isSafeSROAElementUse(Value *V);
363
364/// Return true if the specified GEP is a safe user of a derived
365/// expression from a global that we want to SROA.
366static bool isSafeSROAGEP(User *U) {
367 // Check to see if this ConstantExpr GEP is SRA'able. In particular, we
368 // don't like < 3 operand CE's, and we don't like non-constant integer
369 // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some
370 // value of C.
371 if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
372 !cast<Constant>(U->getOperand(1))->isNullValue())
373 return false;
374
375 gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
376 ++GEPI; // Skip over the pointer index.
377
378 // For all other levels we require that the indices are constant and in range.
379 // In particular, consider: A[0][i]. We cannot know that the user isn't doing
380 // invalid things like allowing i to index an out-of-range subscript that
381 // accesses A[1]. This can also happen between different members of a struct
382 // in llvm IR.
383 for (; GEPI != E; ++GEPI) {
384 if (GEPI.isStruct())
385 continue;
386
387 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
388 if (!IdxVal || (GEPI.isBoundedSequential() &&
389 IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
390 return false;
391 }
392
393 return llvm::all_of(U->users(),
394 [](User *UU) { return isSafeSROAElementUse(UU); });
395}
396
397/// Return true if the specified instruction is a safe user of a derived
398/// expression from a global that we want to SROA.
399static bool isSafeSROAElementUse(Value *V) {
400 // We might have a dead and dangling constant hanging off of here.
401 if (Constant *C = dyn_cast<Constant>(V))
402 return isSafeToDestroyConstant(C);
403
404 Instruction *I = dyn_cast<Instruction>(V);
405 if (!I) return false;
406
407 // Loads are ok.
408 if (isa<LoadInst>(I)) return true;
409
410 // Stores *to* the pointer are ok.
411 if (StoreInst *SI = dyn_cast<StoreInst>(I))
412 return SI->getOperand(0) != V;
413
414 // Otherwise, it must be a GEP. Check it and its users are safe to SRA.
415 return isa<GetElementPtrInst>(I) && isSafeSROAGEP(I);
416}
417
418/// Look at all uses of the global and decide whether it is safe for us to
419/// perform this transformation.
420static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
421 for (User *U : GV->users()) {
422 // The user of the global must be a GEP Inst or a ConstantExpr GEP.
423 if (!isa<GetElementPtrInst>(U) &&
424 (!isa<ConstantExpr>(U) ||
425 cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
426 return false;
427
428 // Check the gep and its users are safe to SRA
429 if (!isSafeSROAGEP(U))
430 return false;
431 }
432
433 return true;
434}
435
436static bool CanDoGlobalSRA(GlobalVariable *GV) {
437 Constant *Init = GV->getInitializer();
438
439 if (isa<StructType>(Init->getType())) {
440 // nothing to check
441 } else if (SequentialType *STy = dyn_cast<SequentialType>(Init->getType())) {
442 if (STy->getNumElements() > 16 && GV->hasNUsesOrMore(16))
443 return false; // It's not worth it.
444 } else
445 return false;
446
447 return GlobalUsersSafeToSRA(GV);
448}
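// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp.] At the source
// level, the "safe to SRA" test above accepts a global aggregate whose only uses
// are direct element accesses with constant, in-range indices, e.g. conceptually:
//
//   static struct { int A; float B[4]; } G;
//   ... G.A ...      // 'gep @G, 0, 0'      : safe
//   ... G.B[2] ...   // 'gep @G, 0, 1, 2'   : safe (constant, in range)
//   ... G.B[i] ...   // 'gep @G, 0, 1, %i'  : variable index, whole global rejected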
449
450/// Copy over the debug info for a variable to its SRA replacements.
451static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
452 uint64_t FragmentOffsetInBits,
453 uint64_t FragmentSizeInBits,
454 unsigned NumElements) {
455 SmallVector<DIGlobalVariableExpression *, 1> GVs;
456 GV->getDebugInfo(GVs);
457 for (auto *GVE : GVs) {
458 DIVariable *Var = GVE->getVariable();
459 DIExpression *Expr = GVE->getExpression();
460 if (NumElements > 1) {
461 if (auto E = DIExpression::createFragmentExpression(
462 Expr, FragmentOffsetInBits, FragmentSizeInBits))
463 Expr = *E;
464 else
465 return;
466 }
467 auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
468 NGV->addDebugInfo(NGVE);
469 }
470}
471
472/// Perform scalar replacement of aggregates on the specified global variable.
473/// This opens the door for other optimizations by exposing the behavior of the
474/// program in a more fine-grained way. We have determined that this
475/// transformation is safe already. We return the first global variable we
476/// insert so that the caller can reprocess it.
477static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
478 // Make sure this global only has simple uses that we can SRA.
479 if (!CanDoGlobalSRA(GV))
480 return nullptr;
481
482 assert(GV->hasLocalLinkage());
483 Constant *Init = GV->getInitializer();
484 Type *Ty = Init->getType();
485
486 std::map<unsigned, GlobalVariable *> NewGlobals;
487
488 // Get the alignment of the global, either explicit or target-specific.
489 unsigned StartAlignment = GV->getAlignment();
490 if (StartAlignment == 0)
491 StartAlignment = DL.getABITypeAlignment(GV->getType());
492
493 // Loop over all users and create replacement variables for used aggregate
494 // elements.
495 for (User *GEP : GV->users()) {
496 assert(((isa<ConstantExpr>(GEP) && cast<ConstantExpr>(GEP)->getOpcode() ==
497 Instruction::GetElementPtr) ||
498 isa<GetElementPtrInst>(GEP)) &&
499 "NonGEP CE's are not SRAable!");
500
501 // Ignore the 1th operand, which has to be zero or else the program is quite
502 // broken (undefined). Get the 2nd operand, which is the structure or array
503 // index.
504 unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
505 if (NewGlobals.count(ElementIdx) == 1)
506 continue; // we've already created replacement variable
507 assert(NewGlobals.count(ElementIdx) == 0);
508
509 Type *ElTy = nullptr;
510 if (StructType *STy = dyn_cast<StructType>(Ty))
511 ElTy = STy->getElementType(ElementIdx);
512 else if (SequentialType *STy = dyn_cast<SequentialType>(Ty))
513 ElTy = STy->getElementType();
514 assert(ElTy);
515
516 Constant *In = Init->getAggregateElement(ElementIdx);
517 assert(In && "Couldn't get element of initializer?");
518
519 GlobalVariable *NGV = new GlobalVariable(
520 ElTy, false, GlobalVariable::InternalLinkage, In,
521 GV->getName() + "." + Twine(ElementIdx), GV->getThreadLocalMode(),
522 GV->getType()->getAddressSpace());
523 NGV->setExternallyInitialized(GV->isExternallyInitialized());
524 NGV->copyAttributesFrom(GV);
525 NewGlobals.insert(std::make_pair(ElementIdx, NGV));
526
527 if (StructType *STy = dyn_cast<StructType>(Ty)) {
528 const StructLayout &Layout = *DL.getStructLayout(STy);
529
530 // Calculate the known alignment of the field. If the original aggregate
531 // had 256 byte alignment for example, something might depend on that:
532 // propagate info to each field.
533 uint64_t FieldOffset = Layout.getElementOffset(ElementIdx);
534 Align NewAlign(MinAlign(StartAlignment, FieldOffset));
535 if (NewAlign >
536 Align(DL.getABITypeAlignment(STy->getElementType(ElementIdx))))
537 NGV->setAlignment(NewAlign);
538
539 // Copy over the debug info for the variable.
540 uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
541 uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(ElementIdx);
542 transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size,
543 STy->getNumElements());
544 } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
545 uint64_t EltSize = DL.getTypeAllocSize(ElTy);
546 Align EltAlign(DL.getABITypeAlignment(ElTy));
547 uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
548
549 // Calculate the known alignment of the field. If the original aggregate
550 // had 256 byte alignment for example, something might depend on that:
551 // propagate info to each field.
552 Align NewAlign(MinAlign(StartAlignment, EltSize * ElementIdx));
553 if (NewAlign > EltAlign)
554 NGV->setAlignment(NewAlign);
555 transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
556 FragmentSizeInBits, STy->getNumElements());
557 }
558 }
559
560 if (NewGlobals.empty())
561 return nullptr;
562
563 Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
564 for (auto NewGlobalVar : NewGlobals)
565 Globals.push_back(NewGlobalVar.second);
566
567 LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");
568
569 Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext()));
570
571 // Loop over all of the uses of the global, replacing the constantexpr geps,
572 // with smaller constantexpr geps or direct references.
573 while (!GV->use_empty()) {
574 User *GEP = GV->user_back();
575 assert(((isa<ConstantExpr>(GEP) &&
576 cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
577 isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
578
579 // Ignore the 1th operand, which has to be zero or else the program is quite
580 // broken (undefined). Get the 2nd operand, which is the structure or array
581 // index.
582 unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
583 assert(NewGlobals.count(ElementIdx) == 1);
584
585 Value *NewPtr = NewGlobals[ElementIdx];
586 Type *NewTy = NewGlobals[ElementIdx]->getValueType();
587
588 // Form a shorter GEP if needed.
589 if (GEP->getNumOperands() > 3) {
590 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
591 SmallVector<Constant*, 8> Idxs;
592 Idxs.push_back(NullInt);
593 for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
594 Idxs.push_back(CE->getOperand(i));
595 NewPtr =
596 ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
597 } else {
598 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
599 SmallVector<Value*, 8> Idxs;
600 Idxs.push_back(NullInt);
601 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
602 Idxs.push_back(GEPI->getOperand(i));
603 NewPtr = GetElementPtrInst::Create(
604 NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(ElementIdx),
605 GEPI);
606 }
607 }
608 GEP->replaceAllUsesWith(NewPtr);
609
610 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
611 GEPI->eraseFromParent();
612 else
613 cast<ConstantExpr>(GEP)->destroyConstant();
614 }
615
616 // Delete the old global, now that it is dead.
617 Globals.erase(GV);
618 ++NumSRA;
619
620 assert(NewGlobals.size() > 0);
621 return NewGlobals.begin()->second;
622}
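// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp; the element
// names are the ones this code would generate.] Net effect of SRAGlobal on a
// struct global:
//
//   before:  @G = internal global { i32, double } { i32 1, double 2.0 }
//            ...gep @G, 0, 0...               ...gep @G, 0, 1...
//   after:   @G.0 = internal global i32 1     ; used by the first GEP's users
//            @G.1 = internal global double 2.0
//
// Each replacement keeps its slice of the initializer, an alignment derived from
// the original global's, and a debug-info fragment expression, and @G is erased.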
623
624/// Return true if all users of the specified value will trap if the value is
625/// dynamically null. PHIs keeps track of any phi nodes we've seen to avoid
626/// reprocessing them.
627static bool AllUsesOfValueWillTrapIfNull(const Value *V,
628 SmallPtrSetImpl<const PHINode*> &PHIs) {
629 for (const User *U : V->users()) {
630 if (const Instruction *I = dyn_cast<Instruction>(U)) {
631 // If null pointer is considered valid, then all uses are non-trapping.
632 // Non address-space 0 globals have already been pruned by the caller.
633 if (NullPointerIsDefined(I->getFunction()))
634 return false;
635 }
636 if (isa<LoadInst>(U)) {
637 // Will trap.
638 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
639 if (SI->getOperand(0) == V) {
640 //cerr << "NONTRAPPING USE: " << *U;
641 return false; // Storing the value.
642 }
643 } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
644 if (CI->getCalledValue() != V) {
645 //cerr << "NONTRAPPING USE: " << *U;
646 return false; // Not calling the ptr
647 }
648 } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
649 if (II->getCalledValue() != V) {
650 //cerr << "NONTRAPPING USE: " << *U;
651 return false; // Not calling the ptr
652 }
653 } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
654 if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
655 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
656 if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
657 } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
658 // If we've already seen this phi node, ignore it, it has already been
659 // checked.
660 if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
661 return false;
662 } else if (isa<ICmpInst>(U) &&
663 isa<ConstantPointerNull>(U->getOperand(1))) {
664 // Ignore icmp X, null
665 } else {
666 //cerr << "NONTRAPPING USE: " << *U;
667 return false;
668 }
669 }
670 return true;
671}
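// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp.] Examples of
// the distinction drawn above (schematic LLVM IR):
//
//   %v = load i32, i32* %p        ; traps if %p is null
//   call void %fp()               ; traps if %fp is null
//   %c = icmp eq i32* %p, null    ; does not trap (explicitly allowed)
//   store i32* %p, i32** @Slot    ; does not trap: the null value itself escapes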
672
673/// Return true if all uses of any loads from GV will trap if the loaded value
674/// is null. Note that this also permits comparisons of the loaded value
675/// against null, as a special case.
676static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
677 for (const User *U : GV->users())
678 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
679 SmallPtrSet<const PHINode*, 8> PHIs;
680 if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
681 return false;
682 } else if (isa<StoreInst>(U)) {
683 // Ignore stores to the global.
684 } else {
685 // We don't know or understand this user, bail out.
686 //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
687 return false;
688 }
689 return true;
690}
691
692static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
693 bool Changed = false;
694 for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
695 Instruction *I = cast<Instruction>(*UI++);
696 // Uses are non-trapping if null pointer is considered valid.
697 // Non address-space 0 globals are already pruned by the caller.
698 if (NullPointerIsDefined(I->getFunction()))
699 return false;
700 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
701 LI->setOperand(0, NewV);
702 Changed = true;
703 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
704 if (SI->getOperand(1) == V) {
705 SI->setOperand(1, NewV);
706 Changed = true;
707 }
708 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
709 CallSite CS(I);
710 if (CS.getCalledValue() == V) {
711 // Calling through the pointer! Turn into a direct call, but be careful
712 // that the pointer is not also being passed as an argument.
713 CS.setCalledFunction(NewV);
714 Changed = true;
715 bool PassedAsArg = false;
716 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
717 if (CS.getArgument(i) == V) {
718 PassedAsArg = true;
719 CS.setArgument(i, NewV);
720 }
721
722 if (PassedAsArg) {
723 // Being passed as an argument also. Be careful to not invalidate UI!
724 UI = V->user_begin();
725 }
726 }
727 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
728 Changed |= OptimizeAwayTrappingUsesOfValue(CI,
729 ConstantExpr::getCast(CI->getOpcode(),
730 NewV, CI->getType()));
731 if (CI->use_empty()) {
732 Changed = true;
733 CI->eraseFromParent();
734 }
735 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
736 // Should handle GEP here.
737 SmallVector<Constant*, 8> Idxs;
738 Idxs.reserve(GEPI->getNumOperands()-1);
739 for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
740 i != e; ++i)
741 if (Constant *C = dyn_cast<Constant>(*i))
742 Idxs.push_back(C);
743 else
744 break;
745 if (Idxs.size() == GEPI->getNumOperands()-1)
746 Changed |= OptimizeAwayTrappingUsesOfValue(
747 GEPI, ConstantExpr::getGetElementPtr(GEPI->getSourceElementType(),
748 NewV, Idxs));
749 if (GEPI->use_empty()) {
750 Changed = true;
751 GEPI->eraseFromParent();
752 }
753 }
754 }
755
756 return Changed;
757}
758
759/// The specified global has only one non-null value stored into it. If there
760/// are uses of the loaded value that would trap if the loaded value is
761/// dynamically null, then we know that they cannot be reachable with a null
762/// value, so we can optimize away the load.
763static bool OptimizeAwayTrappingUsesOfLoads(
764 GlobalVariable *GV, Constant *LV, const DataLayout &DL,
765 function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
766 bool Changed = false;
767
768 // Keep track of whether we are able to remove all the uses of the global
769 // other than the store that defines it.
770 bool AllNonStoreUsesGone = true;
771
772 // Replace all uses of loads with uses of uses of the stored value.
773 for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){
774 User *GlobalUser = *GUI++;
775 if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
776 Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
777 // If we were able to delete all uses of the loads
778 if (LI->use_empty()) {
779 LI->eraseFromParent();
780 Changed = true;
781 } else {
782 AllNonStoreUsesGone = false;
783 }
784 } else if (isa<StoreInst>(GlobalUser)) {
785 // Ignore the store that stores "LV" to the global.
786 assert(GlobalUser->getOperand(1) == GV &&
787 "Must be storing *to* the global");
788 } else {
789 AllNonStoreUsesGone = false;
790
791 // If we get here we could have other crazy uses that are transitively
792 // loaded.
793 assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
794 isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
795 isa<BitCastInst>(GlobalUser) ||
796 isa<GetElementPtrInst>(GlobalUser)) &&
797 "Only expect load and stores!");
798 }
799 }
800
801 if (Changed) {
802 LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
803 << "\n");
804 ++NumGlobUses;
805 }
806
807 // If we nuked all of the loads, then none of the stores are needed either,
808 // nor is the global.
809 if (AllNonStoreUsesGone) {
810 if (isLeakCheckerRoot(GV)) {
811 Changed |= CleanupPointerRootUsers(GV, GetTLI);
812 } else {
813 Changed = true;
814 CleanupConstantGlobalUsers(GV, nullptr, DL, GetTLI);
815 }
816 if (GV->use_empty()) {
817 LLVM_DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
818 Changed = true;
819 GV->eraseFromParent();
820 ++NumDeleted;
821 }
822 }
823 return Changed;
824}
825
826/// Walk the use list of V, constant folding all of the instructions that are
827/// foldable.
828static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
829 TargetLibraryInfo *TLI) {
830 for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
831 if (Instruction *I = dyn_cast<Instruction>(*UI++))
832 if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
833 I->replaceAllUsesWith(NewC);
834
835 // Advance UI to the next non-I use to avoid invalidating it!
836 // Instructions could multiply use V.
837 while (UI != E && *UI == I)
838 ++UI;
839 if (isInstructionTriviallyDead(I, TLI))
840 I->eraseFromParent();
841 }
842}
843
844/// This function takes the specified global variable, and transforms the
845/// program as if it always contained the result of the specified malloc.
846/// Because it is always the result of the specified malloc, there is no reason
847/// to actually DO the malloc. Instead, turn the malloc into a global, and any
848/// loads of GV as uses of the new global.
849static GlobalVariable *
850OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
851 ConstantInt *NElements, const DataLayout &DL,
852 TargetLibraryInfo *TLI) {
853 LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI
854 << '\n');
855
856 Type *GlobalType;
857 if (NElements->getZExtValue() == 1)
858 GlobalType = AllocTy;
859 else
860 // If we have an array allocation, the global variable is of an array.
861 GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());
862
863 // Create the new global variable. The contents of the malloc'd memory is
864 // undefined, so initialize with an undef value.
865 GlobalVariable *NewGV = new GlobalVariable(
866 *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
867 UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
868 GV->getThreadLocalMode());
869
870 // If there are bitcast users of the malloc (which is typical, usually we have
871 // a malloc + bitcast) then replace them with uses of the new global. Update
872 // other users to use the global as well.
873 BitCastInst *TheBC = nullptr;
874 while (!CI->use_empty()) {
875 Instruction *User = cast<Instruction>(CI->user_back());
876 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
877 if (BCI->getType() == NewGV->getType()) {
878 BCI->replaceAllUsesWith(NewGV);
879 BCI->eraseFromParent();
880 } else {
881 BCI->setOperand(0, NewGV);
882 }
883 } else {
884 if (!TheBC)
885 TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
886 User->replaceUsesOfWith(CI, TheBC);
887 }
888 }
889
890 Constant *RepValue = NewGV;
891 if (NewGV->getType() != GV->getValueType())
892 RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());
893
894 // If there is a comparison against null, we will insert a global bool to
895 // keep track of whether the global was initialized yet or not.
896 GlobalVariable *InitBool =
897 new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
898 GlobalValue::InternalLinkage,
899 ConstantInt::getFalse(GV->getContext()),
900 GV->getName()+".init", GV->getThreadLocalMode());
901 bool InitBoolUsed = false;
902
903 // Loop over all uses of GV, processing them in turn.
904 while (!GV->use_empty()) {
905 if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
906 // The global is initialized when the store to it occurs.
907 new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false,
908 None, SI->getOrdering(), SI->getSyncScopeID(), SI);
909 SI->eraseFromParent();
910 continue;
911 }
912
913 LoadInst *LI = cast<LoadInst>(GV->user_back());
914 while (!LI->use_empty()) {
915 Use &LoadUse = *LI->use_begin();
916 ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
917 if (!ICI) {
918 LoadUse = RepValue;
919 continue;
920 }
921
922 // Replace the cmp X, 0 with a use of the bool value.
923 // Sink the load to where the compare was, if atomic rules allow us to.
924 Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
925 InitBool->getName() + ".val", false, None,
926 LI->getOrdering(), LI->getSyncScopeID(),
927 LI->isUnordered() ? (Instruction *)ICI : LI);
928 InitBoolUsed = true;
929 switch (ICI->getPredicate()) {
930 default: llvm_unreachable("Unknown ICmp Predicate!");
931 case ICmpInst::ICMP_ULT:
932 case ICmpInst::ICMP_SLT: // X < null -> always false
933 LV = ConstantInt::getFalse(GV->getContext());
934 break;
935 case ICmpInst::ICMP_ULE:
936 case ICmpInst::ICMP_SLE:
937 case ICmpInst::ICMP_EQ:
938 LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
939 break;
940 case ICmpInst::ICMP_NE:
941 case ICmpInst::ICMP_UGE:
942 case ICmpInst::ICMP_SGE:
943 case ICmpInst::ICMP_UGT:
944 case ICmpInst::ICMP_SGT:
945 break; // no change.
946 }
947 ICI->replaceAllUsesWith(LV);
948 ICI->eraseFromParent();
949 }
950 LI->eraseFromParent();
951 }
952
953 // If the initialization boolean was used, insert it, otherwise delete it.
954 if (!InitBoolUsed) {
955 while (!InitBool->use_empty()) // Delete initializations
956 cast<StoreInst>(InitBool->user_back())->eraseFromParent();
957 delete InitBool;
958 } else
959 GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);
960
961 // Now the GV is dead, nuke it and the malloc..
962 GV->eraseFromParent();
963 CI->eraseFromParent();
964
965 // To further other optimizations, loop over all users of NewGV and try to
966 // constant prop them. This will promote GEP instructions with constant
967 // indices into GEP constant-exprs, which will allow global-opt to hack on it.
968 ConstantPropUsersOf(NewGV, DL, TLI);
969 if (RepValue != NewGV)
970 ConstantPropUsersOf(RepValue, DL, TLI);
971
972 return NewGV;
973}
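// [Editor's note: illustrative sketch, not part of GlobalOpt.cpp; the .body/.init
// names are the ones this code generates.] Shape of the transformation for a
// pointer global whose only stored value is a single malloc result:
//
//   before:  @G = internal global %T* null
//            one store of the malloc'd pointer into @G
//            loads of @G, compared against null, then used
//   after:   @G.body = internal global %T undef   ; the object itself
//            @G.init = internal global i1 false   ; set to true where the store was
//            loads of @G are replaced by @G.body, and the null comparisons are
//            rewritten in terms of @G.init, so the malloc itself disappears.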
974
975/// Scan the use-list of V checking to make sure that there are no complex uses
976/// of V. We permit simple things like dereferencing the pointer, but not
977/// storing through the address, unless it is to the specified global.
978static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
979 const GlobalVariable *GV,
980 SmallPtrSetImpl<const PHINode*> &PHIs) {
981 for (const User *U : V->users()) {
982 const Instruction *Inst = cast<Instruction>(U);
983
984 if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
985 continue; // Fine, ignore.
986 }
987
988 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
989 if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
990 return false; // Storing the pointer itself... bad.
991 continue; // Otherwise, storing through it, or storing into GV... fine.
992 }
993
994 // Must index into the array and into the struct.
995 if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
996 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
997 return false;
998 continue;
999 }
1000
1001 if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
1002 // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
1003 // cycles.
1004 if (PHIs.insert(PN).second)
1005 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
1006 return false;
1007 continue;
1008 }
1009
1010 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
1011 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
1012 return false;
1013 continue;
1014 }
1015
1016 return false;
1017 }
1018 return true;
1019}
1020
1021/// The Alloc pointer is stored into GV somewhere. Transform all uses of the
1022/// allocation into loads from the global and uses of the resultant pointer.
1023/// Further, delete the store into GV. This assumes that these value pass the
1024/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
1025static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
1026 GlobalVariable *GV) {
1027 while (!Alloc->use_empty()) {
1028 Instruction *U = cast<Instruction>(*Alloc->user_begin());
1029 Instruction *InsertPt = U;
1030 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1031 // If this is the store of the allocation into the global, remove it.
1032 if (SI->getOperand(1) == GV) {
1033 SI->eraseFromParent();
1034 continue;
1035 }
1036 } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
1037 // Insert the load in the corresponding predecessor, not right before the
1038 // PHI.
1039 InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
1040 } else if (isa<BitCastInst>(U)) {
1041 // Must be bitcast between the malloc and store to initialize the global.
1042 ReplaceUsesOfMallocWithGlobal(U, GV);
1043 U->eraseFromParent();
1044 continue;
1045 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
1046 // If this is a "GEP bitcast" and the user is a store to the global, then
1047 // just process it as a bitcast.
1048 if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
1049 if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
1050 if (SI->getOperand(1) == GV) {
1051 // Must be bitcast GEP between the malloc and store to initialize
1052 // the global.
1053 ReplaceUsesOfMallocWithGlobal(GEPI, GV);
1054 GEPI->eraseFromParent();
1055 continue;
1056 }
1057 }
1058
1059 // Insert a load from the global, and use it instead of the malloc.
1060 Value *NL =
1061 new LoadInst(GV->getValueType(), GV, GV->getName() + ".val", InsertPt);
1062 U->replaceUsesOfWith(Alloc, NL);
1063 }
1064}
1065
1066/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
1067/// perform heap SRA on. This permits GEP's that index through the array and
1068/// struct field, icmps of null, and PHIs.
1069static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
1070 SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
1071 SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
1072 // We permit two users of the load: setcc comparing against the null
1073 // pointer, and a getelementptr of a specific form.
1074 for (const User *U : V->users()) {
1075 const Instruction *UI = cast<Instruction>(U);
1076
1077 // Comparison against null is ok.
1078 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
1079 if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
1080 return false;
1081 continue;
1082 }
1083
1084 // getelementptr is also ok, but only a simple form.
1085 if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
1086 // Must index into the array and into the struct.
1087 if (GEPI->getNumOperands() < 3)
1088 return false;
1089
1090 // Otherwise the GEP is ok.
1091 continue;
1092 }
1093
1094 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1095 if (!LoadUsingPHIsPerLoad.insert(PN).second)
1096 // This means some phi nodes are dependent on each other.
1097 // Avoid infinite looping!
1098 return false;
1099 if (!LoadUsingPHIs.insert(PN).second)
1100 // If we have already analyzed this PHI, then it is safe.
1101 continue;
1102
1103 // Make sure all uses of the PHI are simple enough to transform.
1104 if (!LoadUsesSimpleEnoughForHeapSRA(PN,
1105 LoadUsingPHIs, LoadUsingPHIsPerLoad))
1106 return false;
1107
1108 continue;
1109 }
1110
1111 // Otherwise we don't know what this is, not ok.
1112 return false;
1113 }
1114
1115 return true;
1116}
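For illustration, here is a minimal source-level sketch of the load-use shapes the predicate above accepts, using a hypothetical linked-list global (not part of GlobalOpt.cpp; the real check runs on LLVM IR):

// Hypothetical illustration only.
struct Node { int Key; Node *Next; };
static Node *G;                         // stands in for a heap-SRA candidate

int OkUses() {
  Node *P = G;                          // load of the global
  if (P == nullptr)                     // icmp against null: accepted
    return 0;
  return P->Key;                        // GEP indexing array + struct field: accepted
}
// Storing P somewhere else, or pointer arithmetic the pass cannot split
// per-field, would make LoadUsesSimpleEnoughForHeapSRA return false.

int main() { return OkUses(); }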
1117
1118/// If all users of values loaded from GV are simple enough to perform HeapSRA,
1119/// return true.
1120static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
1121 Instruction *StoredVal) {
1122 SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
1123 SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
1124 for (const User *U : GV->users())
1125 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
1126 if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
1127 LoadUsingPHIsPerLoad))
1128 return false;
1129 LoadUsingPHIsPerLoad.clear();
1130 }
1131
1132 // If we reach here, we know that all uses of the loads and transitive uses
1133 // (through PHI nodes) are simple enough to transform. However, we don't know
1134 // that all inputs to the PHI nodes are in the same equivalence sets.
1135 // Check to verify that all operands of the PHIs are either PHIs that can be
1136 // transformed, loads from GV, or StoredVal itself.
1137 for (const PHINode *PN : LoadUsingPHIs) {
1138 for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
1139 Value *InVal = PN->getIncomingValue(op);
1140
1141 // PHI of the stored value itself is ok.
1142 if (InVal == StoredVal) continue;
1143
1144 if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
1145 // One of the PHIs in our set is (optimistically) ok.
1146 if (LoadUsingPHIs.count(InPN))
1147 continue;
1148 return false;
1149 }
1150
1151 // Load from GV is ok.
1152 if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
1153 if (LI->getOperand(0) == GV)
1154 continue;
1155
1156 // UNDEF? NULL?
1157
1158 // Anything else is rejected.
1159 return false;
1160 }
1161 }
1162
1163 return true;
1164}
1165
1166static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
1167 DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
1168 std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
1169 std::vector<Value *> &FieldVals = InsertedScalarizedValues[V];
1170
1171 if (FieldNo >= FieldVals.size())
1172 FieldVals.resize(FieldNo+1);
1173
1174 // If we already have this value, just reuse the previously scalarized
1175 // version.
1176 if (Value *FieldVal = FieldVals[FieldNo])
1177 return FieldVal;
1178
1179 // Depending on what instruction this is, we have several cases.
1180 Value *Result;
1181 if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
1182 // This is a scalarized version of the load from the global. Just create
1183 // a new Load of the scalarized global.
1184 Value *V = GetHeapSROAValue(LI->getOperand(0), FieldNo,
1185 InsertedScalarizedValues, PHIsToRewrite);
1186 Result = new LoadInst(V->getType()->getPointerElementType(), V,
1187 LI->getName() + ".f" + Twine(FieldNo), LI);
1188 } else {
1189 PHINode *PN = cast<PHINode>(V);
1190 // PN's type is pointer to struct. Make a new PHI of pointer to struct
1191 // field.
1192
1193 PointerType *PTy = cast<PointerType>(PN->getType());
1194 StructType *ST = cast<StructType>(PTy->getElementType());
1195
1196 unsigned AS = PTy->getAddressSpace();
1197 PHINode *NewPN =
1198 PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
1199 PN->getNumIncomingValues(),
1200 PN->getName()+".f"+Twine(FieldNo), PN);
1201 Result = NewPN;
1202 PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
1203 }
1204
1205 return FieldVals[FieldNo] = Result;
1206}
1207
1208/// Given a load instruction and a value derived from the load, rewrite the
1209/// derived value to use the HeapSRoA'd load.
1210static void RewriteHeapSROALoadUser(Instruction *LoadUser,
1211 DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
1212 std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
1213 // If this is a comparison against null, handle it.
1214 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
1215 assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
1216 // If we have a setcc of the loaded pointer, we can use a setcc of any
1217 // field.
1218 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
1219 InsertedScalarizedValues, PHIsToRewrite);
1220
1221 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
1222 Constant::getNullValue(NPtr->getType()),
1223 SCI->getName());
1224 SCI->replaceAllUsesWith(New);
1225 SCI->eraseFromParent();
1226 return;
1227 }
1228
1229 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
1230 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
1231 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
1232 && "Unexpected GEPI!");
1233
1234 // Load the pointer for this field.
1235 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
1236 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
1237 InsertedScalarizedValues, PHIsToRewrite);
1238
1239 // Create the new GEP idx vector.
1240 SmallVector<Value*, 8> GEPIdx;
1241 GEPIdx.push_back(GEPI->getOperand(1));
1242 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());
1243
1244 Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(), NewPtr, GEPIdx,
1245 GEPI->getName(), GEPI);
1246 GEPI->replaceAllUsesWith(NGEPI);
1247 GEPI->eraseFromParent();
1248 return;
1249 }
1250
1251 // Recursively transform the users of PHI nodes. This will lazily create the
1252 // PHIs that are needed for individual elements. Keep track of what PHIs we
1253 // see in InsertedScalarizedValues so that we don't get infinite loops (very
1254 // antisocial). If the PHI is already in InsertedScalarizedValues, it has
1255 // already been seen first by another load, so its uses have already been
1256 // processed.
1257 PHINode *PN = cast<PHINode>(LoadUser);
1258 if (!InsertedScalarizedValues.insert(std::make_pair(PN,
1259 std::vector<Value *>())).second)
1260 return;
1261
1262 // If this is the first time we've seen this PHI, recursively process all
1263 // users.
1264 for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
1265 Instruction *User = cast<Instruction>(*UI++);
1266 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
1267 }
1268}
1269
1270/// We are performing Heap SRoA on a global. Ptr is a value loaded from the
1271/// global. Eliminate all uses of Ptr, making them use FieldGlobals instead.
1272/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
1273static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
1274 DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
1275 std::vector<std::pair<PHINode *, unsigned> > &PHIsToRewrite) {
1276 for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
1277 Instruction *User = cast<Instruction>(*UI++);
1278 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
1279 }
1280
1281 if (Load->use_empty()) {
1282 Load->eraseFromParent();
1283 InsertedScalarizedValues.erase(Load);
1284 }
1285}
1286
1287/// CI is an allocation of an array of structures. Break it up into multiple
1288/// allocations of arrays of the fields.
1289static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
1290 Value *NElems, const DataLayout &DL,
1291 const TargetLibraryInfo *TLI) {
1292 LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI
1293 << '\n');
1294 Type *MAT = getMallocAllocatedType(CI, TLI);
1295 StructType *STy = cast<StructType>(MAT);
1296
1297 // There is guaranteed to be at least one use of the malloc (storing
1298 // it into GV). If there are other uses, change them to be uses of
1299 // the global to simplify later code. This also deletes the store
1300 // into GV.
1301 ReplaceUsesOfMallocWithGlobal(CI, GV);
1302
1303 // Okay, at this point, there are no users of the malloc. Insert N
1304 // new mallocs at the same place as CI, and N globals.
1305 std::vector<Value *> FieldGlobals;
1306 std::vector<Value *> FieldMallocs;
1307
1308 SmallVector<OperandBundleDef, 1> OpBundles;
1309 CI->getOperandBundlesAsDefs(OpBundles);
1310
1311 unsigned AS = GV->getType()->getPointerAddressSpace();
1312 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
1313 Type *FieldTy = STy->getElementType(FieldNo);
1314 PointerType *PFieldTy = PointerType::get(FieldTy, AS);
1315
1316 GlobalVariable *NGV = new GlobalVariable(
1317 *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
1318 Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
1319 nullptr, GV->getThreadLocalMode());
1320 NGV->copyAttributesFrom(GV);
1321 FieldGlobals.push_back(NGV);
1322
1323 unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
1324 if (StructType *ST = dyn_cast<StructType>(FieldTy))
1325 TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
1326 Type *IntPtrTy = DL.getIntPtrType(CI->getType());
1327 Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
1328 ConstantInt::get(IntPtrTy, TypeSize),
1329 NElems, OpBundles, nullptr,
1330 CI->getName() + ".f" + Twine(FieldNo));
1331 FieldMallocs.push_back(NMI);
1332 new StoreInst(NMI, NGV, CI);
1333 }
1334
1335 // The tricky aspect of this transformation is handling the case when malloc
1336 // fails. In the original code, malloc failing would set the result pointer
1337 // of malloc to null. In this case, some mallocs could succeed and others
1338 // could fail. As such, we emit code that looks like this:
1339 // F0 = malloc(field0)
1340 // F1 = malloc(field1)
1341 // F2 = malloc(field2)
1342 // if (F0 == 0 || F1 == 0 || F2 == 0) {
1343 // if (F0) { free(F0); F0 = 0; }
1344 // if (F1) { free(F1); F1 = 0; }
1345 // if (F2) { free(F2); F2 = 0; }
1346 // }
1347 // The malloc can also fail if its argument is too large.
1348 Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
1349 Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
1350 ConstantZero, "isneg");
1351 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
1352 Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
1353 Constant::getNullValue(FieldMallocs[i]->getType()),
1354 "isnull");
1355 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
1356 }
1357
1358 // Split the basic block at the old malloc.
1359 BasicBlock *OrigBB = CI->getParent();
1360 BasicBlock *ContBB =
1361 OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");
1362
1363 // Create the block to check the first condition. Put all these blocks at the
1364 // end of the function as they are unlikely to be executed.
1365 BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
1366 "malloc_ret_null",
1367 OrigBB->getParent());
1368
1369 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
1370 // branch on RunningOr.
1371 OrigBB->getTerminator()->eraseFromParent();
1372 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);
1373
1374 // Within the NullPtrBlock, we need to emit a comparison and branch for each
1375 // pointer, because some may be null while others are not.
1376 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1377 Value *GVVal =
1378 new LoadInst(cast<GlobalVariable>(FieldGlobals[i])->getValueType(),
1379 FieldGlobals[i], "tmp", NullPtrBlock);
1380 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
1381 Constant::getNullValue(GVVal->getType()));
1382 BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
1383 OrigBB->getParent());
1384 BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
1385 OrigBB->getParent());
1386 Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
1387 Cmp, NullPtrBlock);
1388
1389 // Fill in FreeBlock.
1390 CallInst::CreateFree(GVVal, OpBundles, BI);
1391 new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
1392 FreeBlock);
1393 BranchInst::Create(NextBlock, FreeBlock);
1394
1395 NullPtrBlock = NextBlock;
1396 }
1397
1398 BranchInst::Create(ContBB, NullPtrBlock);
1399
1400 // CI is no longer needed, remove it.
1401 CI->eraseFromParent();
1402
1403 /// As we process loads, if we can't immediately update all uses of the load,
1404 /// keep track of what scalarized loads are inserted for a given load.
1405 DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues;
1406 InsertedScalarizedValues[GV] = FieldGlobals;
1407
1408 std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite;
1409
1410 // Okay, the malloc site is completely handled. All of the uses of GV are now
1411 // loads, and all uses of those loads are simple. Rewrite them to use loads
1412 // of the per-field globals instead.
1413 for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
1414 Instruction *User = cast<Instruction>(*UI++);
1415
1416 if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1417 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
1418 continue;
1419 }
1420
1421 // Must be a store of null.
1422 StoreInst *SI = cast<StoreInst>(User);
1423 assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
1424 "Unexpected heap-sra user!");
1425
1426 // Insert a store of null into each global.
1427 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1428 Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
1429 Constant *Null = Constant::getNullValue(ValTy);
1430 new StoreInst(Null, FieldGlobals[i], SI);
1431 }
1432 // Erase the original store.
1433 SI->eraseFromParent();
1434 }
1435
1436 // While we have PHIs that are interesting to rewrite, do it.
1437 while (!PHIsToRewrite.empty()) {
1438 PHINode *PN = PHIsToRewrite.back().first;
1439 unsigned FieldNo = PHIsToRewrite.back().second;
1440 PHIsToRewrite.pop_back();
1441 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
1442 assert(FieldPN->getNumIncomingValues() == 0 && "Already processed this phi");
1443
1444 // Add all the incoming values. This can materialize more phis.
1445 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1446 Value *InVal = PN->getIncomingValue(i);
1447 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
1448 PHIsToRewrite);
1449 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
1450 }
1451 }
1452
1453 // Drop all inter-phi links and any loads that made it this far.
1454 for (DenseMap<Value *, std::vector<Value *>>::iterator
1455 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1456 I != E; ++I) {
1457 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1458 PN->dropAllReferences();
1459 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1460 LI->dropAllReferences();
1461 }
1462
1463 // Delete all the phis and loads now that inter-references are dead.
1464 for (DenseMap<Value *, std::vector<Value *>>::iterator
1465 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1466 I != E; ++I) {
1467 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1468 PN->eraseFromParent();
1469 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1470 LI->eraseFromParent();
1471 }
1472
1473 // The old global is now dead, remove it.
1474 GV->eraseFromParent();
1475
1476 ++NumHeapSRA;
1477 return cast<GlobalVariable>(FieldGlobals[0]);
1478}
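For illustration, a rough source-level picture of what PerformHeapAllocSRoA achieves, with hypothetical names (not part of GlobalOpt.cpp; the actual transform operates on IR and also emits the malloc-failure cleanup described in the comment block inside the function):

// Hypothetical illustration only.
#include <cstdlib>

struct Pair { int A; double B; };

// Before: one internal global holding one malloc'd array of structs.
static Pair *G;
// After (conceptually): one global and one allocation per field.
static int    *G_f0;
static double *G_f1;

void InitAfter(unsigned N) {
  G_f0 = static_cast<int *>(std::malloc(N * sizeof(int)));
  G_f1 = static_cast<double *>(std::malloc(N * sizeof(double)));
  // The real transform additionally checks each field allocation for failure
  // and, if any failed, frees and nulls out all of them so the program still
  // behaves as if a single malloc had failed.
}

int ReadA(unsigned I) { return G_f0[I]; }   // was G[I].A before the split

int main() { InitAfter(4); G_f0[0] = 7; (void)G; return ReadA(0); }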
1479
1480/// This function is called when we see a pointer global variable with a single
1481/// value stored into it that is a malloc or a cast of a malloc.
1482static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
1483 Type *AllocTy,
1484 AtomicOrdering Ordering,
1485 const DataLayout &DL,
1486 TargetLibraryInfo *TLI) {
1487 // If this is a malloc of an abstract type, don't touch it.
1488 if (!AllocTy->isSized())
1489 return false;
1490
1491 // We can't optimize this global unless all uses of it are *known* to be
1492 // of the malloc value, not of the null initializer value (consider a use
1493 // that compares the global's value against zero to see if the malloc has
1494 // been reached). To do this, we check to see if all uses of the global
1495 // would trap if the global were null: this proves that they must all
1496 // happen after the malloc.
1497 if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
1498 return false;
1499
1500 // We can't optimize this if the malloc itself is used in a complex way,
1501 // for example, being stored into multiple globals. This allows the
1502 // malloc to be stored into the specified global, loaded, icmp'd, and
1503 // GEP'd. These are all uses that we can transform to go through the
1504 // global instead.
1505 SmallPtrSet<const PHINode*, 8> PHIs;
1506 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
1507 return false;
1508
1509 // If we have a global that is only initialized with a fixed size malloc,
1510 // transform the program to use global memory instead of malloc'd memory.
1511 // This eliminates dynamic allocation, avoids an indirection accessing the
1512 // data, and exposes the resultant global to further GlobalOpt.
1513 // We cannot optimize the malloc if we cannot determine malloc array size.
1514 Value *NElems = getMallocArraySize(CI, DL, TLI, true);
1515 if (!NElems)
1516 return false;
1517
1518 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1519 // Restrict this transformation to only working on small allocations
1520 // (2048 bytes currently), as we don't want to introduce a 16M global or
1521 // something.
1522 if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
1523 OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
1524 return true;
1525 }
1526
1527 // If the allocation is an array of structures, consider transforming this
1528 // into multiple malloc'd arrays, one for each field. This is basically
1529 // SRoA for malloc'd memory.
1530
1531 if (Ordering != AtomicOrdering::NotAtomic)
1532 return false;
1533
1534 // If this is an allocation of a fixed size array of structs, analyze as a
1535 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1536 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1537 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1538 AllocTy = AT->getElementType();
1539
1540 StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1541 if (!AllocSTy)
1542 return false;
1543
1544 // If the structure has an unreasonable number of fields, leave it
1545 // alone.
1546 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1547 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
1548
1549 // If this is a fixed size array, transform the Malloc to be an alloc of
1550 // structs. malloc [100 x struct],1 -> malloc struct, 100
1551 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
1552 Type *IntPtrTy = DL.getIntPtrType(CI->getType());
1553 unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
1554 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1555 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1556 SmallVector<OperandBundleDef, 1> OpBundles;
1557 CI->getOperandBundlesAsDefs(OpBundles);
1558 Instruction *Malloc =
1559 CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
1560 OpBundles, nullptr, CI->getName());
1561 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1562 CI->replaceAllUsesWith(Cast);
1563 CI->eraseFromParent();
1564 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
1565 CI = cast<CallInst>(BCI->getOperand(0));
1566 else
1567 CI = cast<CallInst>(Malloc);
1568 }
1569
1570 PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
1571 TLI);
1572 return true;
1573 }
1574
1575 return false;
1576}
1577
1578// Try to optimize globals based on the knowledge that only one value (besides
1579// its initializer) is ever stored to the global.
1580static bool
1581optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1582 AtomicOrdering Ordering, const DataLayout &DL,
1583 function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
1584 // Ignore no-op GEPs and bitcasts.
1585 StoredOnceVal = StoredOnceVal->stripPointerCasts();
1586
1587 // If we are dealing with a pointer global that is initialized to null and
1588 // only has one (non-null) value stored into it, then we can optimize any
1589 // users of the loaded value (often calls and loads) that would trap if the
1590 // value was null.
1591 if (GV->getInitializer()->getType()->isPointerTy() &&
1592 GV->getInitializer()->isNullValue() &&
1593 !NullPointerIsDefined(
1594 nullptr /* F */,
1595 GV->getInitializer()->getType()->getPointerAddressSpace())) {
1596 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1597 if (GV->getInitializer()->getType() != SOVC->getType())
1598 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1599
1600 // Optimize away any trapping uses of the loaded value.
1601 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, GetTLI))
1602 return true;
1603 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, GetTLI)) {
1604 auto *TLI = &GetTLI(*CI->getFunction());
1605 Type *MallocType = getMallocAllocatedType(CI, TLI);
1606 if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
1607 Ordering, DL, TLI))
1608 return true;
1609 }
1610 }
1611
1612 return false;
1613}
1614
1615/// At this point, we have learned that the only two values ever stored into GV
1616/// are its initializer and OtherVal. See if we can shrink the global into a
1617/// boolean and select between the two values whenever it is used. This exposes
1618/// the values to other scalar optimizations.
1619static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1620 Type *GVElType = GV->getValueType();
1621
1622 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1623 // an FP value, pointer or vector, don't do this optimization because a select
1624 // between them is very expensive and unlikely to lead to later
1625 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1626 // where v1 and v2 both require constant pool loads, a big loss.
1627 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1628 GVElType->isFloatingPointTy() ||
1629 GVElType->isPointerTy() || GVElType->isVectorTy())
1630 return false;
1631
1632 // Walk the use list of the global seeing if all the uses are load or store.
1633 // If there is anything else, bail out.
1634 for (User *U : GV->users())
1635 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1636 return false;
1637
1638 LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
1639
1640 // Create the new global, initializing it to false.
1641 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1642 false,
1643 GlobalValue::InternalLinkage,
1644 ConstantInt::getFalse(GV->getContext()),
1645 GV->getName()+".b",
1646 GV->getThreadLocalMode(),
1647 GV->getType()->getAddressSpace());
1648 NewGV->copyAttributesFrom(GV);
1649 GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);
1650
1651 Constant *InitVal = GV->getInitializer();
1652 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1653 "No reason to shrink to bool!");
1654
1655 SmallVector<DIGlobalVariableExpression *, 1> GVs;
1656 GV->getDebugInfo(GVs);
1657
1658 // If initialized to zero and storing one into the global, we can use a cast
1659 // instead of a select to synthesize the desired value.
1660 bool IsOneZero = false;
1661 bool EmitOneOrZero = true;
1662 auto *CI = dyn_cast<ConstantInt>(OtherVal);
1663 if (CI && CI->getValue().getActiveBits() <= 64) {
1664 IsOneZero = InitVal->isNullValue() && CI->isOne();
1665
1666 auto *CIInit = dyn_cast<ConstantInt>(GV->getInitializer());
1667 if (CIInit && CIInit->getValue().getActiveBits() <= 64) {
1668 uint64_t ValInit = CIInit->getZExtValue();
1669 uint64_t ValOther = CI->getZExtValue();
1670 uint64_t ValMinus = ValOther - ValInit;
1671
1672 for(auto *GVe : GVs){
1673 DIGlobalVariable *DGV = GVe->getVariable();
1674 DIExpression *E = GVe->getExpression();
1675 const DataLayout &DL = GV->getParent()->getDataLayout();
1676 unsigned SizeInOctets =
1677 DL.getTypeAllocSizeInBits(NewGV->getType()->getElementType()) / 8;
1678
1679 // It is expected that the address of the optimized global variable is
1680 // on top of the stack. After optimization, the value of that variable
1681 // will be either 0 (the initial value) or 1 (the other value). The
1682 // following expression should return a constant integer value depending
1683 // on the value at the global object's address:
1684 // val * (ValOther - ValInit) + ValInit:
1685 // DW_OP_deref DW_OP_constu <ValMinus>
1686 // DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
1687 SmallVector<uint64_t, 12> Ops = {
1688 dwarf::DW_OP_deref_size, SizeInOctets,
1689 dwarf::DW_OP_constu, ValMinus,
1690 dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
1691 dwarf::DW_OP_plus};
1692 bool WithStackValue = true;
1693 E = DIExpression::prependOpcodes(E, Ops, WithStackValue);
1694 DIGlobalVariableExpression *DGVE =
1695 DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
1696 NewGV->addDebugInfo(DGVE);
1697 }
1698 EmitOneOrZero = false;
1699 }
1700 }
1701
1702 if (EmitOneOrZero) {
1703 // FIXME: This will only emit the address for the debugger, and only
1704 // 0 or 1 will ever be written to it.
1705 for(auto *GV : GVs)
1706 NewGV->addDebugInfo(GV);
1707 }
1708
1709 while (!GV->use_empty()) {
1710 Instruction *UI = cast<Instruction>(GV->user_back());
1711 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1712 // Change the store into a boolean store.
1713 bool StoringOther = SI->getOperand(0) == OtherVal;
1714 // Only do this if we weren't storing a loaded value.
1715 Value *StoreVal;
1716 if (StoringOther || SI->getOperand(0) == InitVal) {
1717 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1718 StoringOther);
1719 } else {
1720 // Otherwise, we are storing a previously loaded copy. To do this,
1721 // change the copy from copying the original value to just copying the
1722 // bool.
1723 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1724
1725 // If we've already replaced the input, StoredVal will be a cast or
1726 // select instruction. If not, it will be a load of the original
1727 // global.
1728 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1729 assert(LI->getOperand(0) == GV && "Not a copy!");
1730 // Insert a new load, to preserve the saved value.
1731 StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
1732 LI->getName() + ".b", false, None,
1733 LI->getOrdering(), LI->getSyncScopeID(), LI);
1734 } else {
1735 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1736 "This is not a form that we understand!");
1737 StoreVal = StoredVal->getOperand(0);
1738 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1739 }
1740 }
1741 StoreInst *NSI =
1742 new StoreInst(StoreVal, NewGV, false, None, SI->getOrdering(),
1743 SI->getSyncScopeID(), SI);
1744 NSI->setDebugLoc(SI->getDebugLoc());
1745 } else {
1746 // Change the load into a load of bool then a select.
1747 LoadInst *LI = cast<LoadInst>(UI);
1748 LoadInst *NLI = new LoadInst(NewGV->getValueType(), NewGV,
1749 LI->getName() + ".b", false, None,
1750 LI->getOrdering(), LI->getSyncScopeID(), LI);
1751 Instruction *NSI;
1752 if (IsOneZero)
1753 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1754 else
1755 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1756 NSI->takeName(LI);
1757 // Since LI is split into two instructions, NLI and NSI both inherit the
1758 // same DebugLoc
1759 NLI->setDebugLoc(LI->getDebugLoc());
1760 NSI->setDebugLoc(LI->getDebugLoc());
1761 LI->replaceAllUsesWith(NSI);
1762 }
1763 UI->eraseFromParent();
1764 }
1765
1766 // Retain the name of the old global variable. People who are debugging their
1767 // programs may expect these variables to be named the same.
1768 NewGV->takeName(GV);
1769 GV->eraseFromParent();
1770 return true;
1771}
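For illustration, a minimal sketch of the source-level effect of TryToShrinkGlobalToBoolean, assuming a hypothetical two-valued integer global (not part of GlobalOpt.cpp; the pass performs the equivalent rewrite on IR loads and stores):

// Hypothetical illustration only.
#include <cstdio>

static int  G   = 10;     // original global: only ever holds 10 or 42
static bool G_b = false;  // shrunk form: false <=> 10, true <=> 42

void StoreOther() { G_b = true; }            // store of OtherVal -> store of i1 true
int  LoadG()      { return G_b ? 42 : 10; }  // load -> load of i1 + select
// When the two values are 0 and 1 (IsOneZero above), a zext of the i1
// replaces the select.

int main() {
  std::printf("%d\n", LoadG());   // 10
  StoreOther();
  std::printf("%d\n", LoadG());   // 42
  (void)G;
  return 0;
}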
1772
1773static bool deleteIfDead(
1774 GlobalValue &GV, SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
1775 GV.removeDeadConstantUsers();
1776
1777 if (!GV.isDiscardableIfUnused() && !GV.isDeclaration())
1778 return false;
1779
1780 if (const Comdat *C = GV.getComdat())
1781 if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
1782 return false;
1783
1784 bool Dead;
1785 if (auto *F = dyn_cast<Function>(&GV))
1786 Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead();
1787 else
1788 Dead = GV.use_empty();
1789 if (!Dead)
1790 return false;
1791
1792 LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
1793 GV.eraseFromParent();
1794 ++NumDeleted;
1795 return true;
1796}
1797
1798static bool isPointerValueDeadOnEntryToFunction(
1799 const Function *F, GlobalValue *GV,
1800 function_ref<DominatorTree &(Function &)> LookupDomTree) {
1801 // Find all uses of GV. We expect them all to be in F, and if we can't
1802 // identify any of the uses we bail out.
1803 //
1804 // On each of these uses, identify if the memory that GV points to is
1805 // used/required/live at the start of the function. If it is not, for example
1806 // if the first thing the function does is store to the GV, the GV can
1807 // possibly be demoted.
1808 //
1809 // We don't do an exhaustive search for memory operations - simply look
1810 // through bitcasts as they're quite common and benign.
1811 const DataLayout &DL = GV->getParent()->getDataLayout();
1812 SmallVector<LoadInst *, 4> Loads;
1813 SmallVector<StoreInst *, 4> Stores;
1814 for (auto *U : GV->users()) {
1815 if (Operator::getOpcode(U) == Instruction::BitCast) {
1816 for (auto *UU : U->users()) {
1817 if (auto *LI = dyn_cast<LoadInst>(UU))
1818 Loads.push_back(LI);
1819 else if (auto *SI = dyn_cast<StoreInst>(UU))
1820 Stores.push_back(SI);
1821 else
1822 return false;
1823 }
1824 continue;
1825 }
1826
1827 Instruction *I = dyn_cast<Instruction>(U);
1828 if (!I)
1829 return false;
1830 assert(I->getParent()->getParent() == F);
1831
1832 if (auto *LI = dyn_cast<LoadInst>(I))
1833 Loads.push_back(LI);
1834 else if (auto *SI = dyn_cast<StoreInst>(I))
1835 Stores.push_back(SI);
1836 else
1837 return false;
1838 }
1839
1840 // We have identified all uses of GV into loads and stores. Now check if all
1841 // of them are known not to depend on the value of the global at the function
1842 // entry point. We do this by ensuring that every load is dominated by at
1843 // least one store.
1844 auto &DT = LookupDomTree(*const_cast<Function *>(F));
1845
1846 // The below check is quadratic. Check we're not going to do too many tests.
1847 // FIXME: Even though this will always have worst-case quadratic time, we
1848 // could put effort into minimizing the average time by putting stores that
1849 // have been shown to dominate at least one load at the beginning of the
1850 // Stores array, making subsequent dominance checks more likely to succeed
1851 // early.
1852 //
1853 // The threshold here is fairly large because global->local demotion is a
1854 // very powerful optimization should it fire.
1855 const unsigned Threshold = 100;
1856 if (Loads.size() * Stores.size() > Threshold)
1857 return false;
1858
1859 for (auto *L : Loads) {
1860 auto *LTy = L->getType();
1861 if (none_of(Stores, [&](const StoreInst *S) {
1862 auto *STy = S->getValueOperand()->getType();
1863 // The load is only dominated by the store if DomTree says so
1864 // and the number of bits loaded in L is less than or equal to
1865 // the number of bits stored in S.
1866 return DT.dominates(S, L) &&
1867 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
1868 }))
1869 return false;
1870 }
1871 // All loads have known dependences inside F, so the global can be localized.
1872 return true;
1873}
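For illustration, a small sketch (hypothetical names, not part of GlobalOpt.cpp) of the property this function establishes: every load of the global is dominated by a store of at least as many bits, so the value on entry can never be observed:

// Hypothetical illustration only.
static int G;               // internal global used by exactly one function

int Work(int X) {
  G = X * 2;                // this store dominates the load below
  return G + 1;             // the load can never see G's value on entry
}
// Because no load can observe the initial value, processInternalGlobal
// (below) may replace G with an alloca in Work and delete the global.

int main() { return Work(3) == 7 ? 0 : 1; }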
1874
1875/// C may have non-instruction users. Can all of those users be turned into
1876/// instructions?
1877static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
1878 // We don't do this exhaustively. The most common pattern that we really need
1879 // to care about is a constant GEP or constant bitcast - so just looking
1880 // through one single ConstantExpr.
1881 //
1882 // The set of constants that this function returns true for must be able to be
1883 // handled by makeAllConstantUsesInstructions.
1884 for (auto *U : C->users()) {
1885 if (isa<Instruction>(U))
1886 continue;
1887 if (!isa<ConstantExpr>(U))
1888 // Non instruction, non-constantexpr user; cannot convert this.
1889 return false;
1890 for (auto *UU : U->users())
1891 if (!isa<Instruction>(UU))
1892 // A constantexpr used by another constant. We don't try and recurse any
1893 // further but just bail out at this point.
1894 return false;
1895 }
1896
1897 return true;
1898}
1899
1900/// C may have non-instruction users, and
1901/// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the
1902/// non-instruction users to instructions.
1903static void makeAllConstantUsesInstructions(Constant *C) {
1904 SmallVector<ConstantExpr*,4> Users;
1905 for (auto *U : C->users()) {
1906 if (isa<ConstantExpr>(U))
1907 Users.push_back(cast<ConstantExpr>(U));
1908 else
1909 // We should never get here; allNonInstructionUsersCanBeMadeInstructions
1910 // should not have returned true for C.
1911 assert(
1912 isa<Instruction>(U) &&
1913 "Can't transform non-constantexpr non-instruction to instruction!");
1914 }
1915
1916 SmallVector<Value*,4> UUsers;
1917 for (auto *U : Users) {
1918 UUsers.clear();
1919 for (auto *UU : U->users())
1920 UUsers.push_back(UU);
1921 for (auto *UU : UUsers) {
1922 Instruction *UI = cast<Instruction>(UU);
1923 Instruction *NewU = U->getAsInstruction();
1924 NewU->insertBefore(UI);
1925 UI->replaceUsesOfWith(U, NewU);
1926 }
1927 // We've replaced all the uses, so destroy the constant. (destroyConstant
1928 // will update value handles and metadata.)
1929 U->destroyConstant();
1930 }
1931}
1932
1933/// Analyze the specified global variable and optimize
1934/// it if possible. If we make a change, return true.
1935static bool
1936processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS,
1937 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
1938 function_ref<DominatorTree &(Function &)> LookupDomTree) {
1939 auto &DL = GV->getParent()->getDataLayout();
1940 // If this is a first class global and has only one accessing function and
1941 // this function is non-recursive, we replace the global with a local alloca
1942 // in this function.
1943 //
1944 // NOTE: It doesn't make sense to promote non-single-value types since we
1945 // are just replacing static memory with stack memory.
1946 //
1947 // If the global is in a different address space, don't bring it to the stack.
1948 if (!GS.HasMultipleAccessingFunctions &&
1949 GS.AccessingFunction &&
1950 GV->getValueType()->isSingleValueType() &&
1951 GV->getType()->getAddressSpace() == 0 &&
1952 !GV->isExternallyInitialized() &&
1953 allNonInstructionUsersCanBeMadeInstructions(GV) &&
1954 GS.AccessingFunction->doesNotRecurse() &&
1955 isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
1956 LookupDomTree)) {
1957 const DataLayout &DL = GV->getParent()->getDataLayout();
1958
1959 LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
1960 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1961 ->getEntryBlock().begin());
1962 Type *ElemTy = GV->getValueType();
1963 // FIXME: Pass Global's alignment when globals have alignment
1964 AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
1965 GV->getName(), &FirstI);
1966 if (!isa<UndefValue>(GV->getInitializer()))
1967 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1968
1969 makeAllConstantUsesInstructions(GV);
1970
1971 GV->replaceAllUsesWith(Alloca);
1972 GV->eraseFromParent();
1973 ++NumLocalized;
1974 return true;
1975 }
1976
1977 // If the global is never loaded (but may be stored to), it is dead.
1978 // Delete it now.
1979 if (!GS.IsLoaded) {
1980 LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
1981
1982 bool Changed;
1983 if (isLeakCheckerRoot(GV)) {
1984 // Delete any constant stores to the global.
1985 Changed = CleanupPointerRootUsers(GV, GetTLI);
1986 } else {
1987 // Delete any stores we can find to the global. We may not be able to
1988 // make it completely dead though.
1989 Changed =
1990 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);
1991 }
1992
1993 // If the global is dead now, delete it.
1994 if (GV->use_empty()) {
1995 GV->eraseFromParent();
1996 ++NumDeleted;
1997 Changed = true;
1998 }
1999 return Changed;
2000
2001 }
2002 if (GS.StoredType <= GlobalStatus::InitializerStored) {
2003 LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
2004
2005 // Don't actually mark a global constant if it's atomic because atomic loads
2006 // are implemented by a trivial cmpxchg in some edge-cases and that usually
2007 // requires write access to the variable even if it's not actually changed.
2008 if (GS.Ordering == AtomicOrdering::NotAtomic)
2009 GV->setConstant(true);
2010
2011 // Clean up any obviously simplifiable users now.
2012 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);
2013
2014 // If the global is dead now, just nuke it.
2015 if (GV->use_empty()) {
2016 LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
2017 << "all users and delete global!\n");
2018 GV->eraseFromParent();
2019 ++NumDeleted;
2020 return true;
2021 }
2022
2023 // Fall through to the next check; see if we can optimize further.
2024 ++NumMarked;
2025 }
2026 if (!GV->getInitializer()->getType()->isSingleValueType()) {
2027 const DataLayout &DL = GV->getParent()->getDataLayout();
2028 if (SRAGlobal(GV, DL))
2029 return true;
2030 }
2031 if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) {
2032 // If the initial value for the global was an undef value, and if only
2033 // one other value was stored into it, we can just change the
2034 // initializer to be the stored value, then delete all stores to the
2035 // global. This allows us to mark it constant.
2036 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
2037 if (isa<UndefValue>(GV->getInitializer())) {
2038 // Change the initial value here.
2039 GV->setInitializer(SOVConstant);
2040
2041 // Clean up any obviously simplifiable users now.
2042 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);
2043
2044 if (GV->use_empty()) {
2045 LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to "
2046 << "simplify all users and delete global!\n");
2047 GV->eraseFromParent();
2048 ++NumDeleted;
2049 }
2050 ++NumSubstitute;
2051 return true;
2052 }
2053
2054 // Try to optimize globals based on the knowledge that only one value
2055 // (besides its initializer) is ever stored to the global.
2056 if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL,
2057 GetTLI))
2058 return true;
2059
2060 // Otherwise, if the global was not a boolean, we can shrink it to be a
2061 // boolean.
2062 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
2063 if (GS.Ordering == AtomicOrdering::NotAtomic) {
2064 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
2065 ++NumShrunkToBool;
2066 return true;
2067 }
2068 }
2069 }
2070 }
2071
2072 return false;
2073}
2074
2075/// Analyze the specified global variable and optimize it if possible. If we
2076/// make a change, return true.
2077static bool
2078processGlobal(GlobalValue &GV,
2079 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2080 function_ref<DominatorTree &(Function &)> LookupDomTree) {
2081 if (GV.getName().startswith("llvm."))
2082 return false;
2083
2084 GlobalStatus GS;
2085
2086 if (GlobalStatus::analyzeGlobal(&GV, GS))
2087 return false;
2088
2089 bool Changed = false;
2090 if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) {
2091 auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global
2092 : GlobalValue::UnnamedAddr::Local;
2093 if (NewUnnamedAddr != GV.getUnnamedAddr()) {
2094 GV.setUnnamedAddr(NewUnnamedAddr);
2095 NumUnnamed++;
2096 Changed = true;
2097 }
2098 }
2099
2100 // Do more involved optimizations if the global is internal.
2101 if (!GV.hasLocalLinkage())
2102 return Changed;
2103
2104 auto *GVar = dyn_cast<GlobalVariable>(&GV);
2105 if (!GVar)
2106 return Changed;
2107
2108 if (GVar->isConstant() || !GVar->hasInitializer())
2109 return Changed;
2110
2111 return processInternalGlobal(GVar, GS, GetTLI, LookupDomTree) || Changed;
2112}
2113
2114/// Walk all of the direct calls of the specified function, changing them to
2115/// FastCC.
2116static void ChangeCalleesToFastCall(Function *F) {
2117 for (User *U : F->users()) {
2118 if (isa<BlockAddress>(U))
2119 continue;
2120 CallSite CS(cast<Instruction>(U));
2121 CS.setCallingConv(CallingConv::Fast);
2122 }
2123}
2124
2125static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs,
2126 Attribute::AttrKind A) {
2127 unsigned AttrIndex;
2128 if (Attrs.hasAttrSomewhere(A, &AttrIndex))
2129 return Attrs.removeAttribute(C, AttrIndex, A);
2130 return Attrs;
2131}
2132
2133static void RemoveAttribute(Function *F, Attribute::AttrKind A) {
2134 F->setAttributes(StripAttr(F->getContext(), F->getAttributes(), A));
2135 for (User *U : F->users()) {
2136 if (isa<BlockAddress>(U))
2137 continue;
2138 CallSite CS(cast<Instruction>(U));
2139 CS.setAttributes(StripAttr(F->getContext(), CS.getAttributes(), A));
2140 }
2141}
2142
2143/// Return true if this is a calling convention that we'd like to change. The
2144/// idea here is that we don't want to mess with the convention if the user
2145/// explicitly requested something with performance implications like coldcc,
2146/// GHC, or anyregcc.
2147static bool hasChangeableCC(Function *F) {
2148 CallingConv::ID CC = F->getCallingConv();
2149
2150 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
2151 if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
2152 return false;
2153
2154 // FIXME: Change CC for the whole chain of musttail calls when possible.
2155 //
2156 // Can't change CC of the function that either has musttail calls, or is a
2157 // musttail callee itself
2158 for (User *U : F->users()) {
2159 if (isa<BlockAddress>(U))
2160 continue;
2161 CallInst* CI = dyn_cast<CallInst>(U);
2162 if (!CI)
2163 continue;
2164
2165 if (CI->isMustTailCall())
2166 return false;
2167 }
2168
2169 for (BasicBlock &BB : *F)
2170 if (BB.getTerminatingMustTailCall())
2171 return false;
2172
2173 return true;
2174}
2175
2176/// Return true if the block containing the call site has a BlockFrequency of
2177/// less than ColdCCRelFreq% of the entry block.
2178static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) {
2179 const BranchProbability ColdProb(ColdCCRelFreq, 100);
2180 auto CallSiteBB = CS.getInstruction()->getParent();
2181 auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
2182 auto CallerEntryFreq =
2183 CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock()));
2184 return CallSiteFreq < CallerEntryFreq * ColdProb;
2185}
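For illustration, the comparison above in plain arithmetic, as a hedged sketch (hypothetical helper, not part of GlobalOpt.cpp); ColdCCRelFreq is a pass option expressed as a percentage of the entry block's frequency:

// Hypothetical illustration only.
#include <cstdint>
#include <cstdio>

// Roughly mirrors CallSiteFreq < CallerEntryFreq * BranchProbability(ColdCCRelFreq, 100).
bool IsColdSketch(std::uint64_t CallSiteFreq, std::uint64_t EntryFreq,
                  unsigned ColdCCRelFreq) {
  return CallSiteFreq * 100 < EntryFreq * ColdCCRelFreq;
}

int main() {
  // With a 2% threshold, a block reached once per 100 entries is cold,
  // while one reached five times per 100 entries is not.
  std::printf("%d %d\n", IsColdSketch(1, 100, 2), IsColdSketch(5, 100, 2));
  return 0;
}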
2186
2187// This function checks if the input function F is cold at all call sites. It
2188// also looks at each call site's containing function, returning false if the
2189// caller function contains other non-cold calls. The input vector AllCallsCold
2190// contains a list of functions that only have call sites in cold blocks.
2191static bool
2192isValidCandidateForColdCC(Function &F,
2193 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2194 const std::vector<Function *> &AllCallsCold) {
2195
2196 if (F.user_empty())
2197 return false;
2198
2199 for (User *U : F.users()) {
2200 if (isa<BlockAddress>(U))
2201 continue;
2202
2203 CallSite CS(cast<Instruction>(U));
2204 Function *CallerFunc = CS.getInstruction()->getParent()->getParent();
2205 BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
2206 if (!isColdCallSite(CS, CallerBFI))
2207 return false;
2208 auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc);
2209 if (It == AllCallsCold.end())
2210 return false;
2211 }
2212 return true;
2213}
2214
2215static void changeCallSitesToColdCC(Function *F) {
2216 for (User *U : F->users()) {
2217 if (isa<BlockAddress>(U))
2218 continue;
2219 CallSite CS(cast<Instruction>(U));
2220 CS.setCallingConv(CallingConv::Cold);
2221 }
2222}
2223
2224// This function iterates over all the call instructions in the input Function
2225// and checks that all call sites are in cold blocks and are allowed to use the
2226// coldcc calling convention.
2227static bool
2228hasOnlyColdCalls(Function &F,
2229 function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
2230 for (BasicBlock &BB : F) {
2231 for (Instruction &I : BB) {
2232 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2233 CallSite CS(cast<Instruction>(CI));
2234 // Skip over inline asm instructions since they aren't function calls.
2235 if (CI->isInlineAsm())
2236 continue;
2237 Function *CalledFn = CI->getCalledFunction();
2238 if (!CalledFn)
2239 return false;
2240 if (!CalledFn->hasLocalLinkage())
2241 return false;
2242 // Skip over intrinsics since they won't remain as function calls.
2243 if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
2244 continue;
2245 // Check if it's valid to use coldcc calling convention.
2246 if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
2247 CalledFn->hasAddressTaken())
2248 return false;
2249 BlockFrequencyInfo &CallerBFI = GetBFI(F);
2250 if (!isColdCallSite(CS, CallerBFI))
2251 return false;
2252 }
2253 }
2254 }
2255 return true;
2256}
2257
2258static bool
2259OptimizeFunctions(Module &M,
2260 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2261 function_ref<TargetTransformInfo &(Function &)> GetTTI,
2262 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2263 function_ref<DominatorTree &(Function &)> LookupDomTree,
2264 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2265
2266 bool Changed = false;
2267
2268 std::vector<Function *> AllCallsCold;
2269 for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) {
2270 Function *F = &*FI++;
2271 if (hasOnlyColdCalls(*F, GetBFI))
2272 AllCallsCold.push_back(F);
2273 }
2274
2275 // Optimize functions.
2276 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2277 Function *F = &*FI++;
2278
2279 // Don't perform global opt pass on naked functions; we don't want fast
2280 // calling conventions for naked functions.
2281 if (F->hasFnAttribute(Attribute::Naked))
2282 continue;
2283
2284 // Functions without names cannot be referenced outside this module.
2285 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
2286 F->setLinkage(GlobalValue::InternalLinkage);
2287
2288 if (deleteIfDead(*F, NotDiscardableComdats)) {
2289 Changed = true;
2290 continue;
2291 }
2292
2293 // LLVM's definition of dominance allows instructions that are cyclic
2294 // in unreachable blocks, e.g.:
2295 // %pat = select i1 %condition, @global, i16* %pat
2296 // because any instruction dominates an instruction in a block that's
2297 // not reachable from entry.
2298 // So, remove unreachable blocks from the function, because a) there's
2299 // no point in analyzing them and b) GlobalOpt should otherwise grow
2300 // some more complicated logic to break these cycles.
2301 if (!F->isDeclaration()) {
2302 auto &DT = LookupDomTree(*F);
2303 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
2304 Changed |= removeUnreachableBlocks(*F, &DTU);
2305 }
2306
2307 Changed |= processGlobal(*F, GetTLI, LookupDomTree);
2308
2309 if (!F->hasLocalLinkage())
2310 continue;
2311
2312 // If we have an inalloca parameter that we can safely remove the
2313 // inalloca attribute from, do so. This unlocks optimizations that
2314 // wouldn't be safe in the presence of inalloca.
2315 // FIXME: We should also hoist alloca affected by this to the entry
2316 // block if possible.
2317 if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca) &&
2318 !F->hasAddressTaken()) {
2319 RemoveAttribute(F, Attribute::InAlloca);
2320 Changed = true;
2321 }
2322
2323 if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) {
2324 NumInternalFunc++;
2325 TargetTransformInfo &TTI = GetTTI(*F);
2326 // Change the calling convention to coldcc if either stress testing is
2327 // enabled or the target would like to use coldcc on functions which are
2328 // cold at all call sites and the callers contain no other non coldcc
2329 // calls.
2330 if (EnableColdCCStressTest ||
2331 (TTI.useColdCCForColdCall(*F) &&
2332 isValidCandidateForColdCC(*F, GetBFI, AllCallsCold))) {
2333 F->setCallingConv(CallingConv::Cold);
2334 changeCallSitesToColdCC(F);
2335 Changed = true;
2336 NumColdCC++;
2337 }
2338 }
2339
2340 if (hasChangeableCC(F) && !F->isVarArg() &&
2341 !F->hasAddressTaken()) {
2342 // If this function has a calling convention worth changing, is not a
2343 // varargs function, and is only called directly, promote it to use the
2344 // Fast calling convention.
2345 F->setCallingConv(CallingConv::Fast);
2346 ChangeCalleesToFastCall(F);
2347 ++NumFastCallFns;
2348 Changed = true;
2349 }
2350
2351 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
2352 !F->hasAddressTaken()) {
2353 // The function is not used by a trampoline intrinsic, so it is safe
2354 // to remove the 'nest' attribute.
2355 RemoveAttribute(F, Attribute::Nest);
2356 ++NumNestRemoved;
2357 Changed = true;
2358 }
2359 }
2360 return Changed;
2361}
2362
2363static bool
2364OptimizeGlobalVars(Module &M,
2365 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2366 function_ref<DominatorTree &(Function &)> LookupDomTree,
2367 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2368 bool Changed = false;
2369
2370 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
2371 GVI != E; ) {
2372 GlobalVariable *GV = &*GVI++;
2373 // Global variables without names cannot be referenced outside this module.
2374 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
2375 GV->setLinkage(GlobalValue::InternalLinkage);
2376 // Simplify the initializer.
2377 if (GV->hasInitializer())
2378 if (auto *C = dyn_cast<Constant>(GV->getInitializer())) {
2379 auto &DL = M.getDataLayout();
2380 // TLI is not used in the case of a Constant, so use default nullptr
2381 // for that optional parameter, since we don't have a Function to
2382 // provide GetTLI anyway.
2383 Constant *New = ConstantFoldConstant(C, DL, /*TLI*/ nullptr);
2384 if (New && New != C)
2385 GV->setInitializer(New);
2386 }
2387
2388 if (deleteIfDead(*GV, NotDiscardableComdats)) {
2389 Changed = true;
2390 continue;
2391 }
2392
2393 Changed |= processGlobal(*GV, GetTLI, LookupDomTree);
2394 }
2395 return Changed;
2396}
2397
2398/// Evaluate a piece of a constantexpr store into a global initializer. This
2399/// returns 'Init' modified to reflect 'Val' stored into it. At this point, the
2400/// GEP operands of Addr [0, OpNo) have been stepped into.
2401static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2402 ConstantExpr *Addr, unsigned OpNo) {
2403 // Base case of the recursion.
2404 if (OpNo == Addr->getNumOperands()) {
2405 assert(Val->getType() == Init->getType() && "Type mismatch!");
2406 return Val;
2407 }
2408
2409 SmallVector<Constant*, 32> Elts;
2410 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2411 // Break up the constant into its elements.
2412 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2413 Elts.push_back(Init->getAggregateElement(i));
2414
2415 // Replace the element that we are supposed to.
2416 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2417 unsigned Idx = CU->getZExtValue();
2418 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2419 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2420
2421 // Return the modified struct.
2422 return ConstantStruct::get(STy, Elts);
2423 }
2424
2425 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2426 SequentialType *InitTy = cast<SequentialType>(Init->getType());
2427 uint64_t NumElts = InitTy->getNumElements();
2428
2429 // Break up the array into elements.
2430 for (uint64_t i = 0, e = NumElts; i != e; ++i)
2431 Elts.push_back(Init->getAggregateElement(i));
2432
2433 assert(CI->getZExtValue() < NumElts);
2434 Elts[CI->getZExtValue()] =
2435 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2436
2437 if (Init->getType()->isArrayTy())
2438 return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2439 return ConstantVector::get(Elts);
2440}
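To make the recursion concrete, here is a small illustrative walk-through; the global, its type and the stored value are hypothetical and not taken from this module:

// Suppose (illustrative only):
//   @g   = global { i32, [2 x i32] } { i32 1, [2 x i32] [i32 2, i32 3] }
//   Addr = getelementptr ({ i32, [2 x i32] }, { i32, [2 x i32] }* @g, i64 0, i32 1, i64 0)
//   Val  = i32 7
// CommitValueTo() below calls EvaluateStoreInto(Init, Val, Addr, /*OpNo=*/2).
// OpNo 2 selects struct element 1 (the [2 x i32]), OpNo 3 selects array
// element 0, and the base case returns Val, so the rebuilt initializer is
//   { i32 1, [2 x i32] [i32 7, i32 3] }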
2441
2442/// We have decided that Addr (which satisfies the predicate
2443/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2444static void CommitValueTo(Constant *Val, Constant *Addr) {
2445 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2446 assert(GV->hasInitializer());
2447 GV->setInitializer(Val);
2448 return;
2449 }
2450
2451 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2452 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2453 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2454}
2455
2456/// Given a map of address -> value, where addresses are expected to be some form
2457/// of either a global or a constant GEP, set the initializer for the address to
2458/// be the value. This performs mostly the same function as CommitValueTo()
2459/// and EvaluateStoreInto() but is optimized to be more efficient for the common
2460/// case where the addresses are GEPs sharing the same underlying global,
2461/// processing the GEPs in batches rather than individually.
2462///
2463/// To give an example, consider the following C++ code adapted from the clang
2464/// regression tests:
2465/// struct S {
2466/// int n = 10;
2467/// int m = 2 * n;
2468/// S(int a) : n(a) {}
2469/// };
2470///
2471/// template<typename T>
2472/// struct U {
2473/// T *r = &q;
2474/// T q = 42;
2475/// U *p = this;
2476/// };
2477///
2478/// U<S> e;
2479///
2480/// The global static constructor for 'e' will need to initialize 'r' and 'p' of
2481/// the outer struct, while also initializing the inner 'q' struct's 'n' and 'm'
2482/// members. This batch algorithm will simply use the general CommitValueTo()
2483/// method to handle the complex nested initialization of the S struct 'q',
2484/// before processing the outermost members in a single batch. Using
2485/// CommitValueTo() for each member of the outer struct is inefficient when the
2486/// struct/array is very large, as we end up creating and destroying a constant
2487/// array for each initialization.
2488/// For the above case, we expect the following IR to be generated:
2489///
2490/// %struct.U = type { %struct.S*, %struct.S, %struct.U* }
2491/// %struct.S = type { i32, i32 }
2492/// @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
2493/// i64 0, i32 1),
2494/// %struct.S { i32 42, i32 84 }, %struct.U* @e }
2495/// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
2496/// constant expression, while the other two elements of @e are "simple".
2497static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
2498 SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs;
2499 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs;
2500 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs;
2501 SimpleCEs.reserve(Mem.size());
2502
2503 for (const auto &I : Mem) {
2504 if (auto *GV = dyn_cast<GlobalVariable>(I.first)) {
2505 GVs.push_back(std::make_pair(GV, I.second));
2506 } else {
2507 ConstantExpr *GEP = cast<ConstantExpr>(I.first);
2508 // We don't handle the deeply recursive case using the batch method.
2509 if (GEP->getNumOperands() > 3)
2510 ComplexCEs.push_back(std::make_pair(GEP, I.second));
2511 else
2512 SimpleCEs.push_back(std::make_pair(GEP, I.second));
2513 }
2514 }
2515
2516 // The algorithm below doesn't handle cases like nested structs, so use the
2517 // slower fully general method if we have to.
2518 for (auto ComplexCE : ComplexCEs)
7
Assuming '__begin1' is equal to '__end1'
2519 CommitValueTo(ComplexCE.second, ComplexCE.first);
2520
2521 for (auto GVPair : GVs) {
8
Assuming '__begin1' is equal to '__end1'
2522 assert(GVPair.first->hasInitializer());
2523 GVPair.first->setInitializer(GVPair.second);
2524 }
2525
2526 if (SimpleCEs.empty())
9
Calling 'SmallVectorBase::empty'
12
Returning from 'SmallVectorBase::empty'
13
Taking false branch
2527 return;
2528
2529 // We cache a single global's initializer elements in the case where the
2530 // subsequent address/val pair uses the same one. This avoids throwing away and
2531 // rebuilding the constant struct/vector/array just because one element is
2532 // modified at a time.
2533 SmallVector<Constant *, 32> Elts;
2534 Elts.reserve(SimpleCEs.size());
2535 GlobalVariable *CurrentGV = nullptr;
14
'CurrentGV' initialized to a null pointer value
2536
2537 auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) {
2538 Constant *Init = GV->getInitializer();
18
Called C++ object pointer is null
2539 Type *Ty = Init->getType();
2540 if (Update) {
2541 if (CurrentGV) {
2542 assert(CurrentGV && "Expected a GV to commit to!");
2543 Type *CurrentInitTy = CurrentGV->getInitializer()->getType();
2544 // We have a valid cache that needs to be committed.
2545 if (StructType *STy = dyn_cast<StructType>(CurrentInitTy))
2546 CurrentGV->setInitializer(ConstantStruct::get(STy, Elts));
2547 else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy))
2548 CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts));
2549 else
2550 CurrentGV->setInitializer(ConstantVector::get(Elts));
2551 }
2552 if (CurrentGV == GV)
2553 return;
2554 // Need to clear and set up cache for new initializer.
2555 CurrentGV = GV;
2556 Elts.clear();
2557 unsigned NumElts;
2558 if (auto *STy = dyn_cast<StructType>(Ty))
2559 NumElts = STy->getNumElements();
2560 else
2561 NumElts = cast<SequentialType>(Ty)->getNumElements();
2562 for (unsigned i = 0, e = NumElts; i != e; ++i)
2563 Elts.push_back(Init->getAggregateElement(i));
2564 }
2565 };
2566
2567 for (auto CEPair : SimpleCEs) {
15
Assuming '__begin1' is equal to '__end1'
2568 ConstantExpr *GEP = CEPair.first;
2569 Constant *Val = CEPair.second;
2570
2571 GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0));
2572 commitAndSetupCache(GV, GV != CurrentGV);
2573 ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2));
2574 Elts[CI->getZExtValue()] = Val;
2575 }
2576 // The last initializer in the list needs to be committed, others
2577 // will be committed on a new initializer being processed.
2578 commitAndSetupCache(CurrentGV, true);
16
Passing null pointer value via 1st parameter 'GV'
17
Calling 'operator()'
2579}
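On the path reported above, SimpleCEs.empty() is assumed to return false (so the early return at line 2527 is not taken) while the loop at line 2567 is assumed to execute zero iterations, leaving CurrentGV null when the final commitAndSetupCache(CurrentGV, true) call dereferences GV at line 2538. A minimal sketch of one defensive variant of that final flush, assuming the cache only needs committing once the loop has populated it (this guard is not part of the checked-in source):

  // Hypothetical guard: only flush the cache if a global was actually cached
  // by the loop above.
  if (CurrentGV)
    commitAndSetupCache(CurrentGV, /*Update=*/true);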
2580
2581/// Evaluate static constructors in the function, if we can. Return true if we
2582/// can, false otherwise.
2583static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
2584 TargetLibraryInfo *TLI) {
2585 // Call the function.
2586 Evaluator Eval(DL, TLI);
2587 Constant *RetValDummy;
2588 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2589 SmallVector<Constant*, 0>());
2590
2591 if (EvalSuccess) {
2
Assuming 'EvalSuccess' is true
3
Taking true branch
2592 ++NumCtorsEvaluated;
2593
2594 // We succeeded at evaluation: commit the result.
2595 LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
4
Assuming 'DebugFlag' is false
5
Loop condition is false.  Exiting loop
2596 << F->getName() << "' to "
2597 << Eval.getMutatedMemory().size() << " stores.\n");
2598 BatchCommitValueTo(Eval.getMutatedMemory());
6
Calling 'BatchCommitValueTo'
2599 for (GlobalVariable *GV : Eval.getInvariants())
2600 GV->setConstant(true);
2601 }
2602
2603 return EvalSuccess;
2604}
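As an illustration (hypothetical example, not taken from this report), a dynamic initializer the Evaluator can execute completely looks like:

// C++ source (illustrative):
//   static int compute() { return 6 * 7; }
//   int x = compute();                     // emitted as a dynamic initializer
// If EvaluateFunction() succeeds, Eval.getMutatedMemory() maps @x to i32 42;
// BatchCommitValueTo() then rewrites @x's initializer and the caller
// (optimizeGlobalCtorsList) can drop the constructor entirely.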
2605
2606static int compareNames(Constant *const *A, Constant *const *B) {
2607 Value *AStripped = (*A)->stripPointerCasts();
2608 Value *BStripped = (*B)->stripPointerCasts();
2609 return AStripped->getName().compare(BStripped->getName());
2610}
2611
2612static void setUsedInitializer(GlobalVariable &V,
2613 const SmallPtrSetImpl<GlobalValue *> &Init) {
2614 if (Init.empty()) {
2615 V.eraseFromParent();
2616 return;
2617 }
2618
2619 // Type of pointer to the array of pointers.
2620 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
2621
2622 SmallVector<Constant *, 8> UsedArray;
2623 for (GlobalValue *GV : Init) {
2624 Constant *Cast
2625 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
2626 UsedArray.push_back(Cast);
2627 }
2628 // Sort to get deterministic order.
2629 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2630 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
2631
2632 Module *M = V.getParent();
2633 V.removeFromParent();
2634 GlobalVariable *NV =
2635 new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage,
2636 ConstantArray::get(ATy, UsedArray), "");
2637 NV->takeName(&V);
2638 NV->setSection("llvm.metadata");
2639 delete &V;
2640}
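For example (hypothetical module contents), if Init holds { @f, @g }, the rebuilt variable looks roughly like:

//   @llvm.used = appending global [2 x i8*]
//                [i8* bitcast (void ()* @f to i8*), i8* bitcast (i32* @g to i8*)],
//                section "llvm.metadata"
// Entries are bitcast to i8* and sorted by name so the emitted array is
// deterministic regardless of set iteration order.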
2641
2642namespace {
2643
2644/// An easy to access representation of llvm.used and llvm.compiler.used.
2645class LLVMUsed {
2646 SmallPtrSet<GlobalValue *, 8> Used;
2647 SmallPtrSet<GlobalValue *, 8> CompilerUsed;
2648 GlobalVariable *UsedV;
2649 GlobalVariable *CompilerUsedV;
2650
2651public:
2652 LLVMUsed(Module &M) {
2653 UsedV = collectUsedGlobalVariables(M, Used, false);
2654 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
2655 }
2656
2657 using iterator = SmallPtrSet<GlobalValue *, 8>::iterator;
2658 using used_iterator_range = iterator_range<iterator>;
2659
2660 iterator usedBegin() { return Used.begin(); }
2661 iterator usedEnd() { return Used.end(); }
2662
2663 used_iterator_range used() {
2664 return used_iterator_range(usedBegin(), usedEnd());
2665 }
2666
2667 iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2668 iterator compilerUsedEnd() { return CompilerUsed.end(); }
2669
2670 used_iterator_range compilerUsed() {
2671 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
2672 }
2673
2674 bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2675
2676 bool compilerUsedCount(GlobalValue *GV) const {
2677 return CompilerUsed.count(GV);
2678 }
2679
2680 bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2681 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2682 bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
2683
2684 bool compilerUsedInsert(GlobalValue *GV) {
2685 return CompilerUsed.insert(GV).second;
2686 }
2687
2688 void syncVariablesAndSets() {
2689 if (UsedV)
2690 setUsedInitializer(*UsedV, Used);
2691 if (CompilerUsedV)
2692 setUsedInitializer(*CompilerUsedV, CompilerUsed);
2693 }
2694};
2695
2696} // end anonymous namespace
2697
2698static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2699 if (GA.use_empty()) // No use at all.
2700 return false;
2701
2702 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2703 "We should have removed the duplicated "
2704 "element from llvm.compiler.used");
2705 if (!GA.hasOneUse())
2706 // Strictly more than one use. So at least one is not in llvm.used and
2707 // llvm.compiler.used.
2708 return true;
2709
2710 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2711 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2712}
2713
2714static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
2715 const LLVMUsed &U) {
2716 unsigned N = 2;
2717 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
2718 "We should have removed the duplicated "
2719 "element from llvm.compiler.used");
2720 if (U.usedCount(&V) || U.compilerUsedCount(&V))
2721 ++N;
2722 return V.hasNUsesOrMore(N);
2723}
2724
2725static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
2726 if (!GA.hasLocalLinkage())
2727 return true;
2728
2729 return U.usedCount(&GA) || U.compilerUsedCount(&GA);
2730}
2731
2732static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
2733 bool &RenameTarget) {
2734 RenameTarget = false;
2735 bool Ret = false;
2736 if (hasUseOtherThanLLVMUsed(GA, U))
2737 Ret = true;
2738
2739 // If the alias is externally visible, we may still be able to simplify it.
2740 if (!mayHaveOtherReferences(GA, U))
2741 return Ret;
2742
2743 // If the aliasee has internal linkage, give it the name and linkage
2744 // of the alias, and delete the alias. This turns:
2745 // define internal ... @f(...)
2746 // @a = alias ... @f
2747 // into:
2748 // define ... @a(...)
2749 Constant *Aliasee = GA.getAliasee();
2750 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2751 if (!Target->hasLocalLinkage())
2752 return Ret;
2753
2754 // Do not perform the transform if multiple aliases potentially target the
2755 // aliasee. This check also ensures that it is safe to replace the section
2756 // and other attributes of the aliasee with those of the alias.
2757 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
2758 return Ret;
2759
2760 RenameTarget = true;
2761 return true;
2762}
2763
2764static bool
2765OptimizeGlobalAliases(Module &M,
2766 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2767 bool Changed = false;
2768 LLVMUsed Used(M);
2769
2770 for (GlobalValue *GV : Used.used())
2771 Used.compilerUsedErase(GV);
2772
2773 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
2774 I != E;) {
2775 GlobalAlias *J = &*I++;
2776
2777 // Aliases without names cannot be referenced outside this module.
2778 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
2779 J->setLinkage(GlobalValue::InternalLinkage);
2780
2781 if (deleteIfDead(*J, NotDiscardableComdats)) {
2782 Changed = true;
2783 continue;
2784 }
2785
2786 // If the alias can change at link time, nothing can be done - bail out.
2787 if (J->isInterposable())
2788 continue;
2789
2790 Constant *Aliasee = J->getAliasee();
2791 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
2792 // We can't trivially replace the alias with the aliasee if the aliasee is
2793 // non-trivial in some way.
2794 // TODO: Try to handle non-zero GEPs of local aliasees.
2795 if (!Target)
2796 continue;
2797 Target->removeDeadConstantUsers();
2798
2799 // Make all users of the alias use the aliasee instead.
2800 bool RenameTarget;
2801 if (!hasUsesToReplace(*J, Used, RenameTarget))
2802 continue;
2803
2804 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
2805 ++NumAliasesResolved;
2806 Changed = true;
2807
2808 if (RenameTarget) {
2809 // Give the aliasee the name, linkage and other attributes of the alias.
2810 Target->takeName(&*J);
2811 Target->setLinkage(J->getLinkage());
2812 Target->setDSOLocal(J->isDSOLocal());
2813 Target->setVisibility(J->getVisibility());
2814 Target->setDLLStorageClass(J->getDLLStorageClass());
2815
2816 if (Used.usedErase(&*J))
2817 Used.usedInsert(Target);
2818
2819 if (Used.compilerUsedErase(&*J))
2820 Used.compilerUsedInsert(Target);
2821 } else if (mayHaveOtherReferences(*J, Used))
2822 continue;
2823
2824 // Delete the alias.
2825 M.getAliasList().erase(J);
2826 ++NumAliasesRemoved;
2827 Changed = true;
2828 }
2829
2830 Used.syncVariablesAndSets();
2831
2832 return Changed;
2833}
2834
2835static Function *
2836FindCXAAtExit(Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
2837 // Hack to get a default TLI before we have actual Function.
2838 auto FuncIter = M.begin();
2839 if (FuncIter == M.end())
2840 return nullptr;
2841 auto *TLI = &GetTLI(*FuncIter);
2842
2843 LibFunc F = LibFunc_cxa_atexit;
2844 if (!TLI->has(F))
2845 return nullptr;
2846
2847 Function *Fn = M.getFunction(TLI->getName(F));
2848 if (!Fn)
2849 return nullptr;
2850
2851 // Now get the actual TLI for Fn.
2852 TLI = &GetTLI(*Fn);
2853
2854 // Make sure that the function has the correct prototype.
2855 if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit)
2856 return nullptr;
2857
2858 return Fn;
2859}
2860
2861/// Returns whether the given function is an empty C++ destructor and can
2862/// therefore be eliminated.
2863/// Note that we assume that other optimization passes have already simplified
2864/// the code so we simply check for 'ret'.
2865static bool cxxDtorIsEmpty(const Function &Fn) {
2866 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
2867 // nounwind, but that doesn't seem worth doing.
2868 if (Fn.isDeclaration())
2869 return false;
2870
2871 for (auto &I : Fn.getEntryBlock()) {
2872 if (isa<DbgInfoIntrinsic>(I))
2873 continue;
2874 if (isa<ReturnInst>(I))
2875 return true;
2876 break;
2877 }
2878 return false;
2879}
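Illustratively (hypothetical IR, not from this module), a destructor this predicate accepts has an entry block containing only debug intrinsics followed by a return:

//   define linkonce_odr void @_ZN1SD2Ev(%struct.S* %this) {
//   entry:
//     ret void
//   }
// Any other leading instruction in the entry block makes the function return
// false, because the loop breaks on the first non-debug, non-return
// instruction.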
2880
2881static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
2882 /// Itanium C++ ABI p3.3.5:
2883 ///
2884 /// After constructing a global (or local static) object, that will require
2885 /// destruction on exit, a termination function is registered as follows:
2886 ///
2887 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
2888 ///
2889 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
2890 /// call f(p) when DSO d is unloaded, before all such termination calls
2891 /// registered before this one. It returns zero if registration is
2892 /// successful, nonzero on failure.
2893
2894 // This pass will look for calls to __cxa_atexit where the function is trivial
2895 // and remove them.
2896 bool Changed = false;
2897
2898 for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
2899 I != E;) {
2900 // We're only interested in calls. Theoretically, we could handle invoke
2901 // instructions as well, but neither llvm-gcc nor clang generate invokes
2902 // to __cxa_atexit.
2903 CallInst *CI = dyn_cast<CallInst>(*I++);
2904 if (!CI)
2905 continue;
2906
2907 Function *DtorFn =
2908 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
2909 if (!DtorFn || !cxxDtorIsEmpty(*DtorFn))
2910 continue;
2911
2912 // Just remove the call.
2913 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
2914 CI->eraseFromParent();
2915
2916 ++NumCXXDtorsRemoved;
2917
2918 Changed |= true;
2919 }
2920
2921 return Changed;
2922}
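For a registration whose destructor is empty in the sense checked above, the rewrite looks roughly like this (hypothetical IR, not from this module):

//   %0 = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SD2Ev to void (i8*)*),
//                               i8* bitcast (%struct.S* @s to i8*), i8* @__dso_handle)
// Uses of %0 are replaced with the null value of its type (i32 0) and the call
// itself is erased, so no termination function is registered for @s.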
2923
2924static bool optimizeGlobalsInModule(
2925 Module &M, const DataLayout &DL,
2926 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2927 function_ref<TargetTransformInfo &(Function &)> GetTTI,
2928 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2929 function_ref<DominatorTree &(Function &)> LookupDomTree) {
2930 SmallPtrSet<const Comdat *, 8> NotDiscardableComdats;
2931 bool Changed = false;
2932 bool LocalChange = true;
2933 while (LocalChange) {
2934 LocalChange = false;
2935
2936 NotDiscardableComdats.clear();
2937 for (const GlobalVariable &GV : M.globals())
2938 if (const Comdat *C = GV.getComdat())
2939 if (!GV.isDiscardableIfUnused() || !GV.use_empty())
2940 NotDiscardableComdats.insert(C);
2941 for (Function &F : M)
2942 if (const Comdat *C = F.getComdat())
2943 if (!F.isDefTriviallyDead())
2944 NotDiscardableComdats.insert(C);
2945 for (GlobalAlias &GA : M.aliases())
2946 if (const Comdat *C = GA.getComdat())
2947 if (!GA.isDiscardableIfUnused() || !GA.use_empty())
2948 NotDiscardableComdats.insert(C);
2949
2950 // Delete functions that are trivially dead, ccc -> fastcc
2951 LocalChange |= OptimizeFunctions(M, GetTLI, GetTTI, GetBFI, LookupDomTree,
2952 NotDiscardableComdats);
2953
2954 // Optimize global_ctors list.
2955 LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
2956 return EvaluateStaticConstructor(F, DL, &GetTLI(*F));
1
Calling 'EvaluateStaticConstructor'
2957 });
2958
2959 // Optimize non-address-taken globals.
2960 LocalChange |=
2961 OptimizeGlobalVars(M, GetTLI, LookupDomTree, NotDiscardableComdats);
2962
2963 // Resolve aliases, when possible.
2964 LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);
2965
2966 // Try to remove trivial global destructors if they are not removed
2967 // already.
2968 Function *CXAAtExitFn = FindCXAAtExit(M, GetTLI);
2969 if (CXAAtExitFn)
2970 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
2971
2972 Changed |= LocalChange;
2973 }
2974
2975 // TODO: Move all global ctors functions to the end of the module for code
2976 // layout.
2977
2978 return Changed;
2979}
2980
2981PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
2982 auto &DL = M.getDataLayout();
2983 auto &FAM =
2984 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2985 auto LookupDomTree = [&FAM](Function &F) -> DominatorTree &{
2986 return FAM.getResult<DominatorTreeAnalysis>(F);
2987 };
2988 auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
2989 return FAM.getResult<TargetLibraryAnalysis>(F);
2990 };
2991 auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
2992 return FAM.getResult<TargetIRAnalysis>(F);
2993 };
2994
2995 auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
2996 return FAM.getResult<BlockFrequencyAnalysis>(F);
2997 };
2998
2999 if (!optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI, LookupDomTree))
3000 return PreservedAnalyses::all();
3001 return PreservedAnalyses::none();
3002}
3003
3004namespace {
3005
3006struct GlobalOptLegacyPass : public ModulePass {
3007 static char ID; // Pass identification, replacement for typeid
3008
3009 GlobalOptLegacyPass() : ModulePass(ID) {
3010 initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry());
3011 }
3012
3013 bool runOnModule(Module &M) override {
3014 if (skipModule(M))
3015 return false;
3016
3017 auto &DL = M.getDataLayout();
3018 auto LookupDomTree = [this](Function &F) -> DominatorTree & {
3019 return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
3020 };
3021 auto GetTLI = [this](Function &F) -> TargetLibraryInfo & {
3022 return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
3023 };
3024 auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
3025 return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3026 };
3027
3028 auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & {
3029 return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI();
3030 };
3031
3032 return optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI,
3033 LookupDomTree);
3034 }
3035
3036 void getAnalysisUsage(AnalysisUsage &AU) const override {
3037 AU.addRequired<TargetLibraryInfoWrapperPass>();
3038 AU.addRequired<TargetTransformInfoWrapperPass>();
3039 AU.addRequired<DominatorTreeWrapperPass>();
3040 AU.addRequired<BlockFrequencyInfoWrapperPass>();
3041 }
3042};
3043
3044} // end anonymous namespace
3045
3046char GlobalOptLegacyPass::ID = 0;
3047
3048INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
3049 "Global Variable Optimizer", false, false)
3050INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3051INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
3052INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
3053INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3054INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
3055 "Global Variable Optimizer", false, false)
3056
3057ModulePass *llvm::createGlobalOptimizerPass() {
3058 return new GlobalOptLegacyPass();
3059}

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/AlignOf.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MathExtras.h"
20#include "llvm/Support/MemAlloc.h"
21#include "llvm/Support/type_traits.h"
22#include "llvm/Support/ErrorHandling.h"
23#include <algorithm>
24#include <cassert>
25#include <cstddef>
26#include <cstdlib>
27#include <cstring>
28#include <initializer_list>
29#include <iterator>
30#include <memory>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37/// This is all the non-templated stuff common to all SmallVectors.
38class SmallVectorBase {
39protected:
40 void *BeginX;
41 unsigned Size = 0, Capacity;
42
43 SmallVectorBase() = delete;
44 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
45 : BeginX(FirstEl), Capacity(TotalCapacity) {}
46
47 /// This is an implementation of the grow() method which only works
48 /// on POD-like data types and is out of line to reduce code duplication.
49 void grow_pod(void *FirstEl, size_t MinCapacity, size_t TSize);
50
51public:
52 size_t size() const { return Size; }
53 size_t capacity() const { return Capacity; }
54
55 LLVM_NODISCARD bool empty() const { return !Size; }
10
Assuming field 'Size' is not equal to 0
11
Returning zero, which participates in a condition later
56
57 /// Set the array size to \p N, which the current array must have enough
58 /// capacity for.
59 ///
60 /// This does not construct or destroy any elements in the vector.
61 ///
62 /// Clients can use this in conjunction with capacity() to write past the end
63 /// of the buffer when they know that more elements are available, and only
64 /// update the size later. This avoids the cost of value initializing elements
65 /// which will only be overwritten.
66 void set_size(size_t N) {
67 assert(N <= capacity());
68 Size = N;
69 }
70};
71
72/// Figure out the offset of the first element.
73template <class T, typename = void> struct SmallVectorAlignmentAndSize {
74 AlignedCharArrayUnion<SmallVectorBase> Base;
75 AlignedCharArrayUnion<T> FirstEl;
76};
77
78/// This is the part of SmallVectorTemplateBase which does not depend on whether
79/// the type T is a POD. The extra dummy template argument is used by ArrayRef
80/// to avoid unnecessarily requiring T to be complete.
81template <typename T, typename = void>
82class SmallVectorTemplateCommon : public SmallVectorBase {
83 /// Find the address of the first element. For this pointer math to be valid
84 /// with small-size of 0 for T with lots of alignment, it's important that
85 /// SmallVectorStorage is properly-aligned even for small-size of 0.
86 void *getFirstEl() const {
87 return const_cast<void *>(reinterpret_cast<const void *>(
88 reinterpret_cast<const char *>(this) +
89 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
90 }
91 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
92
93protected:
94 SmallVectorTemplateCommon(size_t Size)
95 : SmallVectorBase(getFirstEl(), Size) {}
96
97 void grow_pod(size_t MinCapacity, size_t TSize) {
98 SmallVectorBase::grow_pod(getFirstEl(), MinCapacity, TSize);
99 }
100
101 /// Return true if this is a smallvector which has not had dynamic
102 /// memory allocated for it.
103 bool isSmall() const { return BeginX == getFirstEl(); }
104
105 /// Put this vector in a state of being small.
106 void resetToSmall() {
107 BeginX = getFirstEl();
108 Size = Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
109 }
110
111public:
112 using size_type = size_t;
113 using difference_type = ptrdiff_t;
114 using value_type = T;
115 using iterator = T *;
116 using const_iterator = const T *;
117
118 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
119 using reverse_iterator = std::reverse_iterator<iterator>;
120
121 using reference = T &;
122 using const_reference = const T &;
123 using pointer = T *;
124 using const_pointer = const T *;
125
126 // forward iterator creation methods.
127 iterator begin() { return (iterator)this->BeginX; }
128 const_iterator begin() const { return (const_iterator)this->BeginX; }
129 iterator end() { return begin() + size(); }
130 const_iterator end() const { return begin() + size(); }
131
132 // reverse iterator creation methods.
133 reverse_iterator rbegin() { return reverse_iterator(end()); }
134 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
135 reverse_iterator rend() { return reverse_iterator(begin()); }
136 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
137
138 size_type size_in_bytes() const { return size() * sizeof(T); }
139 size_type max_size() const { return size_type(-1) / sizeof(T); }
140
141 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
142
143 /// Return a pointer to the vector's buffer, even if empty().
144 pointer data() { return pointer(begin()); }
145 /// Return a pointer to the vector's buffer, even if empty().
146 const_pointer data() const { return const_pointer(begin()); }
147
148 reference operator[](size_type idx) {
149 assert(idx < size());
150 return begin()[idx];
151 }
152 const_reference operator[](size_type idx) const {
153 assert(idx < size());
154 return begin()[idx];
155 }
156
157 reference front() {
158 assert(!empty());
159 return begin()[0];
160 }
161 const_reference front() const {
162 assert(!empty());
163 return begin()[0];
164 }
165
166 reference back() {
167 assert(!empty());
168 return end()[-1];
169 }
170 const_reference back() const {
171 assert(!empty());
172 return end()[-1];
173 }
174};
175
176/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put method
177/// implementations that are designed to work with non-POD-like T's.
178template <typename T, bool = is_trivially_copyable<T>::value>
179class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
180protected:
181 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
182
183 static void destroy_range(T *S, T *E) {
184 while (S != E) {
185 --E;
186 E->~T();
187 }
188 }
189
190 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
191 /// constructing elements as needed.
192 template<typename It1, typename It2>
193 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
194 std::uninitialized_copy(std::make_move_iterator(I),
195 std::make_move_iterator(E), Dest);
196 }
197
198 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
199 /// constructing elements as needed.
200 template<typename It1, typename It2>
201 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
202 std::uninitialized_copy(I, E, Dest);
203 }
204
205 /// Grow the allocated memory (without initializing new elements), doubling
206 /// the size of the allocated memory. Guarantees space for at least one more
207 /// element, or MinSize more elements if specified.
208 void grow(size_t MinSize = 0);
209
210public:
211 void push_back(const T &Elt) {
212 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
213 this->grow();
214 ::new ((void*) this->end()) T(Elt);
215 this->set_size(this->size() + 1);
216 }
217
218 void push_back(T &&Elt) {
219 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
220 this->grow();
221 ::new ((void*) this->end()) T(::std::move(Elt));
222 this->set_size(this->size() + 1);
223 }
224
225 void pop_back() {
226 this->set_size(this->size() - 1);
227 this->end()->~T();
228 }
229};
230
231// Define this out-of-line to dissuade the C++ compiler from inlining it.
232template <typename T, bool TriviallyCopyable>
233void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
234 if (MinSize > UINT32_MAX)
235 report_bad_alloc_error("SmallVector capacity overflow during allocation");
236
237 // Always grow, even from zero.
238 size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
239 NewCapacity = std::min(std::max(NewCapacity, MinSize), size_t(UINT32_MAX));
240 T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
241
242 // Move the elements over.
243 this->uninitialized_move(this->begin(), this->end(), NewElts);
244
245 // Destroy the original elements.
246 destroy_range(this->begin(), this->end());
247
248 // If this wasn't grown from the inline copy, deallocate the old space.
249 if (!this->isSmall())
250 free(this->begin());
251
252 this->BeginX = NewElts;
253 this->Capacity = NewCapacity;
254}
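A small worked example of the growth policy above, assuming NextPowerOf2(A) returns the smallest power of two strictly greater than A:

// capacity 0 -> NextPowerOf2(0 + 2) = 4
// capacity 4 -> NextPowerOf2(4 + 2) = 8
// capacity 8 -> NextPowerOf2(8 + 2) = 16
// so repeated push_back() calls roughly double the allocation each time, while
// grow(MinSize) never allocates fewer than MinSize or more than UINT32_MAX
// elements.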
255
256/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
257/// method implementations that are designed to work with POD-like T's.
258template <typename T>
259class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
260protected:
261 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
262
263 // No need to do a destroy loop for POD's.
264 static void destroy_range(T *, T *) {}
265
266 /// Move the range [I, E) onto the uninitialized memory
267 /// starting with "Dest", constructing elements into it as needed.
268 template<typename It1, typename It2>
269 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
270 // Just do a copy.
271 uninitialized_copy(I, E, Dest);
272 }
273
274 /// Copy the range [I, E) onto the uninitialized memory
275 /// starting with "Dest", constructing elements into it as needed.
276 template<typename It1, typename It2>
277 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
278 // Arbitrary iterator types; just use the basic implementation.
279 std::uninitialized_copy(I, E, Dest);
280 }
281
282 /// Copy the range [I, E) onto the uninitialized memory
283 /// starting with "Dest", constructing elements into it as needed.
284 template <typename T1, typename T2>
285 static void uninitialized_copy(
286 T1 *I, T1 *E, T2 *Dest,
287 typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
288 T2>::value>::type * = nullptr) {
289 // Use memcpy for PODs iterated by pointers (which includes SmallVector
290 // iterators): std::uninitialized_copy optimizes to memmove, but we can
291 // use memcpy here. Note that I and E are iterators and thus might be
292 // invalid for memcpy if they are equal.
293 if (I != E)
294 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
295 }
296
297 /// Double the size of the allocated memory, guaranteeing space for at
298 /// least one more element or MinSize if specified.
299 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
300
301public:
302 void push_back(const T &Elt) {
303 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
304 this->grow();
305 memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
306 this->set_size(this->size() + 1);
307 }
308
309 void pop_back() { this->set_size(this->size() - 1); }
310};
311
312/// This class consists of common code factored out of the SmallVector class to
313/// reduce code duplication based on the SmallVector 'N' template parameter.
314template <typename T>
315class SmallVectorImpl : public SmallVectorTemplateBase<T> {
316 using SuperClass = SmallVectorTemplateBase<T>;
317
318public:
319 using iterator = typename SuperClass::iterator;
320 using const_iterator = typename SuperClass::const_iterator;
321 using reference = typename SuperClass::reference;
322 using size_type = typename SuperClass::size_type;
323
324protected:
325 // Default ctor - Initialize to empty.
326 explicit SmallVectorImpl(unsigned N)
327 : SmallVectorTemplateBase<T>(N) {}
328
329public:
330 SmallVectorImpl(const SmallVectorImpl &) = delete;
331
332 ~SmallVectorImpl() {
333 // Subclass has already destructed this vector's elements.
334 // If this wasn't grown from the inline copy, deallocate the old space.
335 if (!this->isSmall())
336 free(this->begin());
337 }
338
339 void clear() {
340 this->destroy_range(this->begin(), this->end());
341 this->Size = 0;
342 }
343
344 void resize(size_type N) {
345 if (N < this->size()) {
346 this->destroy_range(this->begin()+N, this->end());
347 this->set_size(N);
348 } else if (N > this->size()) {
349 if (this->capacity() < N)
350 this->grow(N);
351 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
352 new (&*I) T();
353 this->set_size(N);
354 }
355 }
356
357 void resize(size_type N, const T &NV) {
358 if (N < this->size()) {
359 this->destroy_range(this->begin()+N, this->end());
360 this->set_size(N);
361 } else if (N > this->size()) {
362 if (this->capacity() < N)
363 this->grow(N);
364 std::uninitialized_fill(this->end(), this->begin()+N, NV);
365 this->set_size(N);
366 }
367 }
368
369 void reserve(size_type N) {
370 if (this->capacity() < N)
371 this->grow(N);
372 }
373
374 LLVM_NODISCARD T pop_back_val() {
375 T Result = ::std::move(this->back());
376 this->pop_back();
377 return Result;
378 }
379
380 void swap(SmallVectorImpl &RHS);
381
382 /// Add the specified range to the end of the SmallVector.
383 template <typename in_iter,
384 typename = typename std::enable_if<std::is_convertible<
385 typename std::iterator_traits<in_iter>::iterator_category,
386 std::input_iterator_tag>::value>::type>
387 void append(in_iter in_start, in_iter in_end) {
388 size_type NumInputs = std::distance(in_start, in_end);
389 if (NumInputs > this->capacity() - this->size())
390 this->grow(this->size()+NumInputs);
391
392 this->uninitialized_copy(in_start, in_end, this->end());
393 this->set_size(this->size() + NumInputs);
394 }
395
396 /// Append \p NumInputs copies of \p Elt to the end.
397 void append(size_type NumInputs, const T &Elt) {
398 if (NumInputs > this->capacity() - this->size())
399 this->grow(this->size()+NumInputs);
400
401 std::uninitialized_fill_n(this->end(), NumInputs, Elt);
402 this->set_size(this->size() + NumInputs);
403 }
404
405 void append(std::initializer_list<T> IL) {
406 append(IL.begin(), IL.end());
407 }
408
409 // FIXME: Consider assigning over existing elements, rather than clearing &
410 // re-initializing them - for all assign(...) variants.
411
412 void assign(size_type NumElts, const T &Elt) {
413 clear();
414 if (this->capacity() < NumElts)
415 this->grow(NumElts);
416 this->set_size(NumElts);
417 std::uninitialized_fill(this->begin(), this->end(), Elt);
418 }
419
420 template <typename in_iter,
421 typename = typename std::enable_if<std::is_convertible<
422 typename std::iterator_traits<in_iter>::iterator_category,
423 std::input_iterator_tag>::value>::type>
424 void assign(in_iter in_start, in_iter in_end) {
425 clear();
426 append(in_start, in_end);
427 }
428
429 void assign(std::initializer_list<T> IL) {
430 clear();
431 append(IL);
432 }
433
434 iterator erase(const_iterator CI) {
435 // Just cast away constness because this is a non-const member function.
436 iterator I = const_cast<iterator>(CI);
437
438 assert(I >= this->begin() && "Iterator to erase is out of bounds.");
439 assert(I < this->end() && "Erasing at past-the-end iterator.");
440
441 iterator N = I;
442 // Shift all elts down one.
443 std::move(I+1, this->end(), I);
444 // Drop the last elt.
445 this->pop_back();
446 return(N);
447 }
448
449 iterator erase(const_iterator CS, const_iterator CE) {
450 // Just cast away constness because this is a non-const member function.
451 iterator S = const_cast<iterator>(CS);
452 iterator E = const_cast<iterator>(CE);
453
454 assert(S >= this->begin() && "Range to erase is out of bounds.");
455 assert(S <= E && "Trying to erase invalid range.");
456 assert(E <= this->end() && "Trying to erase past the end.");
457
458 iterator N = S;
459 // Shift all elts down.
460 iterator I = std::move(E, this->end(), S);
461 // Drop the last elts.
462 this->destroy_range(I, this->end());
463 this->set_size(I - this->begin());
464 return(N);
465 }
466
467 iterator insert(iterator I, T &&Elt) {
468 if (I == this->end()) { // Important special case for empty vector.
469 this->push_back(::std::move(Elt));
470 return this->end()-1;
471 }
472
473 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
474 assert(I <= this->end() && "Inserting past the end of the vector.");
475
476 if (this->size() >= this->capacity()) {
477 size_t EltNo = I-this->begin();
478 this->grow();
479 I = this->begin()+EltNo;
480 }
481
482 ::new ((void*) this->end()) T(::std::move(this->back()));
483 // Push everything else over.
484 std::move_backward(I, this->end()-1, this->end());
485 this->set_size(this->size() + 1);
486
487 // If we just moved the element we're inserting, be sure to update
488 // the reference.
489 T *EltPtr = &Elt;
490 if (I <= EltPtr && EltPtr < this->end())
491 ++EltPtr;
492
493 *I = ::std::move(*EltPtr);
494 return I;
495 }
496
497 iterator insert(iterator I, const T &Elt) {
498 if (I == this->end()) { // Important special case for empty vector.
499 this->push_back(Elt);
500 return this->end()-1;
501 }
502
503 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
504 assert(I <= this->end() && "Inserting past the end of the vector.");
505
506 if (this->size() >= this->capacity()) {
507 size_t EltNo = I-this->begin();
508 this->grow();
509 I = this->begin()+EltNo;
510 }
511 ::new ((void*) this->end()) T(std::move(this->back()));
512 // Push everything else over.
513 std::move_backward(I, this->end()-1, this->end());
514 this->set_size(this->size() + 1);
515
516 // If we just moved the element we're inserting, be sure to update
517 // the reference.
518 const T *EltPtr = &Elt;
519 if (I <= EltPtr && EltPtr < this->end())
520 ++EltPtr;
521
522 *I = *EltPtr;
523 return I;
524 }
525
526 iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
527 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
528 size_t InsertElt = I - this->begin();
529
530 if (I == this->end()) { // Important special case for empty vector.
531 append(NumToInsert, Elt);
532 return this->begin()+InsertElt;
533 }
534
535 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
536 assert(I <= this->end() && "Inserting past the end of the vector.");
537
538 // Ensure there is enough space.
539 reserve(this->size() + NumToInsert);
540
541 // Uninvalidate the iterator.
542 I = this->begin()+InsertElt;
543
544 // If there are more elements between the insertion point and the end of the
545 // range than there are being inserted, we can use a simple approach to
546 // insertion. Since we already reserved space, we know that this won't
547 // reallocate the vector.
548 if (size_t(this->end()-I) >= NumToInsert) {
549 T *OldEnd = this->end();
550 append(std::move_iterator<iterator>(this->end() - NumToInsert),
551 std::move_iterator<iterator>(this->end()));
552
553 // Copy the existing elements that get replaced.
554 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
555
556 std::fill_n(I, NumToInsert, Elt);
557 return I;
558 }
559
560 // Otherwise, we're inserting more elements than exist already, and we're
561 // not inserting at the end.
562
563 // Move over the elements that we're about to overwrite.
564 T *OldEnd = this->end();
565 this->set_size(this->size() + NumToInsert);
566 size_t NumOverwritten = OldEnd-I;
567 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
568
569 // Replace the overwritten part.
570 std::fill_n(I, NumOverwritten, Elt);
571
572 // Insert the non-overwritten middle part.
573 std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
574 return I;
575 }
576
577 template <typename ItTy,
578 typename = typename std::enable_if<std::is_convertible<
579 typename std::iterator_traits<ItTy>::iterator_category,
580 std::input_iterator_tag>::value>::type>
581 iterator insert(iterator I, ItTy From, ItTy To) {
582 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
583 size_t InsertElt = I - this->begin();
584
585 if (I == this->end()) { // Important special case for empty vector.
586 append(From, To);
587 return this->begin()+InsertElt;
588 }
589
590 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
591 assert(I <= this->end() && "Inserting past the end of the vector.");
592
593 size_t NumToInsert = std::distance(From, To);
594
595 // Ensure there is enough space.
596 reserve(this->size() + NumToInsert);
597
598 // Uninvalidate the iterator.
599 I = this->begin()+InsertElt;
600
601 // If there are more elements between the insertion point and the end of the
602 // range than there are being inserted, we can use a simple approach to
603 // insertion. Since we already reserved space, we know that this won't
604 // reallocate the vector.
605 if (size_t(this->end()-I) >= NumToInsert) {
606 T *OldEnd = this->end();
607 append(std::move_iterator<iterator>(this->end() - NumToInsert),
608 std::move_iterator<iterator>(this->end()));
609
610 // Copy the existing elements that get replaced.
611 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
612
613 std::copy(From, To, I);
614 return I;
615 }
616
617 // Otherwise, we're inserting more elements than exist already, and we're
618 // not inserting at the end.
619
620 // Move over the elements that we're about to overwrite.
621 T *OldEnd = this->end();
622 this->set_size(this->size() + NumToInsert);
623 size_t NumOverwritten = OldEnd-I;
624 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
625
626 // Replace the overwritten part.
627 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
628 *J = *From;
629 ++J; ++From;
630 }
631
632 // Insert the non-overwritten middle part.
633 this->uninitialized_copy(From, To, OldEnd);
634 return I;
635 }
636
637 void insert(iterator I, std::initializer_list<T> IL) {
638 insert(I, IL.begin(), IL.end());
639 }
640
641 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
642 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
643 this->grow();
644 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
645 this->set_size(this->size() + 1);
646 return this->back();
647 }
648
649 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
650
651 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
652
653 bool operator==(const SmallVectorImpl &RHS) const {
654 if (this->size() != RHS.size()) return false;
655 return std::equal(this->begin(), this->end(), RHS.begin());
656 }
657 bool operator!=(const SmallVectorImpl &RHS) const {
658 return !(*this == RHS);
659 }
660
661 bool operator<(const SmallVectorImpl &RHS) const {
662 return std::lexicographical_compare(this->begin(), this->end(),
663 RHS.begin(), RHS.end());
664 }
665};
666
667template <typename T>
668void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
669 if (this == &RHS) return;
670
671 // We can only avoid copying elements if neither vector is small.
672 if (!this->isSmall() && !RHS.isSmall()) {
673 std::swap(this->BeginX, RHS.BeginX);
674 std::swap(this->Size, RHS.Size);
675 std::swap(this->Capacity, RHS.Capacity);
676 return;
677 }
678 if (RHS.size() > this->capacity())
679 this->grow(RHS.size());
680 if (this->size() > RHS.capacity())
681 RHS.grow(this->size());
682
683 // Swap the shared elements.
684 size_t NumShared = this->size();
685 if (NumShared > RHS.size()) NumShared = RHS.size();
686 for (size_type i = 0; i != NumShared; ++i)
687 std::swap((*this)[i], RHS[i]);
688
689 // Copy over the extra elts.
690 if (this->size() > RHS.size()) {
691 size_t EltDiff = this->size() - RHS.size();
692 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
693 RHS.set_size(RHS.size() + EltDiff);
694 this->destroy_range(this->begin()+NumShared, this->end());
695 this->set_size(NumShared);
696 } else if (RHS.size() > this->size()) {
697 size_t EltDiff = RHS.size() - this->size();
698 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
699 this->set_size(this->size() + EltDiff);
700 this->destroy_range(RHS.begin()+NumShared, RHS.end());
701 RHS.set_size(NumShared);
702 }
703}
704
705template <typename T>
706SmallVectorImpl<T> &SmallVectorImpl<T>::
707 operator=(const SmallVectorImpl<T> &RHS) {
708 // Avoid self-assignment.
709 if (this == &RHS) return *this;
710
711 // If we already have sufficient space, assign the common elements, then
712 // destroy any excess.
713 size_t RHSSize = RHS.size();
714 size_t CurSize = this->size();
715 if (CurSize >= RHSSize) {
716 // Assign common elements.
717 iterator NewEnd;
718 if (RHSSize)
719 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
720 else
721 NewEnd = this->begin();
722
723 // Destroy excess elements.
724 this->destroy_range(NewEnd, this->end());
725
726 // Trim.
727 this->set_size(RHSSize);
728 return *this;
729 }
730
731 // If we have to grow to have enough elements, destroy the current elements.
732 // This allows us to avoid copying them during the grow.
733 // FIXME: don't do this if they're efficiently moveable.
734 if (this->capacity() < RHSSize) {
735 // Destroy current elements.
736 this->destroy_range(this->begin(), this->end());
737 this->set_size(0);
738 CurSize = 0;
739 this->grow(RHSSize);
740 } else if (CurSize) {
741 // Otherwise, use assignment for the already-constructed elements.
742 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
743 }
744
745 // Copy construct the new elements in place.
746 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
747 this->begin()+CurSize);
748
749 // Set end.
750 this->set_size(RHSSize);
751 return *this;
752}
753
754template <typename T>
755SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
756 // Avoid self-assignment.
757 if (this == &RHS) return *this;
758
759 // If the RHS isn't small, clear this vector and then steal its buffer.
760 if (!RHS.isSmall()) {
761 this->destroy_range(this->begin(), this->end());
762 if (!this->isSmall()) free(this->begin());
763 this->BeginX = RHS.BeginX;
764 this->Size = RHS.Size;
765 this->Capacity = RHS.Capacity;
766 RHS.resetToSmall();
767 return *this;
768 }
769
770 // If we already have sufficient space, assign the common elements, then
771 // destroy any excess.
772 size_t RHSSize = RHS.size();
773 size_t CurSize = this->size();
774 if (CurSize >= RHSSize) {
775 // Assign common elements.
776 iterator NewEnd = this->begin();
777 if (RHSSize)
778 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
779
780 // Destroy excess elements and trim the bounds.
781 this->destroy_range(NewEnd, this->end());
782 this->set_size(RHSSize);
783
784 // Clear the RHS.
785 RHS.clear();
786
787 return *this;
788 }
789
790 // If we have to grow to have enough elements, destroy the current elements.
791 // This allows us to avoid copying them during the grow.
792 // FIXME: this may not actually make any sense if we can efficiently move
793 // elements.
794 if (this->capacity() < RHSSize) {
795 // Destroy current elements.
796 this->destroy_range(this->begin(), this->end());
797 this->set_size(0);
798 CurSize = 0;
799 this->grow(RHSSize);
800 } else if (CurSize) {
801 // Otherwise, use assignment for the already-constructed elements.
802 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
803 }
804
805 // Move-construct the new elements in place.
806 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
807 this->begin()+CurSize);
808
809 // Set end.
810 this->set_size(RHSSize);
811
812 RHS.clear();
813 return *this;
814}
815
816/// Storage for the SmallVector elements. This is specialized for the N=0 case
817/// to avoid allocating unnecessary storage.
818template <typename T, unsigned N>
819struct SmallVectorStorage {
820 AlignedCharArrayUnion<T> InlineElts[N];
821};
822
823/// We need the storage to be properly aligned even for small-size of 0 so that
824/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
825/// well-defined.
826template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {};
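
// Illustrative sketch, not part of SmallVector.h: the N = 0 specialization
// above avoids reserving any inline element storage, so SmallVector<T, 0>
// degenerates to a purely heap-backed vector.  The element type below is an
// arbitrary example.
//
//   llvm::SmallVector<int, 0> V;   // no inline elements reserved
//   V.push_back(42);               // first push_back triggers a heap grow()
//   assert(V.size() == 1 && !V.empty());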
827
828/// This is a 'vector' (really, a variable-sized array), optimized
829/// for the case when the array is small. It contains some number of elements
830/// in-place, which allows it to avoid heap allocation when the actual number of
831/// elements is below that threshold. This allows normal "small" cases to be
832/// fast without losing generality for large inputs.
833///
834/// Note that this does not attempt to be exception safe.
835///
836template <typename T, unsigned N>
837class SmallVector : public SmallVectorImpl<T>, SmallVectorStorage<T, N> {
838public:
839 SmallVector() : SmallVectorImpl<T>(N) {}
840
841 ~SmallVector() {
842 // Destroy the constructed elements in the vector.
843 this->destroy_range(this->begin(), this->end());
844 }
845
846 explicit SmallVector(size_t Size, const T &Value = T())
847 : SmallVectorImpl<T>(N) {
848 this->assign(Size, Value);
849 }
850
851 template <typename ItTy,
852 typename = typename std::enable_if<std::is_convertible<
853 typename std::iterator_traits<ItTy>::iterator_category,
854 std::input_iterator_tag>::value>::type>
855 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
856 this->append(S, E);
857 }
858
859 template <typename RangeTy>
860 explicit SmallVector(const iterator_range<RangeTy> &R)
861 : SmallVectorImpl<T>(N) {
862 this->append(R.begin(), R.end());
863 }
864
865 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
866 this->assign(IL);
867 }
868
869 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
870 if (!RHS.empty())
871 SmallVectorImpl<T>::operator=(RHS);
872 }
873
874 const SmallVector &operator=(const SmallVector &RHS) {
875 SmallVectorImpl<T>::operator=(RHS);
876 return *this;
877 }
878
879 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
880 if (!RHS.empty())
881 SmallVectorImpl<T>::operator=(::std::move(RHS));
882 }
883
884 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
885 if (!RHS.empty())
886 SmallVectorImpl<T>::operator=(::std::move(RHS));
887 }
888
889 const SmallVector &operator=(SmallVector &&RHS) {
890 SmallVectorImpl<T>::operator=(::std::move(RHS));
891 return *this;
892 }
893
894 const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
895 SmallVectorImpl<T>::operator=(::std::move(RHS));
896 return *this;
897 }
898
899 const SmallVector &operator=(std::initializer_list<T> IL) {
900 this->assign(IL);
901 return *this;
902 }
903};
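
// Illustrative usage sketch, not part of SmallVector.h: it only demonstrates
// the small-size optimization described in the class comment above.  The
// element type and inline count (4) are arbitrary choices, and 'process' is a
// hypothetical callback.
//
//   llvm::SmallVector<int, 4> V;
//   V.push_back(1);
//   V.push_back(2);                // still within the 4 inline slots, no heap
//   V.append({3, 4, 5});           // size becomes 5, spills to the heap
//   for (int X : V)
//     process(X);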
904
905template <typename T, unsigned N>
906inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
907 return X.capacity_in_bytes();
908}
909
910/// Given a range of type R, iterate the entire range and return a
911/// SmallVector with elements of the vector. This is useful, for example,
912/// when you want to iterate a range and then sort the results.
913template <unsigned Size, typename R>
914SmallVector<typename std::remove_const<typename std::remove_reference<
915 decltype(*std::begin(std::declval<R &>()))>::type>::type,
916 Size>
917to_vector(R &&Range) {
918 return {std::begin(Range), std::end(Range)};
919}
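
// Illustrative sketch, not part of SmallVector.h: to_vector materializes an
// arbitrary range into a SmallVector so the elements can then be mutated,
// e.g. sorted.  The container and inline count below are arbitrary examples.
//
//   std::list<int> L = {3, 1, 2};
//   auto V = llvm::to_vector<4>(L);   // SmallVector<int, 4> holding 3, 1, 2
//   std::sort(V.begin(), V.end());    // contiguous storage, unlike std::list
//   // V now holds 1, 2, 3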
920
921} // end namespace llvm
922
923namespace std {
924
925 /// Implement std::swap in terms of SmallVector swap.
926 template<typename T>
927 inline void
928 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
929 LHS.swap(RHS);
930 }
931
932 /// Implement std::swap in terms of SmallVector swap.
933 template<typename T, unsigned N>
934 inline void
935 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
936 LHS.swap(RHS);
937 }
938
939} // end namespace std
940
941#endif // LLVM_ADT_SMALLVECTOR_H