File:        lib/Transforms/Scalar/ScalarReplAggregates.cpp
Location:    line 1997, column 13
Description: Called C++ object pointer is null
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because they
// often interact, especially for C++ programs.  As such, iterating between
// SRoA, then Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
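//
// Illustrative example (added commentary, not part of the original header):
// given an aggregate alloca such as
//
//   %p = alloca { i32, float }
//
// SROA rewrites its field accesses to use one alloca per member,
//
//   %p.0 = alloca i32
//   %p.1 = alloca float
//
// after which mem2reg can usually promote both scalars into SSA registers.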

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

#define DEBUG_TYPE "scalarrepl"

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumAdjusted,  "Number of scalar allocas adjusted to allow promotion");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");

namespace {
#define SROA SROA_
struct SROA : public FunctionPass {
  SROA(int T, bool hasDT, char &ID, int ST, int AT, int SLT)
    : FunctionPass(ID), HasDomTree(hasDT) {
    if (T == -1)
      SRThreshold = 128;
    else
      SRThreshold = T;
    if (ST == -1)
      StructMemberThreshold = 32;
    else
      StructMemberThreshold = ST;
    if (AT == -1)
      ArrayElementThreshold = 8;
    else
      ArrayElementThreshold = AT;
    if (SLT == -1)
      // Do not limit the scalar integer load size if no threshold is given.
      ScalarLoadThreshold = -1;
    else
      ScalarLoadThreshold = SLT;
  }
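
  // In short (summary comment, not in the original source): with every
  // argument left at -1 the pass uses a 128-byte alloca size limit, at most
  // 32 struct members, at most 8 array elements, and no limit on the scalar
  // integer load size.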

  bool runOnFunction(Function &F) override;

  bool performScalarRepl(Function &F);
  bool performPromotion(Function &F);

private:
  bool HasDomTree;

  /// DeadInsts - Keep track of instructions we have made dead, so that
  /// we can remove them after we are done working.
  SmallVector<Value*, 32> DeadInsts;

  /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
  /// information about the uses.  All these fields are initialized to false
  /// and set to true when something is learned.
  struct AllocaInfo {
    /// The alloca to promote.
    AllocaInst *AI;

    /// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite
    /// looping and avoid redundant work.
    SmallPtrSet<PHINode*, 8> CheckedPHIs;

    /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
    bool isUnsafe : 1;

    /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
    bool isMemCpySrc : 1;

    /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
    bool isMemCpyDst : 1;

    /// hasSubelementAccess - This is true if a subelement of the alloca is
    /// ever accessed, or false if the alloca is only accessed with mem
    /// intrinsics or load/store that only access the entire alloca at once.
    bool hasSubelementAccess : 1;

    /// hasALoadOrStore - This is true if there are any loads or stores to it.
    /// The alloca may just be accessed with memcpy, for example, which would
    /// not set this.
    bool hasALoadOrStore : 1;

    explicit AllocaInfo(AllocaInst *ai)
      : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),
        hasSubelementAccess(false), hasALoadOrStore(false) {}
  };

  /// SRThreshold - The maximum alloca size to be considered for SROA.
  unsigned SRThreshold;

  /// StructMemberThreshold - The maximum number of members a struct can
  /// contain to be considered for SROA.
  unsigned StructMemberThreshold;

  /// ArrayElementThreshold - The maximum number of elements an array can
  /// have to be considered for SROA.
  unsigned ArrayElementThreshold;

  /// ScalarLoadThreshold - The maximum size in bits of scalars to load when
  /// converting to scalar.
  unsigned ScalarLoadThreshold;

  void MarkUnsafe(AllocaInfo &I, Instruction *User) {
    I.isUnsafe = true;
    DEBUG(dbgs() << "  Transformation preventing inst: " << *User << '\n');
  }

  bool isSafeAllocaToScalarRepl(AllocaInst *AI);

  void isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info);
  void isSafePHISelectUseForScalarRepl(Instruction *User, uint64_t Offset,
                                       AllocaInfo &Info);
  void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info);
  void isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
                       Type *MemOpType, bool isStore, AllocaInfo &Info,
                       Instruction *TheAccess, bool AllowWholeAccess);
  bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size,
                        const DataLayout &DL);
  uint64_t FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy,
                                const DataLayout &DL);

  void DoScalarReplacement(AllocaInst *AI,
                           std::vector<AllocaInst*> &WorkList);
  void DeleteDeadInstructions();

  void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                            SmallVectorImpl<AllocaInst *> &NewElts);
  void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                      SmallVectorImpl<AllocaInst *> &NewElts);
  void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                  SmallVectorImpl<AllocaInst *> &NewElts);
  void RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
                                uint64_t Offset,
                                SmallVectorImpl<AllocaInst *> &NewElts);
  void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                    AllocaInst *AI,
                                    SmallVectorImpl<AllocaInst *> &NewElts);
  void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                     SmallVectorImpl<AllocaInst *> &NewElts);
  void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                    SmallVectorImpl<AllocaInst *> &NewElts);
  bool ShouldAttemptScalarRepl(AllocaInst *AI);
};

// SROA_DT - SROA that uses DominatorTree.
struct SROA_DT : public SROA {
  static char ID;
public:
  SROA_DT(int T = -1, int ST = -1, int AT = -1, int SLT = -1) :
    SROA(T, true, ID, ST, AT, SLT) {
    initializeSROA_DTPass(*PassRegistry::getPassRegistry());
  }

  // getAnalysisUsage - This pass does not require any passes, but we know it
  // will not alter the CFG, so say so.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.setPreservesCFG();
  }
};

// SROA_SSAUp - SROA that uses SSAUpdater.
struct SROA_SSAUp : public SROA {
  static char ID;
public:
  SROA_SSAUp(int T = -1, int ST = -1, int AT = -1, int SLT = -1) :
    SROA(T, false, ID, ST, AT, SLT) {
    initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry());
  }

  // getAnalysisUsage - This pass does not require any passes, but we know it
  // will not alter the CFG, so say so.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.setPreservesCFG();
  }
};

}

char SROA_DT::ID = 0;
char SROA_SSAUp::ID = 0;

INITIALIZE_PASS_BEGIN(SROA_DT, "scalarrepl",
                      "Scalar Replacement of Aggregates (DT)", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA_DT, "scalarrepl",
                    "Scalar Replacement of Aggregates (DT)", false, false)

INITIALIZE_PASS_BEGIN(SROA_SSAUp, "scalarrepl-ssa",
                      "Scalar Replacement of Aggregates (SSAUp)", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(SROA_SSAUp, "scalarrepl-ssa",
                    "Scalar Replacement of Aggregates (SSAUp)", false, false)

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(int Threshold,
                                                   bool UseDomTree,
                                                   int StructMemberThreshold,
                                                   int ArrayElementThreshold,
                                                   int ScalarLoadThreshold) {
  if (UseDomTree)
    return new SROA_DT(Threshold, StructMemberThreshold, ArrayElementThreshold,
                       ScalarLoadThreshold);
  return new SROA_SSAUp(Threshold, StructMemberThreshold,
                        ArrayElementThreshold, ScalarLoadThreshold);
}
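
// Usage sketch (hypothetical client code, not part of this file): a legacy
// pass pipeline would create one of the two variants through this factory,
// e.g.
//
//   legacy::PassManager PM;
//   PM.add(createScalarReplAggregatesPass(/*Threshold=*/-1,
//                                         /*UseDomTree=*/true));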


//===----------------------------------------------------------------------===//
// Convert To Scalar Optimization.
//===----------------------------------------------------------------------===//

namespace {
/// ConvertToScalarInfo - This class implements the "Convert To Scalar"
/// optimization, which scans the uses of an alloca and determines if it can
/// rewrite it in terms of a single new alloca that can be mem2reg'd.
class ConvertToScalarInfo {
  /// AllocaSize - The size of the alloca being considered in bytes.
  unsigned AllocaSize;
  const DataLayout &DL;
  unsigned ScalarLoadThreshold;

  /// IsNotTrivial - This is set to true if there is some access to the object
  /// which means that mem2reg can't promote it.
  bool IsNotTrivial;

  /// ScalarKind - Tracks the kind of alloca being considered for promotion,
  /// computed based on the uses of the alloca rather than the LLVM type
  /// system.
  enum {
    Unknown,

    // Accesses via GEPs that are consistent with element access of a vector
    // type. This will not be converted into a vector unless there is a later
    // access using an actual vector type.
    ImplicitVector,

    // Accesses via vector operations and GEPs that are consistent with the
    // layout of a vector type.
    Vector,

    // An integer bag-of-bits with bitwise operations for insertion and
    // extraction. Any combination of types can be converted into this kind
    // of scalar.
    Integer
  } ScalarKind;

  /// VectorTy - This tracks the type that we should promote the vector to if
  /// it is possible to turn it into a vector.  This starts out null, and if it
  /// isn't possible to turn into a vector type, it gets set to VoidTy.
  VectorType *VectorTy;

  /// HadNonMemTransferAccess - True if there is at least one access to the
  /// alloca that is not a MemTransferInst.  We don't want to turn structs into
  /// large integers unless there is some potential for optimization.
  bool HadNonMemTransferAccess;

  /// HadDynamicAccess - True if some element of this alloca was dynamic.
  /// We don't yet have support for turning a dynamic access into a large
  /// integer.
  bool HadDynamicAccess;

public:
  explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
                               unsigned SLT)
    : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false),
      ScalarKind(Unknown), VectorTy(nullptr), HadNonMemTransferAccess(false),
      HadDynamicAccess(false) { }

  AllocaInst *TryConvert(AllocaInst *AI);

private:
  bool CanConvertToScalar(Value *V, uint64_t Offset, Value* NonConstantIdx);
  void MergeInTypeForLoadOrStore(Type *In, uint64_t Offset);
  bool MergeInVectorType(VectorType *VInTy, uint64_t Offset);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset,
                           Value *NonConstantIdx);

  Value *ConvertScalar_ExtractValue(Value *NV, Type *ToType,
                                    uint64_t Offset, Value* NonConstantIdx,
                                    IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                   uint64_t Offset, Value* NonConstantIdx,
                                   IRBuilder<> &Builder);
};
} // end anonymous namespace.
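
// Typical driver usage (a sketch, not code from this file; AllocaSizeInBytes
// stands for the alloca's known size):
//
//   ConvertToScalarInfo Info(AllocaSizeInBytes, DL, ScalarLoadThreshold);
//   if (AllocaInst *NewAI = Info.TryConvert(AI)) {
//     // All uses of AI were rewritten in terms of NewAI, which is now a
//     // mem2reg candidate.
//   }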


/// TryConvert - Analyze the specified alloca, and if it is safe to do so,
/// rewrite it to be a new alloca which is mem2reg'able.  This returns the new
/// alloca if possible or null if not.
AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
  // If we can't convert this scalar, or if mem2reg can trivially do it, bail
  // out.
  if (!CanConvertToScalar(AI, 0, nullptr) || !IsNotTrivial)
    return nullptr;

  // If an alloca has only memset / memcpy uses, it may still have an Unknown
  // ScalarKind. Treat it as an Integer below.
  if (ScalarKind == Unknown)
    ScalarKind = Integer;

  if (ScalarKind == Vector && VectorTy->getBitWidth() != AllocaSize * 8)
    ScalarKind = Integer;

  // If we were able to find a vector type that can handle this with
  // insert/extract elements, and if there was at least one use that had
  // a vector type, promote this to a vector.  We don't want to promote
  // random stuff that doesn't use vectors (e.g. <9 x double>) because then
  // we just get a lot of insert/extracts.  If at least one vector is
  // involved, then we probably really do have a union of vector/array.
  Type *NewTy;
  if (ScalarKind == Vector) {
    assert(VectorTy && "Missing type for vector scalar.");
    DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n  TYPE = "
                 << *VectorTy << '\n');
    NewTy = VectorTy;  // Use the vector type.
  } else {
    unsigned BitWidth = AllocaSize * 8;

    // Do not convert to scalar integer if the alloca size exceeds the
    // scalar load threshold.
    if (BitWidth > ScalarLoadThreshold)
      return nullptr;

    if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
        !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
      return nullptr;
    // Dynamic accesses on integers aren't yet supported.  They need us to shift
    // by a dynamic amount which could be difficult to work out as we might not
    // know whether to use a left or right shift.
    if (ScalarKind == Integer && HadDynamicAccess)
      return nullptr;

    DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
    // Create and insert the integer alloca.
    NewTy = IntegerType::get(AI->getContext(), BitWidth);
  }
  AllocaInst *NewAI =
      new AllocaInst(NewTy, nullptr, "", &AI->getParent()->front());
  ConvertUsesToScalar(AI, NewAI, 0, nullptr);
  return NewAI;
}

/// MergeInTypeForLoadOrStore - Add the 'In' type to the accumulated vector type
/// (VectorTy) so far at the offset specified by Offset (which is specified in
/// bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.  We mark this by setting VectorTy
///      to VoidTy.
void ConvertToScalarInfo::MergeInTypeForLoadOrStore(Type *In,
                                                    uint64_t Offset) {
  // If we already decided to turn this into a blob of integer memory, there is
  // nothing to be done.
  if (ScalarKind == Integer)
    return;

  // If this could be contributing to a vector, analyze it.

  // If the In type is a vector that is the same size as the alloca, see if it
  // matches the existing VecTy.
  if (VectorType *VInTy = dyn_cast<VectorType>(In)) {
    if (MergeInVectorType(VInTy, Offset))
      return;
  } else if (In->isFloatTy() || In->isDoubleTy() ||
             (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
              isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
    // Full width accesses can be ignored, because they can always be turned
    // into bitcasts.
    unsigned EltSize = In->getPrimitiveSizeInBits()/8;
    if (EltSize == AllocaSize)
      return;

    // If we're accessing something that could be an element of a vector, see
    // if the implied vector agrees with what we already have and if Offset is
    // compatible with it.
    if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
        (!VectorTy || EltSize == VectorTy->getElementType()
                                         ->getPrimitiveSizeInBits()/8)) {
      if (!VectorTy) {
        ScalarKind = ImplicitVector;
        VectorTy = VectorType::get(In, AllocaSize/EltSize);
      }
      return;
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  ScalarKind = Integer;
}

/// MergeInVectorType - Handles the vector case of MergeInTypeForLoadOrStore,
/// returning true if the type was successfully merged and false otherwise.
bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
                                            uint64_t Offset) {
  if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
    // If we're storing/loading a vector of the right size, allow it as a
    // vector.  If this is the first vector we see, remember the type so that
    // we know the element size.  If this is a subsequent access, ignore it
    // even if it is a differing type but the same size.  Worst case we can
    // bitcast the resultant vectors.
    if (!VectorTy)
      VectorTy = VInTy;
    ScalarKind = Vector;
    return true;
  }

  return false;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// its accesses to a single vector type, return true and set VecTy to
/// the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial.  Offset
/// is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value that is a vector type, set the
/// SawVec flag.
bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
                                             Value* NonConstantIdx) {
  for (User *U : V->users()) {
    Instruction *UI = cast<Instruction>(U);

    if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      // Don't break volatile loads.
      if (!LI->isSimple())
        return false;
      // Don't touch MMX operations.
      if (LI->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(LI->getType(), Offset);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || !SI->isSimple()) return false;
      // Don't touch MMX operations.
      if (SI->getOperand(0)->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(SI->getOperand(0)->getType(), Offset);
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(UI)) {
      if (!onlyUsedByLifetimeMarkers(BCI))
        IsNotTrivial = true;  // Can't be mem2reg'd.
      if (!CanConvertToScalar(BCI, Offset, NonConstantIdx))
        return false;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UI)) {
      // If this is a GEP with variable indices, we can't handle it.
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      Value *GEPNonConstantIdx = nullptr;
      if (!GEP->hasAllConstantIndices()) {
        if (!isa<VectorType>(GEP->getSourceElementType()))
          return false;
        if (NonConstantIdx)
          return false;
        GEPNonConstantIdx = Indices.pop_back_val();
        if (!GEPNonConstantIdx->getType()->isIntegerTy(32))
          return false;
        HadDynamicAccess = true;
      } else
        GEPNonConstantIdx = NonConstantIdx;
      uint64_t GEPOffset = DL.getIndexedOffsetInType(GEP->getSourceElementType(),
                                                     Indices);
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
        return false;
      IsNotTrivial = true;  // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(UI)) {
      // Store to dynamic index.
      if (NonConstantIdx)
        return false;
      // Store of constant value.
      if (!isa<ConstantInt>(MSI->getValue()))
        return false;

      // Store of constant size.
      ConstantInt *Len = dyn_cast<ConstantInt>(MSI->getLength());
      if (!Len)
        return false;

      // If the size differs from the alloca, we can only convert the alloca to
      // an integer bag-of-bits.
      // FIXME: This should handle all of the cases that are currently accepted
      // as vector element insertions.
      if (Len->getZExtValue() != AllocaSize || Offset != 0)
        ScalarKind = Integer;

      IsNotTrivial = true;  // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(UI)) {
      // Store to dynamic index.
      if (NonConstantIdx)
        return false;
      ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
      if (!Len || Len->getZExtValue() != AllocaSize || Offset != 0)
        return false;

      IsNotTrivial = true;  // Can't be mem2reg'd.
      continue;
    }

    // If this is a lifetime intrinsic, we can handle it.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UI)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        continue;
      }
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}
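
// In short (summary comment, not in the original source): the only uses
// tolerated above are simple non-MMX loads and stores, bitcasts, GEPs with
// constant indices (or a single i32 vector index), constant memsets and
// whole-alloca memcpy/memmove, and lifetime markers; any other use makes the
// alloca unconvertible.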

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
                                              uint64_t Offset,
                                              Value* NonConstantIdx) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->user_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset, NonConstantIdx);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      Value* GEPNonConstantIdx = nullptr;
      if (!GEP->hasAllConstantIndices()) {
        assert(!NonConstantIdx &&
               "Dynamic GEP reading from dynamic GEP unsupported");
        GEPNonConstantIdx = Indices.pop_back_val();
      } else
        GEPNonConstantIdx = NonConstantIdx;
      uint64_t GEPOffset = DL.getIndexedOffsetInType(GEP->getSourceElementType(),
                                                     Indices);
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI);
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset,
                                     NonConstantIdx, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             NonConstantIdx, Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();

      // If the load we just inserted is now dead, then the inserted store
      // overwrote the entire thing.
      if (Old->use_empty())
        Old->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      assert(!NonConstantIdx && "Cannot replace dynamic memset with insert");
      int64_t SNumBytes = cast<ConstantInt>(MSI->getLength())->getSExtValue();
      if (SNumBytes > 0 && (SNumBytes >> 32) == 0) {
        unsigned NumBytes = static_cast<unsigned>(SNumBytes);
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
        Value *New = ConvertScalar_InsertValue(
            ConstantInt::get(User->getContext(), APVal),
            Old, Offset, nullptr, Builder);
        Builder.CreateStore(New, NewAI);

        // If the load we just inserted is now dead, then the memset overwrote
        // the entire thing.
        if (Old->use_empty())
          Old->eraseFromParent();
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");
      assert(!NonConstantIdx && "Cannot replace dynamic transfer with insert");

      // If the source and destination are both to the same alloca, then this is
      // a noop copy-to-self, just delete it.  Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, DL, 0));

      if (GetUnderlyingObject(MTI->getSource(), DL, 0) != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        PointerType* SPTy = cast<PointerType>(SrcPtr->getType());
        PointerType* AIPTy = cast<PointerType>(NewAI->getType());
        if (SPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
          AIPTy = PointerType::get(NewAI->getAllocatedType(),
                                   SPTy->getAddressSpace());
        }
        SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy);

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (GetUnderlyingObject(MTI->getDest(), DL, 0) != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType());
        PointerType* AIPTy = cast<PointerType>(NewAI->getType());
        if (DPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
          AIPTy = PointerType::get(NewAI->getAllocatedType(),
                                   DPTy->getAddressSpace());
        }
        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy);

        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        // There's no need to preserve these, as the resulting alloca will be
        // converted to a register anyways.
        II->eraseFromParent();
        continue;
      }
    }

    llvm_unreachable("Unsupported operation!");
  }
}

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset.  This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
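///
/// Illustrative example (added commentary, little-endian case): extracting
/// an i16 at bit offset 16 from an i64 bag-of-bits becomes, roughly,
///
///   %sh = lshr i64 %FromVal, 16
///   %r  = trunc i64 %sh to i16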
Value *ConvertToScalarInfo::
ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
                           uint64_t Offset, Value* NonConstantIdx,
                           IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  Type *FromType = FromVal->getType();
  if (FromType == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
    unsigned FromTypeSize = DL.getTypeAllocSize(FromType);
    unsigned ToTypeSize = DL.getTypeAllocSize(ToType);
    if (FromTypeSize == ToTypeSize)
      return Builder.CreateBitCast(FromVal, ToType);

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *Idx;
    if (NonConstantIdx) {
      if (Elt)
        Idx = Builder.CreateAdd(NonConstantIdx,
                                Builder.getInt32(Elt),
                                "dyn.offset");
      else
        Idx = NonConstantIdx;
    } else
      Idx = Builder.getInt32(Elt);
    Value *V = Builder.CreateExtractElement(FromVal, Idx);
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType);
    return V;
  }

  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue's to form the FCA.
  if (StructType *ST = dyn_cast<StructType>(ToType)) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into struct types not supported");
    const StructLayout &Layout = *DL.getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                              Offset+Layout.getElementOffsetInBits(i),
                                              nullptr, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i);
    }
    return Res;
  }

  if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    assert(!NonConstantIdx &&
           "Dynamic indexing into array types not supported");
    uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, nullptr,
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i);
    }
    return Res;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (DL.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DL.getTypeStoreSizeInBits(NTy) -
            DL.getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shl) which are not defined.
  // We do this to support (f.e.) loads off the end of a structure where
  // only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(), ShAmt));
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(), -ShAmt));

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = DL.getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
        Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                      LIBitWidth));
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
        Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                     LIBitWidth));

  // If the result is an integer, this is a trunc or bitcast.
  if (ToType->isIntegerTy()) {
    // Should be done.
  } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType);
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType);
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}

/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
///
/// NonConstantIdx is an index value if there was a GEP with a non-constant
/// index value.  If this is 0 then all GEPs used to find this insert address
/// are constant.
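///
/// Illustrative example (added commentary, little-endian case): inserting an
/// i8 at bit offset 8 into an i32 bag-of-bits becomes, roughly,
///
///   %w = zext i8 %SV to i32
///   %s = shl i32 %w, 8
///   %m = and i32 %Old, -65281    ; clear bits 8..15, i.e. ~(0xff << 8)
///   %r = or i32 %m, %s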
900 | Value *ConvertToScalarInfo:: | |||
901 | ConvertScalar_InsertValue(Value *SV, Value *Old, | |||
902 | uint64_t Offset, Value* NonConstantIdx, | |||
903 | IRBuilder<> &Builder) { | |||
904 | // Convert the stored type to the actual type, shift it left to insert | |||
905 | // then 'or' into place. | |||
906 | Type *AllocaType = Old->getType(); | |||
907 | LLVMContext &Context = Old->getContext(); | |||
908 | ||||
909 | if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) { | |||
910 | uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy); | |||
911 | uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType()); | |||
912 | ||||
913 | // Changing the whole vector with memset or with an access of a different | |||
914 | // vector type? | |||
915 | if (ValSize == VecSize) | |||
916 | return Builder.CreateBitCast(SV, AllocaType); | |||
917 | ||||
918 | // Must be an element insertion. | |||
919 | Type *EltTy = VTy->getElementType(); | |||
920 | if (SV->getType() != EltTy) | |||
921 | SV = Builder.CreateBitCast(SV, EltTy); | |||
922 | uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy); | |||
923 | unsigned Elt = Offset/EltSize; | |||
924 | Value *Idx; | |||
925 | if (NonConstantIdx) { | |||
926 | if (Elt) | |||
927 | Idx = Builder.CreateAdd(NonConstantIdx, | |||
928 | Builder.getInt32(Elt), | |||
929 | "dyn.offset"); | |||
930 | else | |||
931 | Idx = NonConstantIdx; | |||
932 | } else | |||
933 | Idx = Builder.getInt32(Elt); | |||
934 | return Builder.CreateInsertElement(Old, SV, Idx); | |||
935 | } | |||
936 | ||||
937 | // If SV is a first-class aggregate value, insert each value recursively. | |||
938 | if (StructType *ST = dyn_cast<StructType>(SV->getType())) { | |||
939 | assert(!NonConstantIdx &&((!NonConstantIdx && "Dynamic indexing into struct types not supported" ) ? static_cast<void> (0) : __assert_fail ("!NonConstantIdx && \"Dynamic indexing into struct types not supported\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Scalar/ScalarReplAggregates.cpp" , 940, __PRETTY_FUNCTION__)) | |||
940 | "Dynamic indexing into struct types not supported")((!NonConstantIdx && "Dynamic indexing into struct types not supported" ) ? static_cast<void> (0) : __assert_fail ("!NonConstantIdx && \"Dynamic indexing into struct types not supported\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Scalar/ScalarReplAggregates.cpp" , 940, __PRETTY_FUNCTION__)); | |||
941 | const StructLayout &Layout = *DL.getStructLayout(ST); | |||
942 | for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { | |||
943 | Value *Elt = Builder.CreateExtractValue(SV, i); | |||
944 | Old = ConvertScalar_InsertValue(Elt, Old, | |||
945 | Offset+Layout.getElementOffsetInBits(i), | |||
946 | nullptr, Builder); | |||
947 | } | |||
948 | return Old; | |||
949 | } | |||
950 | ||||
951 | if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) { | |||
952 | assert(!NonConstantIdx &&((!NonConstantIdx && "Dynamic indexing into array types not supported" ) ? static_cast<void> (0) : __assert_fail ("!NonConstantIdx && \"Dynamic indexing into array types not supported\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Scalar/ScalarReplAggregates.cpp" , 953, __PRETTY_FUNCTION__)) | |||
953 | "Dynamic indexing into array types not supported")((!NonConstantIdx && "Dynamic indexing into array types not supported" ) ? static_cast<void> (0) : __assert_fail ("!NonConstantIdx && \"Dynamic indexing into array types not supported\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn271203/lib/Transforms/Scalar/ScalarReplAggregates.cpp" , 953, __PRETTY_FUNCTION__)); | |||
954 | uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType()); | |||
955 | for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { | |||
956 | Value *Elt = Builder.CreateExtractValue(SV, i); | |||
957 | Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, nullptr, | |||
958 | Builder); | |||
959 | } | |||
960 | return Old; | |||
961 | } | |||
962 | ||||
963 | // If SV is a float, convert it to the appropriate integer type. | |||
964 | // If it is a pointer, do the same. | |||
965 | unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType()); | |||
966 | unsigned DestWidth = DL.getTypeSizeInBits(AllocaType); | |||
967 | unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType()); | |||
968 | unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType); | |||
969 | if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy()) | |||
970 | SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth)); | |||
971 | else if (SV->getType()->isPointerTy()) | |||
972 | SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType())); | |||
973 | ||||
974 | // Zero extend or truncate the value if needed. | |||
975 | if (SV->getType() != AllocaType) { | |||
976 | if (SV->getType()->getPrimitiveSizeInBits() < | |||
977 | AllocaType->getPrimitiveSizeInBits()) | |||
978 | SV = Builder.CreateZExt(SV, AllocaType); | |||
979 | else { | |||
980 | // Truncation may be needed if storing more than the alloca can hold | |||
981 | // (undefined behavior). | |||
982 | SV = Builder.CreateTrunc(SV, AllocaType); | |||
983 | SrcWidth = DestWidth; | |||
984 | SrcStoreWidth = DestStoreWidth; | |||
985 | } | |||
986 | } | |||
987 | ||||
988 | // If this is a big-endian system and the store is narrower than the | |||
989 | // full alloca type, we need to do a shift to get the right bits. | |||
990 | int ShAmt = 0; | |||
991 | if (DL.isBigEndian()) { | |||
992 | // On big-endian machines, the lowest bit is stored at the bit offset | |||
993 | // from the pointer given by getTypeStoreSizeInBits. This matters for | |||
994 | // integers with a bitwidth that is not a multiple of 8. | |||
995 | ShAmt = DestStoreWidth - SrcStoreWidth - Offset; | |||
996 | } else { | |||
997 | ShAmt = Offset; | |||
998 | } | |||
999 | ||||
1000 |   // Note: we support negative shift amounts by shifting right (lshr) instead. | |||
1001 |   // We do this to support (e.g.) stores off the end of a structure where | |||
1002 |   // only some bits in the structure are set. | |||
1003 | APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth)); | |||
1004 | if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) { | |||
1005 | SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), ShAmt)); | |||
1006 | Mask <<= ShAmt; | |||
1007 | } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) { | |||
1008 | SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), -ShAmt)); | |||
1009 | Mask = Mask.lshr(-ShAmt); | |||
1010 | } | |||
1011 | ||||
1012 | // Mask out the bits we are about to insert from the old value, and or | |||
1013 | // in the new bits. | |||
1014 | if (SrcWidth != DestWidth) { | |||
1015 |     assert(DestWidth > SrcWidth); | |||
1016 | Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask"); | |||
1017 | SV = Builder.CreateOr(Old, SV, "ins"); | |||
1018 | } | |||
1019 | return SV; | |||
1020 | } | |||
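| // Worked example (illustrative sketch; little-endian layout and hypothetical | |||
| // value names assumed): inserting an i16 %v at bit offset 16 of an i32 | |||
| // alloca gives SrcWidth = 16, DestWidth = 32, ShAmt = 16 and | |||
| // Mask = 0x0000FFFF << 16 = 0xFFFF0000, so the emitted IR is roughly: | |||
| //   %ext  = zext i16 %v to i32 | |||
| //   %shl  = shl i32 %ext, 16 | |||
| //   %mask = and i32 %old, 65535      ; clear the bits being replaced | |||
| //   %ins  = or i32 %mask, %shl       ; merge in the new bits | |||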
1021 | ||||
1022 | ||||
1023 | //===----------------------------------------------------------------------===// | |||
1024 | // SRoA Driver | |||
1025 | //===----------------------------------------------------------------------===// | |||
1026 | ||||
1027 | ||||
1028 | bool SROA::runOnFunction(Function &F) { | |||
1029 | if (skipFunction(F)) | |||
1030 | return false; | |||
1031 | ||||
1032 | bool Changed = performPromotion(F); | |||
1033 | ||||
1034 | while (1) { | |||
1035 | bool LocalChange = performScalarRepl(F); | |||
1036 | if (!LocalChange) break; // No need to repromote if no scalarrepl | |||
1037 | Changed = true; | |||
1038 | LocalChange = performPromotion(F); | |||
1039 | if (!LocalChange) break; // No need to re-scalarrepl if no promotion | |||
1040 | } | |||
1041 | ||||
1042 | return Changed; | |||
1043 | } | |||
1044 | ||||
1045 | namespace { | |||
1046 | class AllocaPromoter : public LoadAndStorePromoter { | |||
1047 | AllocaInst *AI; | |||
1048 | DIBuilder *DIB; | |||
1049 | SmallVector<DbgDeclareInst *, 4> DDIs; | |||
1050 | SmallVector<DbgValueInst *, 4> DVIs; | |||
1051 | public: | |||
1052 | AllocaPromoter(ArrayRef<Instruction*> Insts, SSAUpdater &S, | |||
1053 | DIBuilder *DB) | |||
1054 | : LoadAndStorePromoter(Insts, S), AI(nullptr), DIB(DB) {} | |||
1055 | ||||
1056 | void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) { | |||
1057 | // Remember which alloca we're promoting (for isInstInList). | |||
1058 | this->AI = AI; | |||
1059 | if (auto *L = LocalAsMetadata::getIfExists(AI)) { | |||
1060 | if (auto *DINode = MetadataAsValue::getIfExists(AI->getContext(), L)) { | |||
1061 | for (User *U : DINode->users()) | |||
1062 | if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U)) | |||
1063 | DDIs.push_back(DDI); | |||
1064 | else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) | |||
1065 | DVIs.push_back(DVI); | |||
1066 | } | |||
1067 | } | |||
1068 | ||||
1069 | LoadAndStorePromoter::run(Insts); | |||
1070 | AI->eraseFromParent(); | |||
1071 | for (SmallVectorImpl<DbgDeclareInst *>::iterator I = DDIs.begin(), | |||
1072 | E = DDIs.end(); I != E; ++I) { | |||
1073 | DbgDeclareInst *DDI = *I; | |||
1074 | DDI->eraseFromParent(); | |||
1075 | } | |||
1076 | for (SmallVectorImpl<DbgValueInst *>::iterator I = DVIs.begin(), | |||
1077 | E = DVIs.end(); I != E; ++I) { | |||
1078 | DbgValueInst *DVI = *I; | |||
1079 | DVI->eraseFromParent(); | |||
1080 | } | |||
1081 | } | |||
1082 | ||||
1083 | bool isInstInList(Instruction *I, | |||
1084 | const SmallVectorImpl<Instruction*> &Insts) const override { | |||
1085 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) | |||
1086 | return LI->getOperand(0) == AI; | |||
1087 | return cast<StoreInst>(I)->getPointerOperand() == AI; | |||
1088 | } | |||
1089 | ||||
1090 | void updateDebugInfo(Instruction *Inst) const override { | |||
1091 | for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(), | |||
1092 | E = DDIs.end(); I != E; ++I) { | |||
1093 | DbgDeclareInst *DDI = *I; | |||
1094 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) | |||
1095 | ConvertDebugDeclareToDebugValue(DDI, SI, *DIB); | |||
1096 | else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) | |||
1097 | ConvertDebugDeclareToDebugValue(DDI, LI, *DIB); | |||
1098 | } | |||
1099 | for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(), | |||
1100 | E = DVIs.end(); I != E; ++I) { | |||
1101 | DbgValueInst *DVI = *I; | |||
1102 | Value *Arg = nullptr; | |||
1103 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { | |||
1104 | // If an argument is zero extended then use argument directly. The ZExt | |||
1105 | // may be zapped by an optimization pass in future. | |||
1106 | if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0))) | |||
1107 | Arg = dyn_cast<Argument>(ZExt->getOperand(0)); | |||
1108 | if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0))) | |||
1109 | Arg = dyn_cast<Argument>(SExt->getOperand(0)); | |||
1110 | if (!Arg) | |||
1111 | Arg = SI->getOperand(0); | |||
1112 | } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { | |||
1113 | Arg = LI->getOperand(0); | |||
1114 | } else { | |||
1115 | continue; | |||
1116 | } | |||
1117 | DIB->insertDbgValueIntrinsic(Arg, 0, DVI->getVariable(), | |||
1118 | DVI->getExpression(), DVI->getDebugLoc(), | |||
1119 | Inst); | |||
1120 | } | |||
1121 | } | |||
1122 | }; | |||
1123 | } // end anon namespace | |||
1124 | ||||
1125 | /// isSafeSelectToSpeculate - Select instructions that use an alloca and are | |||
1126 | /// subsequently loaded can be rewritten to load both input pointers and then | |||
1127 | /// select between the result, allowing the load of the alloca to be promoted. | |||
1128 | /// From this: | |||
1129 | /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other | |||
1130 | /// %V = load i32* %P2 | |||
1131 | /// to: | |||
1132 | /// %V1 = load i32* %Alloca -> will be mem2reg'd | |||
1133 | /// %V2 = load i32* %Other | |||
1134 | /// %V = select i1 %cond, i32 %V1, i32 %V2 | |||
1135 | /// | |||
1136 | /// We can do this to a select if its only uses are loads and if the operands to | |||
1137 | /// the select can be loaded unconditionally. | |||
1138 | static bool isSafeSelectToSpeculate(SelectInst *SI) { | |||
1139 | const DataLayout &DL = SI->getModule()->getDataLayout(); | |||
1140 | ||||
1141 | for (User *U : SI->users()) { | |||
1142 | LoadInst *LI = dyn_cast<LoadInst>(U); | |||
1143 | if (!LI || !LI->isSimple()) return false; | |||
1144 | ||||
1145 |     // Both operands to the select need to be dereferenceable, either absolutely | |||
1146 | // (e.g. allocas) or at this point because we can see other accesses to it. | |||
1147 | if (!isSafeToLoadUnconditionally(SI->getTrueValue(), LI->getAlignment(), | |||
1148 | DL, LI)) | |||
1149 | return false; | |||
1150 | if (!isSafeToLoadUnconditionally(SI->getFalseValue(), LI->getAlignment(), | |||
1151 | DL, LI)) | |||
1152 | return false; | |||
1153 | } | |||
1154 | ||||
1155 | return true; | |||
1156 | } | |||
1157 | ||||
1158 | /// isSafePHIToSpeculate - PHI instructions that use an alloca and are | |||
1159 | /// subsequently loaded can be rewritten to load both input pointers in the pred | |||
1160 | /// blocks and then PHI the results, allowing the load of the alloca to be | |||
1161 | /// promoted. | |||
1162 | /// From this: | |||
1163 | /// %P2 = phi [i32* %Alloca, i32* %Other] | |||
1164 | /// %V = load i32* %P2 | |||
1165 | /// to: | |||
1166 | /// %V1 = load i32* %Alloca -> will be mem2reg'd | |||
1167 | /// ... | |||
1168 | /// %V2 = load i32* %Other | |||
1169 | /// ... | |||
1170 | /// %V = phi [i32 %V1, i32 %V2] | |||
1171 | /// | |||
1172 | /// We can do this to a PHI if its only uses are loads and if the operands to | |||
1173 | /// the PHI can be loaded unconditionally. | |||
1174 | static bool isSafePHIToSpeculate(PHINode *PN) { | |||
1175 | // For now, we can only do this promotion if the load is in the same block as | |||
1176 | // the PHI, and if there are no stores between the phi and load. | |||
1177 | // TODO: Allow recursive phi users. | |||
1178 | // TODO: Allow stores. | |||
1179 | BasicBlock *BB = PN->getParent(); | |||
1180 | unsigned MaxAlign = 0; | |||
1181 | for (User *U : PN->users()) { | |||
1182 | LoadInst *LI = dyn_cast<LoadInst>(U); | |||
1183 | if (!LI || !LI->isSimple()) return false; | |||
1184 | ||||
1185 | // For now we only allow loads in the same block as the PHI. This is a | |||
1186 | // common case that happens when instcombine merges two loads through a PHI. | |||
1187 | if (LI->getParent() != BB) return false; | |||
1188 | ||||
1189 | // Ensure that there are no instructions between the PHI and the load that | |||
1190 | // could store. | |||
1191 | for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI) | |||
1192 | if (BBI->mayWriteToMemory()) | |||
1193 | return false; | |||
1194 | ||||
1195 | MaxAlign = std::max(MaxAlign, LI->getAlignment()); | |||
1196 | } | |||
1197 | ||||
1198 | const DataLayout &DL = PN->getModule()->getDataLayout(); | |||
1199 | ||||
1200 | // Okay, we know that we have one or more loads in the same block as the PHI. | |||
1201 | // We can transform this if it is safe to push the loads into the predecessor | |||
1202 | // blocks. The only thing to watch out for is that we can't put a possibly | |||
1203 | // trapping load in the predecessor if it is a critical edge. | |||
1204 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
1205 | BasicBlock *Pred = PN->getIncomingBlock(i); | |||
1206 | Value *InVal = PN->getIncomingValue(i); | |||
1207 | ||||
1208 | // If the terminator of the predecessor has side-effects (an invoke), | |||
1209 | // there is no safe place to put a load in the predecessor. | |||
1210 | if (Pred->getTerminator()->mayHaveSideEffects()) | |||
1211 | return false; | |||
1212 | ||||
1213 | // If the value is produced by the terminator of the predecessor | |||
1214 | // (an invoke), there is no valid place to put a load in the predecessor. | |||
1215 | if (Pred->getTerminator() == InVal) | |||
1216 | return false; | |||
1217 | ||||
1218 | // If the predecessor has a single successor, then the edge isn't critical. | |||
1219 | if (Pred->getTerminator()->getNumSuccessors() == 1) | |||
1220 | continue; | |||
1221 | ||||
1222 | // If this pointer is always safe to load, or if we can prove that there is | |||
1223 | // already a load in the block, then we can move the load to the pred block. | |||
1224 | if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, Pred->getTerminator())) | |||
1225 | continue; | |||
1226 | ||||
1227 | return false; | |||
1228 | } | |||
1229 | ||||
1230 | return true; | |||
1231 | } | |||
1232 | ||||
1233 | ||||
1234 | /// tryToMakeAllocaBePromotable - This returns true if the alloca only has | |||
1235 | /// direct (non-volatile) loads and stores to it. If the alloca is close but | |||
1236 | /// not quite there, this will transform the code to allow promotion. As such, | |||
1237 | /// it is a non-pure predicate. | |||
1238 | static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout &DL) { | |||
1239 | SetVector<Instruction*, SmallVector<Instruction*, 4>, | |||
1240 | SmallPtrSet<Instruction*, 4> > InstsToRewrite; | |||
1241 | for (User *U : AI->users()) { | |||
1242 | if (LoadInst *LI = dyn_cast<LoadInst>(U)) { | |||
1243 | if (!LI->isSimple()) | |||
1244 | return false; | |||
1245 | continue; | |||
1246 | } | |||
1247 | ||||
1248 | if (StoreInst *SI = dyn_cast<StoreInst>(U)) { | |||
1249 | if (SI->getOperand(0) == AI || !SI->isSimple()) | |||
1250 | return false; // Don't allow a store OF the AI, only INTO the AI. | |||
1251 | continue; | |||
1252 | } | |||
1253 | ||||
1254 | if (SelectInst *SI = dyn_cast<SelectInst>(U)) { | |||
1255 |       // If the condition being selected on is a constant, fold the select. | |||
1256 |       // Yes, this does (rarely) happen early on. | |||
1257 | if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition())) { | |||
1258 | Value *Result = SI->getOperand(1+CI->isZero()); | |||
1259 | SI->replaceAllUsesWith(Result); | |||
1260 | SI->eraseFromParent(); | |||
1261 | ||||
1262 | // This is very rare and we just scrambled the use list of AI, start | |||
1263 | // over completely. | |||
1264 | return tryToMakeAllocaBePromotable(AI, DL); | |||
1265 | } | |||
1266 | ||||
1267 | // If it is safe to turn "load (select c, AI, ptr)" into a select of two | |||
1268 | // loads, then we can transform this by rewriting the select. | |||
1269 | if (!isSafeSelectToSpeculate(SI)) | |||
1270 | return false; | |||
1271 | ||||
1272 | InstsToRewrite.insert(SI); | |||
1273 | continue; | |||
1274 | } | |||
1275 | ||||
1276 | if (PHINode *PN = dyn_cast<PHINode>(U)) { | |||
1277 | if (PN->use_empty()) { // Dead PHIs can be stripped. | |||
1278 | InstsToRewrite.insert(PN); | |||
1279 | continue; | |||
1280 | } | |||
1281 | ||||
1282 | // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads | |||
1283 | // in the pred blocks, then we can transform this by rewriting the PHI. | |||
1284 | if (!isSafePHIToSpeculate(PN)) | |||
1285 | return false; | |||
1286 | ||||
1287 | InstsToRewrite.insert(PN); | |||
1288 | continue; | |||
1289 | } | |||
1290 | ||||
1291 | if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { | |||
1292 | if (onlyUsedByLifetimeMarkers(BCI)) { | |||
1293 | InstsToRewrite.insert(BCI); | |||
1294 | continue; | |||
1295 | } | |||
1296 | } | |||
1297 | ||||
1298 | return false; | |||
1299 | } | |||
1300 | ||||
1301 | // If there are no instructions to rewrite, then all uses are load/stores and | |||
1302 | // we're done! | |||
1303 | if (InstsToRewrite.empty()) | |||
1304 | return true; | |||
1305 | ||||
1306 |   // If we have instructions that need to be rewritten for this to be promotable, | |||
1307 | // take care of it now. | |||
1308 | for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) { | |||
1309 | if (BitCastInst *BCI = dyn_cast<BitCastInst>(InstsToRewrite[i])) { | |||
1310 | // This could only be a bitcast used by nothing but lifetime intrinsics. | |||
1311 | for (BitCastInst::user_iterator I = BCI->user_begin(), E = BCI->user_end(); | |||
1312 | I != E;) | |||
1313 | cast<Instruction>(*I++)->eraseFromParent(); | |||
1314 | BCI->eraseFromParent(); | |||
1315 | continue; | |||
1316 | } | |||
1317 | ||||
1318 | if (SelectInst *SI = dyn_cast<SelectInst>(InstsToRewrite[i])) { | |||
1319 | // Selects in InstsToRewrite only have load uses. Rewrite each as two | |||
1320 | // loads with a new select. | |||
1321 | while (!SI->use_empty()) { | |||
1322 | LoadInst *LI = cast<LoadInst>(SI->user_back()); | |||
1323 | ||||
1324 | IRBuilder<> Builder(LI); | |||
1325 | LoadInst *TrueLoad = | |||
1326 | Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t"); | |||
1327 | LoadInst *FalseLoad = | |||
1328 | Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f"); | |||
1329 | ||||
1330 | // Transfer alignment and AA info if present. | |||
1331 | TrueLoad->setAlignment(LI->getAlignment()); | |||
1332 | FalseLoad->setAlignment(LI->getAlignment()); | |||
1333 | ||||
1334 | AAMDNodes Tags; | |||
1335 | LI->getAAMetadata(Tags); | |||
1336 | if (Tags) { | |||
1337 | TrueLoad->setAAMetadata(Tags); | |||
1338 | FalseLoad->setAAMetadata(Tags); | |||
1339 | } | |||
1340 | ||||
1341 | Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad, FalseLoad); | |||
1342 | V->takeName(LI); | |||
1343 | LI->replaceAllUsesWith(V); | |||
1344 | LI->eraseFromParent(); | |||
1345 | } | |||
1346 | ||||
1347 | // Now that all the loads are gone, the select is gone too. | |||
1348 | SI->eraseFromParent(); | |||
1349 | continue; | |||
1350 | } | |||
1351 | ||||
1352 | // Otherwise, we have a PHI node which allows us to push the loads into the | |||
1353 | // predecessors. | |||
1354 | PHINode *PN = cast<PHINode>(InstsToRewrite[i]); | |||
1355 | if (PN->use_empty()) { | |||
1356 | PN->eraseFromParent(); | |||
1357 | continue; | |||
1358 | } | |||
1359 | ||||
1360 | Type *LoadTy = AI->getAllocatedType(); | |||
1361 | PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(), | |||
1362 | PN->getName()+".ld", PN); | |||
1363 | ||||
1364 |     // Get the AA tags and alignment to use from one of the loads. It doesn't | |||
1365 |     // matter which load we pick; even if they differ, any one is acceptable. | |||
1366 | LoadInst *SomeLoad = cast<LoadInst>(PN->user_back()); | |||
1367 | ||||
1368 | AAMDNodes AATags; | |||
1369 | SomeLoad->getAAMetadata(AATags); | |||
1370 | unsigned Align = SomeLoad->getAlignment(); | |||
1371 | ||||
1372 | // Rewrite all loads of the PN to use the new PHI. | |||
1373 | while (!PN->use_empty()) { | |||
1374 | LoadInst *LI = cast<LoadInst>(PN->user_back()); | |||
1375 | LI->replaceAllUsesWith(NewPN); | |||
1376 | LI->eraseFromParent(); | |||
1377 | } | |||
1378 | ||||
1379 | // Inject loads into all of the pred blocks. Keep track of which blocks we | |||
1380 | // insert them into in case we have multiple edges from the same block. | |||
1381 | DenseMap<BasicBlock*, LoadInst*> InsertedLoads; | |||
1382 | ||||
1383 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |||
1384 | BasicBlock *Pred = PN->getIncomingBlock(i); | |||
1385 | LoadInst *&Load = InsertedLoads[Pred]; | |||
1386 | if (!Load) { | |||
1387 | Load = new LoadInst(PN->getIncomingValue(i), | |||
1388 | PN->getName() + "." + Pred->getName(), | |||
1389 | Pred->getTerminator()); | |||
1390 | Load->setAlignment(Align); | |||
1391 | if (AATags) Load->setAAMetadata(AATags); | |||
1392 | } | |||
1393 | ||||
1394 | NewPN->addIncoming(Load, Pred); | |||
1395 | } | |||
1396 | ||||
1397 | PN->eraseFromParent(); | |||
1398 | } | |||
1399 | ||||
1400 | ++NumAdjusted; | |||
1401 | return true; | |||
1402 | } | |||
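| // Sketch of the select rewrite performed above (the ".t"/".f" suffixes match | |||
| // the names used in the code): | |||
| //   %p = select i1 %c, i32* %a, i32* %other | |||
| //   %v = load i32* %p | |||
| // becomes: | |||
| //   %v.t = load i32* %a | |||
| //   %v.f = load i32* %other | |||
| //   %v = select i1 %c, i32 %v.t, i32 %v.f | |||
| // after which %a is only loaded from directly and can be promoted. | |||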
1403 | ||||
1404 | bool SROA::performPromotion(Function &F) { | |||
1405 | std::vector<AllocaInst*> Allocas; | |||
1406 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
1407 | DominatorTree *DT = nullptr; | |||
1408 | if (HasDomTree) | |||
1409 | DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | |||
1410 | AssumptionCache &AC = | |||
1411 | getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | |||
1412 | ||||
1413 | BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function | |||
1414 | DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); | |||
1415 | bool Changed = false; | |||
1416 | SmallVector<Instruction*, 64> Insts; | |||
1417 | while (1) { | |||
1418 | Allocas.clear(); | |||
1419 | ||||
1420 | // Find allocas that are safe to promote, by looking at all instructions in | |||
1421 | // the entry node | |||
1422 | for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) | |||
1423 | if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? | |||
1424 | if (tryToMakeAllocaBePromotable(AI, DL)) | |||
1425 | Allocas.push_back(AI); | |||
1426 | ||||
1427 | if (Allocas.empty()) break; | |||
1428 | ||||
1429 | if (HasDomTree) | |||
1430 | PromoteMemToReg(Allocas, *DT, nullptr, &AC); | |||
1431 | else { | |||
1432 | SSAUpdater SSA; | |||
1433 | for (unsigned i = 0, e = Allocas.size(); i != e; ++i) { | |||
1434 | AllocaInst *AI = Allocas[i]; | |||
1435 | ||||
1436 | // Build list of instructions to promote. | |||
1437 | for (User *U : AI->users()) | |||
1438 | Insts.push_back(cast<Instruction>(U)); | |||
1439 | AllocaPromoter(Insts, SSA, &DIB).run(AI, Insts); | |||
1440 | Insts.clear(); | |||
1441 | } | |||
1442 | } | |||
1443 | NumPromoted += Allocas.size(); | |||
1444 | Changed = true; | |||
1445 | } | |||
1446 | ||||
1447 | return Changed; | |||
1448 | } | |||
1449 | ||||
1450 | ||||
1451 | /// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for | |||
1452 | /// SROA. It must be a struct or array type with a small number of elements. | |||
1453 | bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) { | |||
1454 | Type *T = AI->getAllocatedType(); | |||
1455 | // Do not promote any struct that has too many members. | |||
1456 | if (StructType *ST = dyn_cast<StructType>(T)) | |||
1457 | return ST->getNumElements() <= StructMemberThreshold; | |||
1458 | // Do not promote any array that has too many elements. | |||
1459 | if (ArrayType *AT = dyn_cast<ArrayType>(T)) | |||
1460 | return AT->getNumElements() <= ArrayElementThreshold; | |||
1461 | return false; | |||
1462 | } | |||
1463 | ||||
1464 | // performScalarRepl - This algorithm is a simple worklist driven algorithm, | |||
1465 | // which runs on all of the alloca instructions in the entry block, removing | |||
1466 | // them if they are only used by getelementptr instructions. | |||
1467 | // | |||
1468 | bool SROA::performScalarRepl(Function &F) { | |||
1469 | std::vector<AllocaInst*> WorkList; | |||
1470 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
1471 | ||||
1472 | // Scan the entry basic block, adding allocas to the worklist. | |||
1473 | BasicBlock &BB = F.getEntryBlock(); | |||
1474 | for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) | |||
1475 | if (AllocaInst *A = dyn_cast<AllocaInst>(I)) | |||
1476 | WorkList.push_back(A); | |||
1477 | ||||
1478 | // Process the worklist | |||
1479 | bool Changed = false; | |||
1480 | while (!WorkList.empty()) { | |||
1481 | AllocaInst *AI = WorkList.back(); | |||
1482 | WorkList.pop_back(); | |||
1483 | ||||
1484 | // Handle dead allocas trivially. These can be formed by SROA'ing arrays | |||
1485 | // with unused elements. | |||
1486 | if (AI->use_empty()) { | |||
1487 | AI->eraseFromParent(); | |||
1488 | Changed = true; | |||
1489 | continue; | |||
1490 | } | |||
1491 | ||||
1492 | // If this alloca is impossible for us to promote, reject it early. | |||
1493 | if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized()) | |||
1494 | continue; | |||
1495 | ||||
1496 | // Check to see if we can perform the core SROA transformation. We cannot | |||
1497 | // transform the allocation instruction if it is an array allocation | |||
1498 | // (allocations OF arrays are ok though), and an allocation of a scalar | |||
1499 | // value cannot be decomposed at all. | |||
1500 | uint64_t AllocaSize = DL.getTypeAllocSize(AI->getAllocatedType()); | |||
1501 | ||||
1502 | // Do not promote [0 x %struct]. | |||
1503 | if (AllocaSize == 0) continue; | |||
1504 | ||||
1505 | // Do not promote any struct whose size is too big. | |||
1506 | if (AllocaSize > SRThreshold) continue; | |||
1507 | ||||
1508 | // If the alloca looks like a good candidate for scalar replacement, and if | |||
1509 | // all its users can be transformed, then split up the aggregate into its | |||
1510 | // separate elements. | |||
1511 | if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) { | |||
1512 | DoScalarReplacement(AI, WorkList); | |||
1513 | Changed = true; | |||
1514 | continue; | |||
1515 | } | |||
1516 | ||||
1517 |     // Check whether we can turn this aggregate value (potentially with casts) | |||
1518 |     // into a simple scalar value that can be mem2reg'd into a register value. | |||
1519 | // IsNotTrivial tracks whether this is something that mem2reg could have | |||
1520 | // promoted itself. If so, we don't want to transform it needlessly. Note | |||
1521 | // that we can't just check based on the type: the alloca may be of an i32 | |||
1522 | // but that has pointer arithmetic to set byte 3 of it or something. | |||
1523 | if (AllocaInst *NewAI = | |||
1524 | ConvertToScalarInfo((unsigned)AllocaSize, DL, ScalarLoadThreshold) | |||
1525 | .TryConvert(AI)) { | |||
1526 | NewAI->takeName(AI); | |||
1527 | AI->eraseFromParent(); | |||
1528 | ++NumConverted; | |||
1529 | Changed = true; | |||
1530 | continue; | |||
1531 | } | |||
1532 | ||||
1533 | // Otherwise, couldn't process this alloca. | |||
1534 | } | |||
1535 | ||||
1536 | return Changed; | |||
1537 | } | |||
1538 | ||||
1539 | /// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl | |||
1540 | /// predicate, do SROA now. | |||
1541 | void SROA::DoScalarReplacement(AllocaInst *AI, | |||
1542 | std::vector<AllocaInst*> &WorkList) { | |||
1543 |   DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n'); | |||
1544 | SmallVector<AllocaInst*, 32> ElementAllocas; | |||
1545 | if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { | |||
1546 | ElementAllocas.reserve(ST->getNumContainedTypes()); | |||
1547 | for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { | |||
1548 | AllocaInst *NA = new AllocaInst(ST->getContainedType(i), nullptr, | |||
1549 | AI->getAlignment(), | |||
1550 | AI->getName() + "." + Twine(i), AI); | |||
1551 | ElementAllocas.push_back(NA); | |||
1552 | WorkList.push_back(NA); // Add to worklist for recursive processing | |||
1553 | } | |||
1554 | } else { | |||
1555 | ArrayType *AT = cast<ArrayType>(AI->getAllocatedType()); | |||
1556 | ElementAllocas.reserve(AT->getNumElements()); | |||
1557 | Type *ElTy = AT->getElementType(); | |||
1558 | for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { | |||
1559 | AllocaInst *NA = new AllocaInst(ElTy, nullptr, AI->getAlignment(), | |||
1560 | AI->getName() + "." + Twine(i), AI); | |||
1561 | ElementAllocas.push_back(NA); | |||
1562 | WorkList.push_back(NA); // Add to worklist for recursive processing | |||
1563 | } | |||
1564 | } | |||
1565 | ||||
1566 | // Now that we have created the new alloca instructions, rewrite all the | |||
1567 | // uses of the old alloca. | |||
1568 | RewriteForScalarRepl(AI, AI, 0, ElementAllocas); | |||
1569 | ||||
1570 | // Now erase any instructions that were made dead while rewriting the alloca. | |||
1571 | DeleteDeadInstructions(); | |||
1572 | AI->eraseFromParent(); | |||
1573 | ||||
1574 | ++NumReplaced; | |||
1575 | } | |||
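| // For example (sketch), an alloca of a two-element struct: | |||
| //   %a = alloca { i32, float } | |||
| // is split into one alloca per member, named with the ".<index>" suffix used | |||
| // above: | |||
| //   %a.0 = alloca i32 | |||
| //   %a.1 = alloca float | |||
| // and RewriteForScalarRepl redirects all uses of %a to %a.0 / %a.1. | |||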
1576 | ||||
1577 | /// DeleteDeadInstructions - Erase instructions on the DeadInstrs list, | |||
1578 | /// recursively including all their operands that become trivially dead. | |||
1579 | void SROA::DeleteDeadInstructions() { | |||
1580 | while (!DeadInsts.empty()) { | |||
1581 | Instruction *I = cast<Instruction>(DeadInsts.pop_back_val()); | |||
1582 | ||||
1583 | for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) | |||
1584 | if (Instruction *U = dyn_cast<Instruction>(*OI)) { | |||
1585 | // Zero out the operand and see if it becomes trivially dead. | |||
1586 | // (But, don't add allocas to the dead instruction list -- they are | |||
1587 | // already on the worklist and will be deleted separately.) | |||
1588 | *OI = nullptr; | |||
1589 | if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U)) | |||
1590 | DeadInsts.push_back(U); | |||
1591 | } | |||
1592 | ||||
1593 | I->eraseFromParent(); | |||
1594 | } | |||
1595 | } | |||
1596 | ||||
1597 | /// isSafeForScalarRepl - Check if instruction I is a safe use with regard to | |||
1598 | /// performing scalar replacement of alloca AI. The results are flagged in | |||
1599 | /// the Info parameter. Offset indicates the position within AI that is | |||
1600 | /// referenced by this instruction. | |||
1601 | void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset, | |||
1602 | AllocaInfo &Info) { | |||
1603 | const DataLayout &DL = I->getModule()->getDataLayout(); | |||
1604 | for (Use &U : I->uses()) { | |||
1605 | Instruction *User = cast<Instruction>(U.getUser()); | |||
1606 | ||||
1607 | if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { | |||
1608 | isSafeForScalarRepl(BC, Offset, Info); | |||
1609 | } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { | |||
1610 | uint64_t GEPOffset = Offset; | |||
1611 | isSafeGEP(GEPI, GEPOffset, Info); | |||
1612 | if (!Info.isUnsafe) | |||
1613 | isSafeForScalarRepl(GEPI, GEPOffset, Info); | |||
1614 | } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { | |||
1615 | ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); | |||
1616 | if (!Length || Length->isNegative()) | |||
1617 | return MarkUnsafe(Info, User); | |||
1618 | ||||
1619 | isSafeMemAccess(Offset, Length->getZExtValue(), nullptr, | |||
1620 | U.getOperandNo() == 0, Info, MI, | |||
1621 | true /*AllowWholeAccess*/); | |||
1622 | } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) { | |||
1623 | if (!LI->isSimple()) | |||
1624 | return MarkUnsafe(Info, User); | |||
1625 | Type *LIType = LI->getType(); | |||
1626 | isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info, | |||
1627 | LI, true /*AllowWholeAccess*/); | |||
1628 | Info.hasALoadOrStore = true; | |||
1629 | ||||
1630 | } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { | |||
1631 | // Store is ok if storing INTO the pointer, not storing the pointer | |||
1632 | if (!SI->isSimple() || SI->getOperand(0) == I) | |||
1633 | return MarkUnsafe(Info, User); | |||
1634 | ||||
1635 | Type *SIType = SI->getOperand(0)->getType(); | |||
1636 | isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info, | |||
1637 | SI, true /*AllowWholeAccess*/); | |||
1638 | Info.hasALoadOrStore = true; | |||
1639 | } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) { | |||
1640 | if (II->getIntrinsicID() != Intrinsic::lifetime_start && | |||
1641 | II->getIntrinsicID() != Intrinsic::lifetime_end) | |||
1642 | return MarkUnsafe(Info, User); | |||
1643 | } else if (isa<PHINode>(User) || isa<SelectInst>(User)) { | |||
1644 | isSafePHISelectUseForScalarRepl(User, Offset, Info); | |||
1645 | } else { | |||
1646 | return MarkUnsafe(Info, User); | |||
1647 | } | |||
1648 | if (Info.isUnsafe) return; | |||
1649 | } | |||
1650 | } | |||
1651 | ||||
1652 | ||||
1653 | /// isSafePHIUseForScalarRepl - If we see a PHI node or select using a pointer | |||
1654 | /// derived from the alloca, we can often still split the alloca into elements. | |||
1655 | /// This is useful if we have a large alloca where one element is phi'd | |||
1656 | /// together somewhere: we can SRoA and promote all the other elements even if | |||
1657 | /// we end up not being able to promote this one. | |||
1658 | /// | |||
1659 | /// All we require is that the uses of the PHI do not index into other parts of | |||
1660 | /// the alloca. The most important use case for this is single loads and stores | |||
1661 | /// that are PHI'd together, which can happen due to code sinking. | |||
1662 | void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset, | |||
1663 | AllocaInfo &Info) { | |||
1664 | // If we've already checked this PHI, don't do it again. | |||
1665 | if (PHINode *PN = dyn_cast<PHINode>(I)) | |||
1666 | if (!Info.CheckedPHIs.insert(PN).second) | |||
1667 | return; | |||
1668 | ||||
1669 | const DataLayout &DL = I->getModule()->getDataLayout(); | |||
1670 | for (User *U : I->users()) { | |||
1671 | Instruction *UI = cast<Instruction>(U); | |||
1672 | ||||
1673 | if (BitCastInst *BC = dyn_cast<BitCastInst>(UI)) { | |||
1674 | isSafePHISelectUseForScalarRepl(BC, Offset, Info); | |||
1675 | } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) { | |||
1676 | // Only allow "bitcast" GEPs for simplicity. We could generalize this, | |||
1677 | // but would have to prove that we're staying inside of an element being | |||
1678 | // promoted. | |||
1679 | if (!GEPI->hasAllZeroIndices()) | |||
1680 | return MarkUnsafe(Info, UI); | |||
1681 | isSafePHISelectUseForScalarRepl(GEPI, Offset, Info); | |||
1682 | } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { | |||
1683 | if (!LI->isSimple()) | |||
1684 | return MarkUnsafe(Info, UI); | |||
1685 | Type *LIType = LI->getType(); | |||
1686 | isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info, | |||
1687 | LI, false /*AllowWholeAccess*/); | |||
1688 | Info.hasALoadOrStore = true; | |||
1689 | ||||
1690 | } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { | |||
1691 | // Store is ok if storing INTO the pointer, not storing the pointer | |||
1692 | if (!SI->isSimple() || SI->getOperand(0) == I) | |||
1693 | return MarkUnsafe(Info, UI); | |||
1694 | ||||
1695 | Type *SIType = SI->getOperand(0)->getType(); | |||
1696 | isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info, | |||
1697 | SI, false /*AllowWholeAccess*/); | |||
1698 | Info.hasALoadOrStore = true; | |||
1699 | } else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) { | |||
1700 | isSafePHISelectUseForScalarRepl(UI, Offset, Info); | |||
1701 | } else { | |||
1702 | return MarkUnsafe(Info, UI); | |||
1703 | } | |||
1704 | if (Info.isUnsafe) return; | |||
1705 | } | |||
1706 | } | |||
1707 | ||||
1708 | /// isSafeGEP - Check if a GEP instruction can be handled for scalar | |||
1709 | /// replacement. It is safe when all the indices are constant, in-bounds | |||
1710 | /// references, and when the resulting offset corresponds to an element within | |||
1711 | /// the alloca type. The results are flagged in the Info parameter. Upon | |||
1712 | /// return, Offset is adjusted as specified by the GEP indices. | |||
1713 | void SROA::isSafeGEP(GetElementPtrInst *GEPI, | |||
1714 | uint64_t &Offset, AllocaInfo &Info) { | |||
1715 | gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI); | |||
1716 | if (GEPIt == E) | |||
1717 | return; | |||
1718 | bool NonConstant = false; | |||
1719 | unsigned NonConstantIdxSize = 0; | |||
1720 | ||||
1721 | // Walk through the GEP type indices, checking the types that this indexes | |||
1722 | // into. | |||
1723 | for (; GEPIt != E; ++GEPIt) { | |||
1724 | // Ignore struct elements, no extra checking needed for these. | |||
1725 | if ((*GEPIt)->isStructTy()) | |||
1726 | continue; | |||
1727 | ||||
1728 | ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); | |||
1729 | if (!IdxVal) | |||
1730 | return MarkUnsafe(Info, GEPI); | |||
1731 | } | |||
1732 | ||||
1733 | // Compute the offset due to this GEP and check if the alloca has a | |||
1734 | // component element at that offset. | |||
1735 | SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); | |||
1736 | // If this GEP is non-constant then the last operand must have been a | |||
1737 | // dynamic index into a vector. Pop this now as it has no impact on the | |||
1738 | // constant part of the offset. | |||
1739 | if (NonConstant) | |||
1740 | Indices.pop_back(); | |||
1741 | ||||
1742 | const DataLayout &DL = GEPI->getModule()->getDataLayout(); | |||
1743 | Offset += DL.getIndexedOffsetInType(GEPI->getSourceElementType(), Indices); | |||
1744 | if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, NonConstantIdxSize, | |||
1745 | DL)) | |||
1746 | MarkUnsafe(Info, GEPI); | |||
1747 | } | |||
1748 | ||||
1749 | /// isHomogeneousAggregate - Check if type T is a struct or array containing | |||
1750 | /// elements of the same type (which is always true for arrays). If so, | |||
1751 | /// return true with NumElts and EltTy set to the number of elements and the | |||
1752 | /// element type, respectively. | |||
1753 | static bool isHomogeneousAggregate(Type *T, unsigned &NumElts, | |||
1754 | Type *&EltTy) { | |||
1755 | if (ArrayType *AT = dyn_cast<ArrayType>(T)) { | |||
1756 | NumElts = AT->getNumElements(); | |||
1757 | EltTy = (NumElts == 0 ? nullptr : AT->getElementType()); | |||
1758 | return true; | |||
1759 | } | |||
1760 | if (StructType *ST = dyn_cast<StructType>(T)) { | |||
1761 | NumElts = ST->getNumContainedTypes(); | |||
1762 | EltTy = (NumElts == 0 ? nullptr : ST->getContainedType(0)); | |||
1763 | for (unsigned n = 1; n < NumElts; ++n) { | |||
1764 | if (ST->getContainedType(n) != EltTy) | |||
1765 | return false; | |||
1766 | } | |||
1767 | return true; | |||
1768 | } | |||
1769 | return false; | |||
1770 | } | |||
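| // Examples (sketch): { i32, i32, i32 } is homogeneous (NumElts = 3, | |||
| // EltTy = i32), as is [4 x i8] (NumElts = 4, EltTy = i8); { i32, float } is | |||
| // not, because its element types differ. | |||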
1771 | ||||
1772 | /// isCompatibleAggregate - Check if T1 and T2 are either the same type or are | |||
1773 | /// "homogeneous" aggregates with the same element type and number of elements. | |||
1774 | static bool isCompatibleAggregate(Type *T1, Type *T2) { | |||
1775 | if (T1 == T2) | |||
1776 | return true; | |||
1777 | ||||
1778 | unsigned NumElts1, NumElts2; | |||
1779 | Type *EltTy1, *EltTy2; | |||
1780 | if (isHomogeneousAggregate(T1, NumElts1, EltTy1) && | |||
1781 | isHomogeneousAggregate(T2, NumElts2, EltTy2) && | |||
1782 | NumElts1 == NumElts2 && | |||
1783 | EltTy1 == EltTy2) | |||
1784 | return true; | |||
1785 | ||||
1786 | return false; | |||
1787 | } | |||
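| // Example (sketch): [2 x i32] and { i32, i32 } are compatible, which allows | |||
| // a whole-aggregate load or store of one type to be rewritten element by | |||
| // element against an alloca of the other type. | |||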
1788 | ||||
1789 | /// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI | |||
1790 | /// alloca or has an offset and size that corresponds to a component element | |||
1791 | /// within it. The offset checked here may have been formed from a GEP with a | |||
1792 | /// pointer bitcasted to a different type. | |||
1793 | /// | |||
1794 | /// If AllowWholeAccess is true, then this allows uses of the entire alloca as a | |||
1795 | /// unit. If false, it only allows accesses known to be in a single element. | |||
1796 | void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize, | |||
1797 | Type *MemOpType, bool isStore, | |||
1798 | AllocaInfo &Info, Instruction *TheAccess, | |||
1799 | bool AllowWholeAccess) { | |||
1800 | const DataLayout &DL = TheAccess->getModule()->getDataLayout(); | |||
1801 | // Check if this is a load/store of the entire alloca. | |||
1802 | if (Offset == 0 && AllowWholeAccess && | |||
1803 | MemSize == DL.getTypeAllocSize(Info.AI->getAllocatedType())) { | |||
1804 | // This can be safe for MemIntrinsics (where MemOpType is 0) and integer | |||
1805 | // loads/stores (which are essentially the same as the MemIntrinsics with | |||
1806 | // regard to copying padding between elements). But, if an alloca is | |||
1807 | // flagged as both a source and destination of such operations, we'll need | |||
1808 | // to check later for padding between elements. | |||
1809 | if (!MemOpType || MemOpType->isIntegerTy()) { | |||
1810 | if (isStore) | |||
1811 | Info.isMemCpyDst = true; | |||
1812 | else | |||
1813 | Info.isMemCpySrc = true; | |||
1814 | return; | |||
1815 | } | |||
1816 | // This is also safe for references using a type that is compatible with | |||
1817 | // the type of the alloca, so that loads/stores can be rewritten using | |||
1818 | // insertvalue/extractvalue. | |||
1819 | if (isCompatibleAggregate(MemOpType, Info.AI->getAllocatedType())) { | |||
1820 | Info.hasSubelementAccess = true; | |||
1821 | return; | |||
1822 | } | |||
1823 | } | |||
1824 | // Check if the offset/size correspond to a component within the alloca type. | |||
1825 | Type *T = Info.AI->getAllocatedType(); | |||
1826 | if (TypeHasComponent(T, Offset, MemSize, DL)) { | |||
1827 | Info.hasSubelementAccess = true; | |||
1828 | return; | |||
1829 | } | |||
1830 | ||||
1831 | return MarkUnsafe(Info, TheAccess); | |||
1832 | } | |||
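| // Example (sketch): for %a = alloca { i32, i32 } (8 bytes), an i64 access at | |||
| // offset 0 covers the whole alloca and is treated like a memcpy-style whole | |||
| // access, while an i32 access at offset 4 is accepted as a component access | |||
| // via TypeHasComponent. | |||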
1833 | ||||
1834 | /// TypeHasComponent - Return true if T has a component type with the | |||
1835 | /// specified offset and size. If Size is zero, do not check the size. | |||
1836 | bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size, | |||
1837 | const DataLayout &DL) { | |||
1838 | Type *EltTy; | |||
1839 | uint64_t EltSize; | |||
1840 | if (StructType *ST = dyn_cast<StructType>(T)) { | |||
1841 | const StructLayout *Layout = DL.getStructLayout(ST); | |||
1842 | unsigned EltIdx = Layout->getElementContainingOffset(Offset); | |||
1843 | EltTy = ST->getContainedType(EltIdx); | |||
1844 | EltSize = DL.getTypeAllocSize(EltTy); | |||
1845 | Offset -= Layout->getElementOffset(EltIdx); | |||
1846 | } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) { | |||
1847 | EltTy = AT->getElementType(); | |||
1848 | EltSize = DL.getTypeAllocSize(EltTy); | |||
1849 | if (Offset >= AT->getNumElements() * EltSize) | |||
1850 | return false; | |||
1851 | Offset %= EltSize; | |||
1852 | } else if (VectorType *VT = dyn_cast<VectorType>(T)) { | |||
1853 | EltTy = VT->getElementType(); | |||
1854 | EltSize = DL.getTypeAllocSize(EltTy); | |||
1855 | if (Offset >= VT->getNumElements() * EltSize) | |||
1856 | return false; | |||
1857 | Offset %= EltSize; | |||
1858 | } else { | |||
1859 | return false; | |||
1860 | } | |||
1861 | if (Offset == 0 && (Size == 0 || EltSize == Size)) | |||
1862 | return true; | |||
1863 | // Check if the component spans multiple elements. | |||
1864 | if (Offset + Size > EltSize) | |||
1865 | return false; | |||
1866 | return TypeHasComponent(EltTy, Offset, Size, DL); | |||
1867 | } | |||
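| // Worked example (sketch, assuming a layout where i32 is 4-byte aligned): | |||
| // for T = { i32, [2 x i16] }, Offset = 6, Size = 2, element 1 starts at | |||
| // offset 4, so we recurse into [2 x i16] with Offset = 2; there EltSize = 2, | |||
| // Offset %= 2 yields 0 and EltSize == Size, so the access is a component. | |||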
1868 | ||||
1869 | /// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite | |||
1870 | /// the instruction I, which references it, to use the separate elements. | |||
1871 | /// Offset indicates the position within AI that is referenced by this | |||
1872 | /// instruction. | |||
1873 | void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, | |||
1874 | SmallVectorImpl<AllocaInst *> &NewElts) { | |||
1875 | const DataLayout &DL = I->getModule()->getDataLayout(); | |||
1876 | for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) { | |||
1877 | Use &TheUse = *UI++; | |||
1878 | Instruction *User = cast<Instruction>(TheUse.getUser()); | |||
1879 | ||||
1880 | if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { | |||
1881 | RewriteBitCast(BC, AI, Offset, NewElts); | |||
1882 | continue; | |||
1883 | } | |||
1884 | ||||
1885 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { | |||
1886 | RewriteGEP(GEPI, AI, Offset, NewElts); | |||
1887 | continue; | |||
1888 | } | |||
1889 | ||||
1890 | if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { | |||
1891 | ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); | |||
1892 | uint64_t MemSize = Length->getZExtValue(); | |||
1893 | if (Offset == 0 && MemSize == DL.getTypeAllocSize(AI->getAllocatedType())) | |||
1894 | RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts); | |||
1895 | // Otherwise the intrinsic can only touch a single element and the | |||
1896 | // address operand will be updated, so nothing else needs to be done. | |||
1897 | continue; | |||
1898 | } | |||
1899 | ||||
1900 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) { | |||
1901 | if (II->getIntrinsicID() == Intrinsic::lifetime_start || | |||
1902 | II->getIntrinsicID() == Intrinsic::lifetime_end) { | |||
1903 | RewriteLifetimeIntrinsic(II, AI, Offset, NewElts); | |||
1904 | } | |||
1905 | continue; | |||
1906 | } | |||
1907 | ||||
1908 | if (LoadInst *LI = dyn_cast<LoadInst>(User)) { | |||
1909 | Type *LIType = LI->getType(); | |||
1910 | ||||
1911 | if (isCompatibleAggregate(LIType, AI->getAllocatedType())) { | |||
1912 | // Replace: | |||
1913 | // %res = load { i32, i32 }* %alloc | |||
1914 | // with: | |||
1915 | // %load.0 = load i32* %alloc.0 | |||
1916 |         //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0 | |||
1917 | // %load.1 = load i32* %alloc.1 | |||
1918 | // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 | |||
1919 | // (Also works for arrays instead of structs) | |||
1920 | Value *Insert = UndefValue::get(LIType); | |||
1921 | IRBuilder<> Builder(LI); | |||
1922 | for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { | |||
1923 | Value *Load = Builder.CreateLoad(NewElts[i], "load"); | |||
1924 | Insert = Builder.CreateInsertValue(Insert, Load, i, "insert"); | |||
1925 | } | |||
1926 | LI->replaceAllUsesWith(Insert); | |||
1927 | DeadInsts.push_back(LI); | |||
1928 | } else if (LIType->isIntegerTy() && | |||
1929 | DL.getTypeAllocSize(LIType) == | |||
1930 | DL.getTypeAllocSize(AI->getAllocatedType())) { | |||
1931 | // If this is a load of the entire alloca to an integer, rewrite it. | |||
1932 | RewriteLoadUserOfWholeAlloca(LI, AI, NewElts); | |||
1933 | } | |||
1934 | continue; | |||
1935 | } | |||
1936 | ||||
1937 | if (StoreInst *SI = dyn_cast<StoreInst>(User)) { | |||
1938 | Value *Val = SI->getOperand(0); | |||
1939 | Type *SIType = Val->getType(); | |||
1940 | if (isCompatibleAggregate(SIType, AI->getAllocatedType())) { | |||
1941 | // Replace: | |||
1942 | // store { i32, i32 } %val, { i32, i32 }* %alloc | |||
1943 | // with: | |||
1944 | // %val.0 = extractvalue { i32, i32 } %val, 0 | |||
1945 | // store i32 %val.0, i32* %alloc.0 | |||
1946 | // %val.1 = extractvalue { i32, i32 } %val, 1 | |||
1947 | // store i32 %val.1, i32* %alloc.1 | |||
1948 | // (Also works for arrays instead of structs) | |||
1949 | IRBuilder<> Builder(SI); | |||
1950 | for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { | |||
1951 | Value *Extract = Builder.CreateExtractValue(Val, i, Val->getName()); | |||
1952 | Builder.CreateStore(Extract, NewElts[i]); | |||
1953 | } | |||
1954 | DeadInsts.push_back(SI); | |||
1955 | } else if (SIType->isIntegerTy() && | |||
1956 | DL.getTypeAllocSize(SIType) == | |||
1957 | DL.getTypeAllocSize(AI->getAllocatedType())) { | |||
1958 | // If this is a store of the entire alloca from an integer, rewrite it. | |||
1959 | RewriteStoreUserOfWholeAlloca(SI, AI, NewElts); | |||
1960 | } | |||
1961 | continue; | |||
1962 | } | |||
1963 | ||||
1964 | if (isa<SelectInst>(User) || isa<PHINode>(User)) { | |||
1965 | // If we have a PHI user of the alloca itself (as opposed to a GEP or | |||
1966 | // bitcast) we have to rewrite it. GEP and bitcast uses will be RAUW'd to | |||
1967 | // the new pointer. | |||
1968 | if (!isa<AllocaInst>(I)) continue; | |||
1969 | ||||
1970 |       assert(Offset == 0 && NewElts[0] && | |||
1971 |              "Direct alloca use should have a zero offset"); | |||
1972 | ||||
1973 | // If we have a use of the alloca, we know the derived uses will be | |||
1974 | // utilizing just the first element of the scalarized result. Insert a | |||
1975 | // bitcast of the first alloca before the user as required. | |||
1976 | AllocaInst *NewAI = NewElts[0]; | |||
1977 | BitCastInst *BCI = new BitCastInst(NewAI, AI->getType(), "", NewAI); | |||
1978 | NewAI->moveBefore(BCI); | |||
1979 | TheUse = BCI; | |||
1980 | continue; | |||
1981 | } | |||
1982 | } | |||
1983 | } | |||
1984 | ||||
1985 | /// RewriteBitCast - Update a bitcast reference to the alloca being replaced | |||
1986 | /// and recursively continue updating all of its uses. | |||
1987 | void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset, | |||
1988 | SmallVectorImpl<AllocaInst *> &NewElts) { | |||
1989 | RewriteForScalarRepl(BC, AI, Offset, NewElts); | |||
1990 | if (BC->getOperand(0) != AI) | |||
1991 | return; | |||
1992 | ||||
1993 | // The bitcast references the original alloca. Replace its uses with | |||
1994 | // references to the alloca containing offset zero (which is normally at | |||
1995 | // index zero, but might not be in cases involving structs with elements | |||
1996 | // of size zero). | |||
1997 | Type *T = AI->getAllocatedType(); | |||
1998 | uint64_t EltOffset = 0; | |||
1999 | Type *IdxTy; | |||
2000 | uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy, | |||
2001 | BC->getModule()->getDataLayout()); | |||
2002 | Instruction *Val = NewElts[Idx]; | |||
2003 | if (Val->getType() != BC->getDestTy()) { | |||
2004 | Val = new BitCastInst(Val, BC->getDestTy(), "", BC); | |||
2005 | Val->takeName(BC); | |||
2006 | } | |||
2007 | BC->replaceAllUsesWith(Val); | |||
2008 | DeadInsts.push_back(BC); | |||
2009 | } | |||
2010 | ||||
2011 | /// FindElementAndOffset - Return the index of the element containing Offset | |||
2012 | /// within the specified type, which must be either a struct or an array. | |||
2013 | /// Sets T to the type of the element and Offset to the offset within that | |||
2014 | /// element. IdxTy is set to the type of the index result to be used in a | |||
2015 | /// GEP instruction. | |||
2016 | uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy, | |||
2017 | const DataLayout &DL) { | |||
2018 | uint64_t Idx = 0; | |||
2019 | ||||
2020 | if (StructType *ST = dyn_cast<StructType>(T)) { | |||
2021 | const StructLayout *Layout = DL.getStructLayout(ST); | |||
2022 | Idx = Layout->getElementContainingOffset(Offset); | |||
2023 | T = ST->getContainedType(Idx); | |||
2024 | Offset -= Layout->getElementOffset(Idx); | |||
2025 | IdxTy = Type::getInt32Ty(T->getContext()); | |||
2026 | return Idx; | |||
2027 | } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) { | |||
2028 | T = AT->getElementType(); | |||
2029 | uint64_t EltSize = DL.getTypeAllocSize(T); | |||
2030 | Idx = Offset / EltSize; | |||
2031 | Offset -= Idx * EltSize; | |||
2032 | IdxTy = Type::getInt64Ty(T->getContext()); | |||
2033 | return Idx; | |||
2034 | } | |||
2035 | VectorType *VT = cast<VectorType>(T); | |||
2036 | T = VT->getElementType(); | |||
2037 | uint64_t EltSize = DL.getTypeAllocSize(T); | |||
2038 | Idx = Offset / EltSize; | |||
2039 | Offset -= Idx * EltSize; | |||
2040 | IdxTy = Type::getInt64Ty(T->getContext()); | |||
2041 | return Idx; | |||
2042 | } | |||
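| // Worked example (sketch): for T = [4 x i32] and Offset = 10, EltSize = 4, | |||
| // so Idx = 2 and Offset becomes 2 within that element; T is set to i32 and | |||
| // IdxTy to i64 (the index type this code uses for array and vector GEPs). | |||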
2043 | ||||
2044 | /// RewriteGEP - Check if this GEP instruction moves the pointer across | |||
2045 | /// elements of the alloca that are being split apart, and if so, rewrite | |||
2046 | /// the GEP to be relative to the new element. | |||
2047 | void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset, | |||
2048 | SmallVectorImpl<AllocaInst *> &NewElts) { | |||
2049 | uint64_t OldOffset = Offset; | |||
2050 | const DataLayout &DL = GEPI->getModule()->getDataLayout(); | |||
2051 | SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); | |||
2052 | // If the GEP was dynamic then it must have been a dynamic vector lookup. | |||
2053 |   // In this case, the dynamic index must be the last GEP operand, so set it | |||
2054 |   // aside until we've found the constant GEP offset, then add it back in at | |||
2055 |   // the end. | |||
2056 | Value* NonConstantIdx = nullptr; | |||
2057 | if (!GEPI->hasAllConstantIndices()) | |||
2058 | NonConstantIdx = Indices.pop_back_val(); | |||
2059 | Offset += DL.getIndexedOffsetInType(GEPI->getSourceElementType(), Indices); | |||
2060 | ||||
2061 | RewriteForScalarRepl(GEPI, AI, Offset, NewElts); | |||
2062 | ||||
2063 | Type *T = AI->getAllocatedType(); | |||
2064 | Type *IdxTy; | |||
2065 | uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy, DL); | |||
2066 | if (GEPI->getOperand(0) == AI) | |||
2067 | OldIdx = ~0ULL; // Force the GEP to be rewritten. | |||
2068 | ||||
2069 | T = AI->getAllocatedType(); | |||
2070 | uint64_t EltOffset = Offset; | |||
2071 | uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy, DL); | |||
2072 | ||||
2073 | // If this GEP does not move the pointer across elements of the alloca | |||
2074 |   // being split, then it does not need to be rewritten. | |||
2075 | if (Idx == OldIdx) | |||
2076 | return; | |||
2077 | ||||
2078 | Type *i32Ty = Type::getInt32Ty(AI->getContext()); | |||
2079 | SmallVector<Value*, 8> NewArgs; | |||
2080 | NewArgs.push_back(Constant::getNullValue(i32Ty)); | |||
2081 | while (EltOffset != 0) { | |||
2082 | uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy, DL); | |||
2083 | NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx)); | |||
2084 | } | |||
2085 | if (NonConstantIdx) { | |||
2086 | Type* GepTy = T; | |||
2087 | // This GEP has a dynamic index. We need to add "i32 0" to index through | |||
2088 | // any structs or arrays in the original type until we get to the vector | |||
2089 | // to index. | |||
2090 | while (!isa<VectorType>(GepTy)) { | |||
2091 | NewArgs.push_back(Constant::getNullValue(i32Ty)); | |||
2092 | GepTy = cast<CompositeType>(GepTy)->getTypeAtIndex(0U); | |||
2093 | } | |||
2094 | NewArgs.push_back(NonConstantIdx); | |||
2095 | } | |||
2096 | Instruction *Val = NewElts[Idx]; | |||
2097 | if (NewArgs.size() > 1) { | |||
2098 | Val = GetElementPtrInst::CreateInBounds(Val, NewArgs, "", GEPI); | |||
2099 | Val->takeName(GEPI); | |||
2100 | } | |||
2101 | if (Val->getType() != GEPI->getType()) | |||
2102 | Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI); | |||
2103 | GEPI->replaceAllUsesWith(Val); | |||
2104 | DeadInsts.push_back(GEPI); | |||
2105 | } | |||
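| // Example (sketch): with %a = alloca { i32, i32 } split into %a.0 and %a.1, | |||
| //   %p = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1 | |||
| // crosses from element 0 to element 1; the remaining offset within the | |||
| // element is 0 and no extra indices are needed, so uses of %p are simply | |||
| // replaced with %a.1. | |||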
2106 | ||||
2107 | /// RewriteLifetimeIntrinsic - II is a lifetime.start/lifetime.end. Rewrite it | |||
2108 | /// to mark the lifetime of the scalarized memory. | |||
2109 | void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI, | |||
2110 | uint64_t Offset, | |||
2111 | SmallVectorImpl<AllocaInst *> &NewElts) { | |||
2112 | ConstantInt *OldSize = cast<ConstantInt>(II->getArgOperand(0)); | |||
2113 | // Put matching lifetime markers on everything from Offset up to | |||
2114 | // Offset+OldSize. | |||
2115 | Type *AIType = AI->getAllocatedType(); | |||
2116 | const DataLayout &DL = II->getModule()->getDataLayout(); | |||
2117 | uint64_t NewOffset = Offset; | |||
2118 | Type *IdxTy; | |||
2119 | uint64_t Idx = FindElementAndOffset(AIType, NewOffset, IdxTy, DL); | |||
2120 | ||||
2121 | IRBuilder<> Builder(II); | |||
2122 | uint64_t Size = OldSize->getLimitedValue(); | |||
2123 | ||||
2124 | if (NewOffset) { | |||
2125 | // Splice the first element and index 'NewOffset' bytes in. SROA will | |||
2126 | // split the alloca again later. | |||
2127 | unsigned AS = AI->getType()->getAddressSpace(); | |||
2128 | Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy(AS)); | |||
2129 | V = Builder.CreateGEP(Builder.getInt8Ty(), V, Builder.getInt64(NewOffset)); | |||
2130 | ||||
2131 | IdxTy = NewElts[Idx]->getAllocatedType(); | |||
2132 | uint64_t EltSize = DL.getTypeAllocSize(IdxTy) - NewOffset; | |||
2133 | if (EltSize > Size) { | |||
2134 | EltSize = Size; | |||
2135 | Size = 0; | |||
2136 | } else { | |||
2137 | Size -= EltSize; | |||
2138 | } | |||
2139 | if (II->getIntrinsicID() == Intrinsic::lifetime_start) | |||
2140 | Builder.CreateLifetimeStart(V, Builder.getInt64(EltSize)); | |||
2141 | else | |||
2142 | Builder.CreateLifetimeEnd(V, Builder.getInt64(EltSize)); | |||
2143 | ++Idx; | |||
2144 | } | |||
2145 | ||||
2146 | for (; Idx != NewElts.size() && Size; ++Idx) { | |||
2147 | IdxTy = NewElts[Idx]->getAllocatedType(); | |||
2148 | uint64_t EltSize = DL.getTypeAllocSize(IdxTy); | |||
2149 | if (EltSize > Size) { | |||
2150 | EltSize = Size; | |||
2151 | Size = 0; | |||
2152 | } else { | |||
2153 | Size -= EltSize; | |||
2154 | } | |||
2155 | if (II->getIntrinsicID() == Intrinsic::lifetime_start) | |||
2156 | Builder.CreateLifetimeStart(NewElts[Idx], | |||
2157 | Builder.getInt64(EltSize)); | |||
2158 | else | |||
2159 | Builder.CreateLifetimeEnd(NewElts[Idx], | |||
2160 | Builder.getInt64(EltSize)); | |||
2161 | } | |||
2162 | DeadInsts.push_back(II); | |||
2163 | } | |||
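| // Example (sketch, hypothetical names): a marker covering the whole of | |||
| // %a = alloca { i32, i32 }, such as | |||
| //   call void @llvm.lifetime.start(i64 8, i8* %a.cast) | |||
| // is replaced by one marker per new element, 4 bytes each: | |||
| //   call void @llvm.lifetime.start(i64 4, i8* %a.0.cast) | |||
| //   call void @llvm.lifetime.start(i64 4, i8* %a.1.cast) | |||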
2164 | ||||
2165 | /// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI. | |||
2166 | /// Rewrite it to copy or set the elements of the scalarized memory. | |||
2167 | void | |||
2168 | SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst, | |||
2169 | AllocaInst *AI, | |||
2170 | SmallVectorImpl<AllocaInst *> &NewElts) { | |||
2171 | // If this is a memcpy/memmove, construct the other pointer as the | |||
2172 | // appropriate type. The "Other" pointer is the pointer that goes to memory | |||
2173 | // that doesn't have anything to do with the alloca that we are promoting. For | |||
2174 | // memset, this Value* stays null. | |||
2175 | Value *OtherPtr = nullptr; | |||
2176 | unsigned MemAlignment = MI->getAlignment(); | |||
2177 | if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy | |||
2178 | if (Inst == MTI->getRawDest()) | |||
2179 | OtherPtr = MTI->getRawSource(); | |||
2180 | else { | |||
2181 | assert(Inst == MTI->getRawSource()); | |||
2182 | OtherPtr = MTI->getRawDest(); | |||
2183 | } | |||
2184 | } | |||
2185 | ||||
2186 | // If there is an other pointer, we want to convert it to the same pointer | |||
2187 | // type as AI has, so we can GEP through it safely. | |||
2188 | if (OtherPtr) { | |||
2189 | unsigned AddrSpace = | |||
2190 | cast<PointerType>(OtherPtr->getType())->getAddressSpace(); | |||
2191 | ||||
2192 | // Remove bitcasts and all-zero GEPs from OtherPtr. This is an | |||
2193 | // optimization, but it's also required to detect the corner case where | |||
2194 | // both pointer operands are referencing the same memory, and where | |||
2195 | // OtherPtr may be a bitcast or GEP that is currently being rewritten. (This | |||
2196 | // function is only called for mem intrinsics that access the whole | |||
2197 | // aggregate, so non-zero GEPs are not an issue here.) | |||
2198 | OtherPtr = OtherPtr->stripPointerCasts(); | |||
2199 | ||||
2200 | // Copying the alloca to itself is a no-op: just delete it. | |||
2201 | if (OtherPtr == AI || OtherPtr == NewElts[0]) { | |||
2202 | // This code will run twice for a no-op memcpy -- once for each operand. | |||
2203 | // Put only one reference to MI on the DeadInsts list. | |||
2204 | for (SmallVectorImpl<Value *>::const_iterator I = DeadInsts.begin(), | |||
2205 | E = DeadInsts.end(); I != E; ++I) | |||
2206 | if (*I == MI) return; | |||
2207 | DeadInsts.push_back(MI); | |||
2208 | return; | |||
2209 | } | |||
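| // (Illustrative: a self-copy such as memcpy(%p, %p, 8), where %p strips | |||
| // back to AI, reaches this point once for each operand, hence the | |||
| // duplicate check above.) | |||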
2210 | ||||
2211 | // If the pointer is not the right type, insert a bitcast to the right | |||
2212 | // type. | |||
2213 | Type *NewTy = PointerType::get(AI->getAllocatedType(), AddrSpace); | |||
2214 | ||||
2215 | if (OtherPtr->getType() != NewTy) | |||
2216 | OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI); | |||
2217 | } | |||
2218 | ||||
2219 | // Process each element of the aggregate. | |||
2220 | bool SROADest = MI->getRawDest() == Inst; | |||
2221 | ||||
2222 | Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext())); | |||
2223 | const DataLayout &DL = MI->getModule()->getDataLayout(); | |||
2224 | ||||
2225 | for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { | |||
2226 | // If this is a memcpy/memmove, emit a GEP of the other element address. | |||
2227 | Value *OtherElt = nullptr; | |||
2228 | unsigned OtherEltAlign = MemAlignment; | |||
2229 | ||||
2230 | if (OtherPtr) { | |||
2231 | Value *Idx[2] = { Zero, | |||
2232 | ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) }; | |||
2233 | OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, | |||
2234 | OtherPtr->getName()+"."+Twine(i), | |||
2235 | MI); | |||
2236 | uint64_t EltOffset; | |||
2237 | Type *OtherTy = AI->getAllocatedType(); | |||
2238 | if (StructType *ST = dyn_cast<StructType>(OtherTy)) { | |||
2239 | EltOffset = DL.getStructLayout(ST)->getElementOffset(i); | |||
2240 | } else { | |||
2241 | Type *EltTy = cast<SequentialType>(OtherTy)->getElementType(); | |||
2242 | EltOffset = DL.getTypeAllocSize(EltTy) * i; | |||
2243 | } | |||
2244 | ||||
2245 | // The alignment of the other pointer is the guaranteed alignment of the | |||
2246 | // element, which is affected by both the known alignment of the whole | |||
2247 | // mem intrinsic and the alignment of the element. If the alignment of | |||
2248 | // the memcpy, say, is 32 but the element is at a 4-byte offset, then the | |||
2249 | // known alignment is just 4 bytes. | |||
2250 | OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset); | |||
2251 | } | |||
2252 | ||||
2253 | AllocaInst *EltPtr = NewElts[i]; | |||
2254 | Type *EltTy = EltPtr->getAllocatedType(); | |||
2255 | ||||
2256 | // If we got down to a scalar, insert a load or store as appropriate. | |||
2257 | if (EltTy->isSingleValueType()) { | |||
2258 | if (isa<MemTransferInst>(MI)) { | |||
2259 | if (SROADest) { | |||
2260 | // From Other to Alloca. | |||
2261 | Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI); | |||
2262 | new StoreInst(Elt, EltPtr, MI); | |||
2263 | } else { | |||
2264 | // From Alloca to Other. | |||
2265 | Value *Elt = new LoadInst(EltPtr, "tmp", MI); | |||
2266 | new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI); | |||
2267 | } | |||
2268 | continue; | |||
2269 | } | |||
2270 | assert(isa<MemSetInst>(MI)); | |||
2271 | ||||
2272 | // If the stored element is zero (common case), just store a null | |||
2273 | // constant. | |||
2274 | Constant *StoreVal; | |||
2275 | if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) { | |||
2276 | if (CI->isZero()) { | |||
2277 | StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0> | |||
2278 | } else { | |||
2279 | // If EltTy is a vector type, get the element type. | |||
2280 | Type *ValTy = EltTy->getScalarType(); | |||
2281 | ||||
2282 | // Construct an integer with the right value. | |||
2283 | unsigned EltSize = DL.getTypeSizeInBits(ValTy); | |||
2284 | APInt OneVal(EltSize, CI->getZExtValue()); | |||
2285 | APInt TotalVal(OneVal); | |||
2286 | // Set each byte. | |||
2287 | for (unsigned i = 0; 8*i < EltSize; ++i) { | |||
2288 | TotalVal = TotalVal.shl(8); | |||
2289 | TotalVal |= OneVal; | |||
2290 | } | |||
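| // For example (illustrative): a memset value of 0xAB into an i32 element | |||
| // starts with OneVal == 0x000000AB and, after the shift-or loop above, | |||
| // ends with TotalVal == 0xABABABAB. | |||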
2291 | ||||
2292 | // Convert the integer value to the appropriate type. | |||
2293 | StoreVal = ConstantInt::get(CI->getContext(), TotalVal); | |||
2294 | if (ValTy->isPointerTy()) | |||
2295 | StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy); | |||
2296 | else if (ValTy->isFloatingPointTy()) | |||
2297 | StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy); | |||
2298 | assert(StoreVal->getType() == ValTy && "Type mismatch!"); | |||
2299 | ||||
2300 | // If the requested value was a vector constant, create it. | |||
2301 | if (EltTy->isVectorTy()) { | |||
2302 | unsigned NumElts = cast<VectorType>(EltTy)->getNumElements(); | |||
2303 | StoreVal = ConstantVector::getSplat(NumElts, StoreVal); | |||
2304 | } | |||
2305 | } | |||
2306 | new StoreInst(StoreVal, EltPtr, MI); | |||
2307 | continue; | |||
2308 | } | |||
2309 | // Otherwise, if we're storing a byte variable, use a memset call for | |||
2310 | // this element. | |||
2311 | } | |||
2312 | ||||
2313 | unsigned EltSize = DL.getTypeAllocSize(EltTy); | |||
2314 | if (!EltSize) | |||
2315 | continue; | |||
2316 | ||||
2317 | IRBuilder<> Builder(MI); | |||
2318 | ||||
2319 | // Finally, insert the meminst for this element. | |||
2320 | if (isa<MemSetInst>(MI)) { | |||
2321 | Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize, | |||
2322 | MI->isVolatile()); | |||
2323 | } else { | |||
2324 | assert(isa<MemTransferInst>(MI)); | |||
2325 | Value *Dst = SROADest ? EltPtr : OtherElt; // Dest ptr | |||
2326 | Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr | |||
2327 | ||||
2328 | if (isa<MemCpyInst>(MI)) | |||
2329 | Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile()); | |||
2330 | else | |||
2331 | Builder.CreateMemMove(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile()); | |||
2332 | } | |||
2333 | } | |||
2334 | DeadInsts.push_back(MI); | |||
2335 | } | |||
2336 | ||||
2337 | /// RewriteStoreUserOfWholeAlloca - We found a store of an integer that | |||
2338 | /// overwrites the entire allocation. Extract out the pieces of the stored | |||
2339 | /// integer and store them individually. | |||
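| // Illustrative sketch (hypothetical IR, not from this file): with | |||
| //   %A = alloca { i32, i32 } | |||
| // split into %A.0 and %A.1, a store of i64 %v covering the whole alloca | |||
| // becomes, on a little-endian target, roughly: | |||
| //   %lo = trunc i64 %v to i32 | |||
| //   store i32 %lo, i32* %A.0 | |||
| //   %sh = lshr i64 %v, 32 | |||
| //   %hi = trunc i64 %sh to i32 | |||
| //   store i32 %hi, i32* %A.1 | |||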
2340 | void | |||
2341 | SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, | |||
2342 | SmallVectorImpl<AllocaInst *> &NewElts) { | |||
2343 | // Extract each element out of the integer according to its structure offset | |||
2344 | // and store the element value to the individual alloca. | |||
2345 | Value *SrcVal = SI->getOperand(0); | |||
2346 | Type *AllocaEltTy = AI->getAllocatedType(); | |||
2347 | const DataLayout &DL = SI->getModule()->getDataLayout(); | |||
2348 | uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy); | |||
2349 | ||||
2350 | IRBuilder<> Builder(SI); | |||
2351 | ||||
2352 | // Handle tail padding by extending the operand | |||
2353 | if (DL.getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits) | |||
2354 | SrcVal = Builder.CreateZExt(SrcVal, | |||
2355 | IntegerType::get(SI->getContext(), AllocaSizeBits)); | |||
2356 | ||||
2357 | DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI | |||
2358 | << '\n'); | |||
2359 | ||||
2360 | // There are two forms here: AI could be an array or struct. Both cases | |||
2361 | // have different ways to compute the element offset. | |||
2362 | if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { | |||
2363 | const StructLayout *Layout = DL.getStructLayout(EltSTy); | |||
2364 | ||||
2365 | for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { | |||
2366 | // Get the number of bits to shift SrcVal to get the value. | |||
2367 | Type *FieldTy = EltSTy->getElementType(i); | |||
2368 | uint64_t Shift = Layout->getElementOffsetInBits(i); | |||
2369 | ||||
2370 | if (DL.isBigEndian()) | |||
2371 | Shift = AllocaSizeBits - Shift - DL.getTypeAllocSizeInBits(FieldTy); | |||
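| // E.g. (illustrative) for { i32, i32 } on a big-endian target, field 0 | |||
| // lives in the high bits: Shift = 64 - 0 - 32 = 32, and field 1 gets 0. | |||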
2372 | ||||
2373 | Value *EltVal = SrcVal; | |||
2374 | if (Shift) { | |||
2375 | Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); | |||
2376 | EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt"); | |||
2377 | } | |||
2378 | ||||
2379 | // Truncate down to an integer of the right size. | |||
2380 | uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy); | |||
2381 | ||||
2382 | // Ignore zero sized fields like {}, they obviously contain no data. | |||
2383 | if (FieldSizeBits == 0) continue; | |||
2384 | ||||
2385 | if (FieldSizeBits != AllocaSizeBits) | |||
2386 | EltVal = Builder.CreateTrunc(EltVal, | |||
2387 | IntegerType::get(SI->getContext(), FieldSizeBits)); | |||
2388 | Value *DestField = NewElts[i]; | |||
2389 | if (EltVal->getType() == FieldTy) { | |||
2390 | // Storing to an integer field of this size, just do it. | |||
2391 | } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) { | |||
2392 | // Bitcast to the right element type (for fp/vector values). | |||
2393 | EltVal = Builder.CreateBitCast(EltVal, FieldTy); | |||
2394 | } else { | |||
2395 | // Otherwise, bitcast the dest pointer (for aggregates). | |||
2396 | DestField = Builder.CreateBitCast(DestField, | |||
2397 | PointerType::getUnqual(EltVal->getType())); | |||
2398 | } | |||
2399 | new StoreInst(EltVal, DestField, SI); | |||
2400 | } | |||
2401 | ||||
2402 | } else { | |||
2403 | ArrayType *ATy = cast<ArrayType>(AllocaEltTy); | |||
2404 | Type *ArrayEltTy = ATy->getElementType(); | |||
2405 | uint64_t ElementOffset = DL.getTypeAllocSizeInBits(ArrayEltTy); | |||
2406 | uint64_t ElementSizeBits = DL.getTypeSizeInBits(ArrayEltTy); | |||
2407 | ||||
2408 | uint64_t Shift; | |||
2409 | ||||
2410 | if (DL.isBigEndian()) | |||
2411 | Shift = AllocaSizeBits-ElementOffset; | |||
2412 | else | |||
2413 | Shift = 0; | |||
2414 | ||||
2415 | for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { | |||
2416 | // Ignore zero sized fields like {}, they obviously contain no data. | |||
2417 | if (ElementSizeBits == 0) continue; | |||
2418 | ||||
2419 | Value *EltVal = SrcVal; | |||
2420 | if (Shift) { | |||
2421 | Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); | |||
2422 | EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt"); | |||
2423 | } | |||
2424 | ||||
2425 | // Truncate down to an integer of the right size. | |||
2426 | if (ElementSizeBits != AllocaSizeBits) | |||
2427 | EltVal = Builder.CreateTrunc(EltVal, | |||
2428 | IntegerType::get(SI->getContext(), | |||
2429 | ElementSizeBits)); | |||
2430 | Value *DestField = NewElts[i]; | |||
2431 | if (EltVal->getType() == ArrayEltTy) { | |||
2432 | // Storing to an integer field of this size, just do it. | |||
2433 | } else if (ArrayEltTy->isFloatingPointTy() || | |||
2434 | ArrayEltTy->isVectorTy()) { | |||
2435 | // Bitcast to the right element type (for fp/vector values). | |||
2436 | EltVal = Builder.CreateBitCast(EltVal, ArrayEltTy); | |||
2437 | } else { | |||
2438 | // Otherwise, bitcast the dest pointer (for aggregates). | |||
2439 | DestField = Builder.CreateBitCast(DestField, | |||
2440 | PointerType::getUnqual(EltVal->getType())); | |||
2441 | } | |||
2442 | new StoreInst(EltVal, DestField, SI); | |||
2443 | ||||
2444 | if (DL.isBigEndian()) | |||
2445 | Shift -= ElementOffset; | |||
2446 | else | |||
2447 | Shift += ElementOffset; | |||
2448 | } | |||
2449 | } | |||
2450 | ||||
2451 | DeadInsts.push_back(SI); | |||
2452 | } | |||
2453 | ||||
2454 | /// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to | |||
2455 | /// an integer. Load the individual pieces to form the aggregate value. | |||
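| // Illustrative sketch (hypothetical IR, not from this file): loading an | |||
| // i64 from a { i32, i32 } alloca split into %A.0 and %A.1 becomes, on a | |||
| // little-endian target, roughly: | |||
| //   %e0 = load i32, i32* %A.0 | |||
| //   %e1 = load i32, i32* %A.1 | |||
| //   %z0 = zext i32 %e0 to i64 | |||
| //   %z1 = zext i32 %e1 to i64 | |||
| //   %s1 = shl i64 %z1, 32 | |||
| //   %r  = or i64 %s1, %z0 | |||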
2456 | void | |||
2457 | SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, | |||
2458 | SmallVectorImpl<AllocaInst *> &NewElts) { | |||
2459 | // Extract each element out of the NewElts according to its structure offset | |||
2460 | // and form the result value. | |||
2461 | Type *AllocaEltTy = AI->getAllocatedType(); | |||
2462 | const DataLayout &DL = LI->getModule()->getDataLayout(); | |||
2463 | uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy); | |||
2464 | ||||
2465 | DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI | |||
2466 | << '\n'); | |||
2467 | ||||
2468 | // There are two forms here: AI could be an array or struct. Both cases | |||
2469 | // have different ways to compute the element offset. | |||
2470 | const StructLayout *Layout = nullptr; | |||
2471 | uint64_t ArrayEltBitOffset = 0; | |||
2472 | if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { | |||
2473 | Layout = DL.getStructLayout(EltSTy); | |||
2474 | } else { | |||
2475 | Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType(); | |||
2476 | ArrayEltBitOffset = DL.getTypeAllocSizeInBits(ArrayEltTy); | |||
2477 | } | |||
2478 | ||||
2479 | Value *ResultVal = | |||
2480 | Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits)); | |||
2481 | ||||
2482 | for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { | |||
2483 | // Load the value from the alloca. If the NewElt is an aggregate, cast | |||
2484 | // the pointer to an integer of the same size before doing the load. | |||
2485 | Value *SrcField = NewElts[i]; | |||
2486 | Type *FieldTy = NewElts[i]->getAllocatedType(); | |||
2487 | uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy); | |||
2488 | ||||
2489 | // Ignore zero sized fields like {}, they obviously contain no data. | |||
2490 | if (FieldSizeBits == 0) continue; | |||
2491 | ||||
2492 | IntegerType *FieldIntTy = IntegerType::get(LI->getContext(), | |||
2493 | FieldSizeBits); | |||
2494 | if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() && | |||
2495 | !FieldTy->isVectorTy()) | |||
2496 | SrcField = new BitCastInst(SrcField, | |||
2497 | PointerType::getUnqual(FieldIntTy), | |||
2498 | "", LI); | |||
2499 | SrcField = new LoadInst(SrcField, "sroa.load.elt", LI); | |||
2500 | ||||
2501 | // If SrcField is a fp or vector of the right size but that isn't an | |||
2502 | // integer type, bitcast to an integer so we can shift it. | |||
2503 | if (SrcField->getType() != FieldIntTy) | |||
2504 | SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI); | |||
2505 | ||||
2506 | // Zero extend the field to be the same size as the final alloca so that | |||
2507 | // we can shift and insert it. | |||
2508 | if (SrcField->getType() != ResultVal->getType()) | |||
2509 | SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI); | |||
2510 | ||||
2511 | // Determine the number of bits to shift SrcField. | |||
2512 | uint64_t Shift; | |||
2513 | if (Layout) // Struct case. | |||
2514 | Shift = Layout->getElementOffsetInBits(i); | |||
2515 | else // Array case. | |||
2516 | Shift = i*ArrayEltBitOffset; | |||
2517 | ||||
2518 | if (DL.isBigEndian()) | |||
2519 | Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth(); | |||
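| // (Illustrative: for { i32, i32 } on a big-endian target, element 0 is | |||
| // shifted up by 64 - 0 - 32 = 32 bits before being or'd in.) | |||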
2520 | ||||
2521 | if (Shift) { | |||
2522 | Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift); | |||
2523 | SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI); | |||
2524 | } | |||
2525 | ||||
2526 | // Don't create an 'or x, 0' on the first iteration. | |||
2527 | if (!isa<Constant>(ResultVal) || | |||
2528 | !cast<Constant>(ResultVal)->isNullValue()) | |||
2529 | ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI); | |||
2530 | else | |||
2531 | ResultVal = SrcField; | |||
2532 | } | |||
2533 | ||||
2534 | // Handle tail padding by truncating the result | |||
2535 | if (DL.getTypeSizeInBits(LI->getType()) != AllocaSizeBits) | |||
2536 | ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI); | |||
2537 | ||||
2538 | LI->replaceAllUsesWith(ResultVal); | |||
2539 | DeadInsts.push_back(LI); | |||
2540 | } | |||
2541 | ||||
2542 | /// HasPadding - Return true if the specified type has any structure or | |||
2543 | /// alignment padding in between the elements that would be split apart | |||
2544 | /// by SROA; return false otherwise. | |||
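| // For example (illustrative): with a typical 64-bit data layout, | |||
| // { i32, i64 } has 32 bits of padding (field 1 starts at bit 64 while | |||
| // field 0 ends at bit 32), so this returns true; { i32, i32 } has none. | |||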
2545 | static bool HasPadding(Type *Ty, const DataLayout &DL) { | |||
2546 | if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { | |||
2547 | Ty = ATy->getElementType(); | |||
2548 | return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty); | |||
2549 | } | |||
2550 | ||||
2551 | // SROA currently handles only Arrays and Structs. | |||
2552 | StructType *STy = cast<StructType>(Ty); | |||
2553 | const StructLayout *SL = DL.getStructLayout(STy); | |||
2554 | unsigned PrevFieldBitOffset = 0; | |||
2555 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { | |||
2556 | unsigned FieldBitOffset = SL->getElementOffsetInBits(i); | |||
2557 | ||||
2558 | // Check to see if there is any padding between this element and the | |||
2559 | // previous one. | |||
2560 | if (i) { | |||
2561 | unsigned PrevFieldEnd = | |||
2562 | PrevFieldBitOffset+DL.getTypeSizeInBits(STy->getElementType(i-1)); | |||
2563 | if (PrevFieldEnd < FieldBitOffset) | |||
2564 | return true; | |||
2565 | } | |||
2566 | PrevFieldBitOffset = FieldBitOffset; | |||
2567 | } | |||
2568 | // Check for tail padding. | |||
2569 | if (unsigned EltCount = STy->getNumElements()) { | |||
2570 | unsigned PrevFieldEnd = PrevFieldBitOffset + | |||
2571 | DL.getTypeSizeInBits(STy->getElementType(EltCount-1)); | |||
2572 | if (PrevFieldEnd < SL->getSizeInBits()) | |||
2573 | return true; | |||
2574 | } | |||
2575 | return false; | |||
2576 | } | |||
2577 | ||||
2578 | /// isSafeAllocaToScalarRepl - Check to see if the specified allocation of | |||
2579 | /// an aggregate can be broken down into elements. Return true if it is safe | |||
2580 | /// to do so, or false otherwise. | |||
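| // (Illustrative: a { i32, i64 } alloca that is both the source and the | |||
| // destination of memcpys is rejected below, since the copies may carry | |||
| // bytes that live only in the struct's padding.) | |||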
2581 | bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) { | |||
2582 | // Loop over the use list of the alloca. We can only transform it if all of | |||
2583 | // the users are safe to transform. | |||
2584 | AllocaInfo Info(AI); | |||
2585 | ||||
2586 | isSafeForScalarRepl(AI, 0, Info); | |||
2587 | if (Info.isUnsafe) { | |||
2588 | DEBUG(dbgs() << "Cannot transform: " << *AI << '\n'); | |||
2589 | return false; | |||
2590 | } | |||
2591 | ||||
2592 | const DataLayout &DL = AI->getModule()->getDataLayout(); | |||
2593 | ||||
2594 | // Okay, we know all the users are promotable. If the aggregate is a memcpy | |||
2595 | // source and destination, we have to be careful. In particular, the memcpy | |||
2596 | // could be moving around elements that live in structure padding of the LLVM | |||
2597 | // types, but may actually be used. In these cases, we refuse to promote the | |||
2598 | // struct. | |||
2599 | if (Info.isMemCpySrc && Info.isMemCpyDst && | |||
2600 | HasPadding(AI->getAllocatedType(), DL)) | |||
2601 | return false; | |||
2602 | ||||
2603 | // If the alloca never has an access to just *part* of it, but is accessed | |||
2604 | // via loads and stores, then we should use ConvertToScalarInfo to promote | |||
2605 | // the alloca instead of promoting each piece at a time and inserting fission | |||
2606 | // and fusion code. | |||
2607 | if (!Info.hasSubelementAccess && Info.hasALoadOrStore) { | |||
2608 | // If the struct/array just has one element, use basic SRoA. | |||
2609 | if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { | |||
2610 | if (ST->getNumElements() > 1) return false; | |||
2611 | } else { | |||
2612 | if (cast<ArrayType>(AI->getAllocatedType())->getNumElements() > 1) | |||
2613 | return false; | |||
2614 | } | |||
2615 | } | |||
2616 | ||||
2617 | return true; | |||
2618 | } |