Bug Summary

File: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
Warning: line 1351, column 28
Called C++ object pointer is null

On the path shown below, 'OtherBB' is initialized to a null pointer value in InstCombiner::SimplifyStoreAtEndOfBlock (line 1328) and is never assigned a block, because both predecessors of 'DestBB' are assumed to equal 'StoreBB'; it is then dereferenced by OtherBB->getTerminator() at line 1351.

Annotated Source Code

1//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the visit functions for load, store and alloca.
11//
12//===----------------------------------------------------------------------===//
13
14#include "InstCombineInternal.h"
15#include "llvm/ADT/SmallString.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/Loads.h"
18#include "llvm/IR/ConstantRange.h"
19#include "llvm/IR/DataLayout.h"
20#include "llvm/IR/LLVMContext.h"
21#include "llvm/IR/IntrinsicInst.h"
22#include "llvm/IR/MDBuilder.h"
23#include "llvm/Transforms/Utils/BasicBlockUtils.h"
24#include "llvm/Transforms/Utils/Local.h"
25using namespace llvm;
26
27#define DEBUG_TYPE "instcombine"
28
29STATISTIC(NumDeadStore, "Number of dead stores eliminated");
30STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
31
32/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
33/// some part of a constant global variable. This intentionally only accepts
34/// constant expressions because we can't rewrite arbitrary instructions.
35static bool pointsToConstantGlobal(Value *V) {
36 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
37 return GV->isConstant();
38
39 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
40 if (CE->getOpcode() == Instruction::BitCast ||
41 CE->getOpcode() == Instruction::AddrSpaceCast ||
42 CE->getOpcode() == Instruction::GetElementPtr)
43 return pointsToConstantGlobal(CE->getOperand(0));
44 }
45 return false;
46}
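For illustration, a minimal IR sketch of what this predicate accepts (the global @g and the indices are hypothetical):

  @g = internal constant [4 x i32] [i32 1, i32 2, i32 3, i32 4], align 4
  ; pointsToConstantGlobal(@g) is true, and so is the result for constant
  ; expressions that wrap it, such as:
  ;   getelementptr inbounds ([4 x i32], [4 x i32]* @g, i64 0, i64 2)
  ;   bitcast ([4 x i32]* @g to i8*)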
47
48/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
49/// pointer to an alloca. Ignore any reads of the pointer, return false if we
50/// see any stores or other unknown uses. If we see pointer arithmetic, keep
51/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
52/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
53/// the alloca, and if the source pointer is a pointer to a constant global, we
54/// can optimize this.
55static bool
56isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
57 SmallVectorImpl<Instruction *> &ToDelete) {
58 // We track lifetime intrinsics as we encounter them. If we decide to go
59 // ahead and replace the value with the global, this lets the caller quickly
60 // eliminate the markers.
61
62 SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
63 ValuesToInspect.emplace_back(V, false);
64 while (!ValuesToInspect.empty()) {
65 auto ValuePair = ValuesToInspect.pop_back_val();
66 const bool IsOffset = ValuePair.second;
67 for (auto &U : ValuePair.first->uses()) {
68 auto *I = cast<Instruction>(U.getUser());
69
70 if (auto *LI = dyn_cast<LoadInst>(I)) {
71 // Ignore non-volatile loads, they are always ok.
72 if (!LI->isSimple()) return false;
73 continue;
74 }
75
76 if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
77 // If uses of the bitcast are ok, we are ok.
78 ValuesToInspect.emplace_back(I, IsOffset);
79 continue;
80 }
81 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
82 // If the GEP has all zero indices, it doesn't offset the pointer;
83 // otherwise it does.
84 ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
85 continue;
86 }
87
88 if (auto CS = CallSite(I)) {
89 // If this is the function being called then we treat it like a load and
90 // ignore it.
91 if (CS.isCallee(&U))
92 continue;
93
94 unsigned DataOpNo = CS.getDataOperandNo(&U);
95 bool IsArgOperand = CS.isArgOperand(&U);
96
97 // Inalloca arguments are clobbered by the call.
98 if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
99 return false;
100
101 // If this is a readonly/readnone call site, then we know it is just a
102 // load (but one that potentially returns the value itself), so we can
103 // ignore it if we know that the value isn't captured.
104 if (CS.onlyReadsMemory() &&
105 (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
106 continue;
107
108 // If this is being passed as a byval argument, the caller is making a
109 // copy, so it is only a read of the alloca.
110 if (IsArgOperand && CS.isByValArgument(DataOpNo))
111 continue;
112 }
113
114 // Lifetime intrinsics can be handled by the caller.
115 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
116 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
117 II->getIntrinsicID() == Intrinsic::lifetime_end) {
118 assert(II->use_empty() && "Lifetime markers have no result to use!");
119 ToDelete.push_back(II);
120 continue;
121 }
122 }
123
124 // If this isn't our memcpy/memmove, reject it as something we can't
125 // handle.
126 MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
127 if (!MI)
128 return false;
129
130 // If the transfer is using the alloca as a source of the transfer, then
131 // ignore it since it is a load (unless the transfer is volatile).
132 if (U.getOperandNo() == 1) {
133 if (MI->isVolatile()) return false;
134 continue;
135 }
136
137 // If we already have seen a copy, reject the second one.
138 if (TheCopy) return false;
139
140 // If the pointer has been offset from the start of the alloca, we can't
141 // safely handle this.
142 if (IsOffset) return false;
143
144 // If the memintrinsic isn't using the alloca as the dest, reject it.
145 if (U.getOperandNo() != 0) return false;
146
147 // If the source of the memcpy/move is not a constant global, reject it.
148 if (!pointsToConstantGlobal(MI->getSource()))
149 return false;
150
151 // Otherwise, the transform is safe. Remember the copy instruction.
152 TheCopy = MI;
153 }
154 }
155 return true;
156}
157
158/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
159/// modified by a copy from a constant global. If we can prove this, we can
160/// replace any uses of the alloca with uses of the global directly.
161static MemTransferInst *
162isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
163 SmallVectorImpl<Instruction *> &ToDelete) {
164 MemTransferInst *TheCopy = nullptr;
165 if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
166 return TheCopy;
167 return nullptr;
168}
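As a sketch of the pattern these helpers are meant to recognize, under the typed-pointer IR of this LLVM version (where @llvm.memcpy still takes an explicit alignment operand) and with hypothetical names: an alloca whose only write is a memcpy from a constant global and which is otherwise only read.

  @g = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4], align 4
  declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)

  define i32 @f(i64 %i) {
    %a = alloca [4 x i32], align 4
    %dst = bitcast [4 x i32]* %a to i8*
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst,
        i8* bitcast ([4 x i32]* @g to i8*), i64 16, i32 4, i1 false)
    %p = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 %i
    %v = load i32, i32* %p, align 4
    ret i32 %v
  }

isOnlyCopiedFromConstantGlobal returns the memcpy here, and visitAllocaInst below can then rewrite every user of %a to use @g directly and drop the copy.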
169
170static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
171 // Check for array size of 1 (scalar allocation).
172 if (!AI.isArrayAllocation()) {
173 // i32 1 is the canonical array size for scalar allocations.
174 if (AI.getArraySize()->getType()->isIntegerTy(32))
175 return nullptr;
176
177 // Canonicalize it.
178 Value *V = IC.Builder->getInt32(1);
179 AI.setOperand(0, V);
180 return &AI;
181 }
182
183 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
184 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
185 Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
186 AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
187 New->setAlignment(AI.getAlignment());
188
189 // Scan to the end of the allocation instructions, to skip over a block of
190 // allocas if possible...also skip interleaved debug info
191 //
192 BasicBlock::iterator It(New);
193 while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
194 ++It;
195
196 // Now that I is pointing to the first non-allocation-inst in the block,
197 // insert our getelementptr instruction...
198 //
199 Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
200 Value *NullIdx = Constant::getNullValue(IdxTy);
201 Value *Idx[2] = {NullIdx, NullIdx};
202 Instruction *GEP =
203 GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
204 IC.InsertNewInstBefore(GEP, *It);
205
206 // Now make everything use the getelementptr instead of the original
207 // allocation.
208 return IC.replaceInstUsesWith(AI, GEP);
209 }
210
211 if (isa<UndefValue>(AI.getArraySize()))
212 return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
213
214 // Ensure that the alloca array size argument has type intptr_t, so that
215 // any casting is exposed early.
216 Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
217 if (AI.getArraySize()->getType() != IntPtrTy) {
218 Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
219 AI.setOperand(0, V);
220 return &AI;
221 }
222
223 return nullptr;
224}
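A sketch of the array-size canonicalization performed above, with hypothetical names and a 64-bit index type:

  ; before
  %a = alloca i32, i32 4, align 4
  ; after: an explicit array alloca, plus a zero-offset GEP that the
  ; existing users of %a are rewritten to use
  %a1 = alloca [4 x i32], align 4
  %a1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %a1, i64 0, i64 0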
225
226Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
227 if (auto *I = simplifyAllocaArraySize(*this, AI))
228 return I;
229
230 if (AI.getAllocatedType()->isSized()) {
231 // If the alignment is 0 (unspecified), assign it the preferred alignment.
232 if (AI.getAlignment() == 0)
233 AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
234
235 // Move all alloca's of zero byte objects to the entry block and merge them
236 // together. Note that we only do this for alloca's, because malloc should
237 // allocate and return a unique pointer, even for a zero byte allocation.
238 if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
239 // For a zero sized alloca there is no point in doing an array allocation.
240 // This is helpful if the array size is a complicated expression not used
241 // elsewhere.
242 if (AI.isArrayAllocation()) {
243 AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
244 return &AI;
245 }
246
247 // Get the first instruction in the entry block.
248 BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
249 Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
250 if (FirstInst != &AI) {
251 // If the entry block doesn't start with a zero-size alloca then move
252 // this one to the start of the entry block. There is no problem with
253 // dominance as the array size was forced to a constant earlier already.
254 AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
255 if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
256 DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
257 AI.moveBefore(FirstInst);
258 return &AI;
259 }
260
261 // If the alignment of the entry block alloca is 0 (unspecified),
262 // assign it the preferred alignment.
263 if (EntryAI->getAlignment() == 0)
264 EntryAI->setAlignment(
265 DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
266 // Replace this zero-sized alloca with the one at the start of the entry
267 // block after ensuring that the address will be aligned enough for both
268 // types.
269 unsigned MaxAlign = std::max(EntryAI->getAlignment(),
270 AI.getAlignment());
271 EntryAI->setAlignment(MaxAlign);
272 if (AI.getType() != EntryAI->getType())
273 return new BitCastInst(EntryAI, AI.getType());
274 return replaceInstUsesWith(AI, EntryAI);
275 }
276 }
277 }
278
279 if (AI.getAlignment()) {
280 // Check to see if this allocation is only modified by a memcpy/memmove from
281 // a constant global whose alignment is equal to or exceeds that of the
282 // allocation. If this is the case, we can change all users to use
283 // the constant global instead. This is commonly produced by the CFE by
284 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
285 // is only subsequently read.
286 SmallVector<Instruction *, 4> ToDelete;
287 if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
288 unsigned SourceAlign = getOrEnforceKnownAlignment(
289 Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
290 if (AI.getAlignment() <= SourceAlign) {
291 DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
292 DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
293 for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
294 eraseInstFromFunction(*ToDelete[i]);
295 Constant *TheSrc = cast<Constant>(Copy->getSource());
296 Constant *Cast
297 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
298 Instruction *NewI = replaceInstUsesWith(AI, Cast);
299 eraseInstFromFunction(*Copy);
300 ++NumGlobalCopies;
301 return NewI;
302 }
303 }
304 }
305
306 // At last, use the generic allocation site handler to aggressively remove
307 // unused allocas.
308 return visitAllocSite(AI);
309}
310
311// Are we allowed to form an atomic load or store of this type?
312static bool isSupportedAtomicType(Type *Ty) {
313 return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
314}
315
316/// \brief Helper to combine a load to a new type.
317///
318/// This just does the work of combining a load to a new type. It handles
319/// metadata, etc., and returns the new instruction. The \c NewTy should be the
320/// loaded *value* type. This will convert it to a pointer, cast the operand to
321/// that pointer type, load it, etc.
322///
323/// Note that this will create all of the instructions with whatever insert
324/// point the \c InstCombiner currently is using.
325static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
326 const Twine &Suffix = "") {
327 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
328 "can't fold an atomic load to requested type");
329
330 Value *Ptr = LI.getPointerOperand();
331 unsigned AS = LI.getPointerAddressSpace();
332 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
333 LI.getAllMetadata(MD);
334
335 LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
336 IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
337 LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
338 NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
339 MDBuilder MDB(NewLoad->getContext());
340 for (const auto &MDPair : MD) {
341 unsigned ID = MDPair.first;
342 MDNode *N = MDPair.second;
343 // Note, essentially every kind of metadata should be preserved here! This
344 // routine is supposed to clone a load instruction changing *only its type*.
345 // The only metadata it makes sense to drop is metadata which is invalidated
346 // when the pointer type changes. This should essentially never be the case
347 // in LLVM, but we explicitly switch over only known metadata to be
348 // conservatively correct. If you are adding metadata to LLVM which pertains
349 // to loads, you almost certainly want to add it here.
350 switch (ID) {
351 case LLVMContext::MD_dbg:
352 case LLVMContext::MD_tbaa:
353 case LLVMContext::MD_prof:
354 case LLVMContext::MD_fpmath:
355 case LLVMContext::MD_tbaa_struct:
356 case LLVMContext::MD_invariant_load:
357 case LLVMContext::MD_alias_scope:
358 case LLVMContext::MD_noalias:
359 case LLVMContext::MD_nontemporal:
360 case LLVMContext::MD_mem_parallel_loop_access:
361 // All of these directly apply.
362 NewLoad->setMetadata(ID, N);
363 break;
364
365 case LLVMContext::MD_nonnull:
366 // This only directly applies if the new type is also a pointer.
367 if (NewTy->isPointerTy()) {
368 NewLoad->setMetadata(ID, N);
369 break;
370 }
371 // If it's integral now, translate it to !range metadata.
372 if (NewTy->isIntegerTy()) {
373 auto *ITy = cast<IntegerType>(NewTy);
374 auto *NullInt = ConstantExpr::getPtrToInt(
375 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
376 auto *NonNullInt =
377 ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
378 NewLoad->setMetadata(LLVMContext::MD_range,
379 MDB.createRange(NonNullInt, NullInt));
380 }
381 break;
382 case LLVMContext::MD_align:
383 case LLVMContext::MD_dereferenceable:
384 case LLVMContext::MD_dereferenceable_or_null:
385 // These only directly apply if the new type is also a pointer.
386 if (NewTy->isPointerTy())
387 NewLoad->setMetadata(ID, N);
388 break;
389 case LLVMContext::MD_range:
390 // FIXME: It would be nice to propagate this in some way, but the type
391 // conversions make it hard.
392
393 // If it's a pointer now and the range does not contain 0, make it !nonnull.
394 if (NewTy->isPointerTy()) {
395 unsigned BitWidth = IC.getDataLayout().getTypeSizeInBits(NewTy);
396 if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
397 MDNode *NN = MDNode::get(LI.getContext(), None);
398 NewLoad->setMetadata(LLVMContext::MD_nonnull, NN);
399 }
400 }
401 break;
402 }
403 }
404 return NewLoad;
405}
406
407/// \brief Combine a store to a new type.
408///
409/// Returns the newly created store instruction.
410static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
411 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
412 "can't fold an atomic store of requested type");
413
414 Value *Ptr = SI.getPointerOperand();
415 unsigned AS = SI.getPointerAddressSpace();
416 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
417 SI.getAllMetadata(MD);
418
419 StoreInst *NewStore = IC.Builder->CreateAlignedStore(
420 V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
421 SI.getAlignment(), SI.isVolatile());
422 NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
423 for (const auto &MDPair : MD) {
424 unsigned ID = MDPair.first;
425 MDNode *N = MDPair.second;
426 // Note, essentially every kind of metadata should be preserved here! This
427 // routine is supposed to clone a store instruction changing *only its
428 // type*. The only metadata it makes sense to drop is metadata which is
429 // invalidated when the pointer type changes. This should essentially
430 // never be the case in LLVM, but we explicitly switch over only known
431 // metadata to be conservatively correct. If you are adding metadata to
432 // LLVM which pertains to stores, you almost certainly want to add it
433 // here.
434 switch (ID) {
435 case LLVMContext::MD_dbg:
436 case LLVMContext::MD_tbaa:
437 case LLVMContext::MD_prof:
438 case LLVMContext::MD_fpmath:
439 case LLVMContext::MD_tbaa_struct:
440 case LLVMContext::MD_alias_scope:
441 case LLVMContext::MD_noalias:
442 case LLVMContext::MD_nontemporal:
443 case LLVMContext::MD_mem_parallel_loop_access:
444 // All of these directly apply.
445 NewStore->setMetadata(ID, N);
446 break;
447
448 case LLVMContext::MD_invariant_load:
449 case LLVMContext::MD_nonnull:
450 case LLVMContext::MD_range:
451 case LLVMContext::MD_align:
452 case LLVMContext::MD_dereferenceable:
453 case LLVMContext::MD_dereferenceable_or_null:
454 // These don't apply for stores.
455 break;
456 }
457 }
458
459 return NewStore;
460}
461
462/// \brief Combine loads to match the type of their uses' value after looking
463/// through intervening bitcasts.
464///
465/// The core idea here is that if the result of a load is used in an operation,
466/// we should load the type most conducive to that operation. For example, when
467/// loading an integer and converting that immediately to a pointer, we should
468/// instead directly load a pointer.
469///
470/// However, this routine must never change the width of a load or the number of
471/// loads as that would introduce a semantic change. This combine is expected to
472/// be a semantic no-op which just allows loads to more closely model the types
473/// of their consuming operations.
474///
475/// Currently, we also refuse to change the precise type used for an atomic load
476/// or a volatile load. This is debatable, and might be reasonable to change
477/// later. However, it is risky in case some backend or other part of LLVM is
478/// relying on the exact type loaded to select appropriate atomic operations.
479static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
480 // FIXME: We could probably with some care handle both volatile and ordered
481 // atomic loads here but it isn't clear that this is important.
482 if (!LI.isUnordered())
483 return nullptr;
484
485 if (LI.use_empty())
486 return nullptr;
487
488 // swifterror values can't be bitcasted.
489 if (LI.getPointerOperand()->isSwiftError())
490 return nullptr;
491
492 Type *Ty = LI.getType();
493 const DataLayout &DL = IC.getDataLayout();
494
495 // Try to canonicalize loads which are only ever stored to operate over
496 // integers instead of any other type. We only do this when the loaded type
497 // is sized and has a size exactly the same as its store size and the store
498 // size is a legal integer type.
499 if (!Ty->isIntegerTy() && Ty->isSized() &&
500 DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
501 DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
502 !DL.isNonIntegralPointerType(Ty)) {
503 if (all_of(LI.users(), [&LI](User *U) {
504 auto *SI = dyn_cast<StoreInst>(U);
505 return SI && SI->getPointerOperand() != &LI;
506 })) {
507 LoadInst *NewLoad = combineLoadToNewType(
508 IC, LI,
509 Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
510 // Replace all the stores with stores of the newly loaded value.
511 for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
512 auto *SI = cast<StoreInst>(*UI++);
513 IC.Builder->SetInsertPoint(SI);
514 combineStoreToNewValue(IC, *SI, NewLoad);
515 IC.eraseInstFromFunction(*SI);
516 }
517 assert(LI.use_empty() && "Failed to remove all users of the load!");
518 // Return the old load so the combiner can delete it safely.
519 return &LI;
520 }
521 }
522
523 // Fold away bit casts of the loaded value by loading the desired type.
524 // We can do this for BitCastInsts as well as casts from and to pointer types,
525 // as long as those are noops (i.e., the source or dest type have the same
526 // bitwidth as the target's pointers).
527 if (LI.hasOneUse())
528 if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
529 if (CI->isNoopCast(DL))
530 if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
531 LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
532 CI->replaceAllUsesWith(NewLoad);
533 IC.eraseInstFromFunction(*CI);
534 return &LI;
535 }
536
537 // FIXME: We should also canonicalize loads of vectors when their elements are
538 // cast to other types.
539 return nullptr;
540}
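A sketch of the single-use cast fold at the end of this routine, assuming a 64-bit target so that the inttoptr is a no-op cast (names hypothetical):

  ; before: the loaded integer is only ever converted to a pointer
  %x = load i64, i64* %p, align 8
  %q = inttoptr i64 %x to i8*
  ; after: load the pointer type directly through a cast of the address
  %p.cast = bitcast i64* %p to i8**
  %q1 = load i8*, i8** %p.cast, align 8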
541
542static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
543 // FIXME: We could probably with some care handle both volatile and atomic
544 // stores here but it isn't clear that this is important.
545 if (!LI.isSimple())
546 return nullptr;
547
548 Type *T = LI.getType();
549 if (!T->isAggregateType())
550 return nullptr;
551
552 StringRef Name = LI.getName();
553 assert(LI.getAlignment() && "Alignment must be set at this point");
554
555 if (auto *ST = dyn_cast<StructType>(T)) {
556 // If the struct only has one element, we unpack.
557 auto NumElements = ST->getNumElements();
558 if (NumElements == 1) {
559 LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
560 ".unpack");
561 return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
562 UndefValue::get(T), NewLoad, 0, Name));
563 }
564
565 // We don't want to break loads with padding here as we'd lose
566 // the knowledge that padding exists for the rest of the pipeline.
567 const DataLayout &DL = IC.getDataLayout();
568 auto *SL = DL.getStructLayout(ST);
569 if (SL->hasPadding())
570 return nullptr;
571
572 auto Align = LI.getAlignment();
573 if (!Align)
574 Align = DL.getABITypeAlignment(ST);
575
576 auto *Addr = LI.getPointerOperand();
577 auto *IdxType = Type::getInt32Ty(T->getContext());
578 auto *Zero = ConstantInt::get(IdxType, 0);
579
580 Value *V = UndefValue::get(T);
581 for (unsigned i = 0; i < NumElements; i++) {
582 Value *Indices[2] = {
583 Zero,
584 ConstantInt::get(IdxType, i),
585 };
586 auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
587 Name + ".elt");
588 auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
589 auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
590 V = IC.Builder->CreateInsertValue(V, L, i);
591 }
592
593 V->setName(Name);
594 return IC.replaceInstUsesWith(LI, V);
595 }
596
597 if (auto *AT = dyn_cast<ArrayType>(T)) {
598 auto *ET = AT->getElementType();
599 auto NumElements = AT->getNumElements();
600 if (NumElements == 1) {
601 LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
602 return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
603 UndefValue::get(T), NewLoad, 0, Name));
604 }
605
606 // Bail out if the array is too large. Ideally we would like to optimize
607 // arrays of arbitrary size but this has a terrible impact on compile time.
608 // The threshold here is chosen arbitrarily, maybe needs a little bit of
609 // tuning.
610 if (NumElements > 1024)
611 return nullptr;
612
613 const DataLayout &DL = IC.getDataLayout();
614 auto EltSize = DL.getTypeAllocSize(ET);
615 auto Align = LI.getAlignment();
616 if (!Align)
617 Align = DL.getABITypeAlignment(T);
618
619 auto *Addr = LI.getPointerOperand();
620 auto *IdxType = Type::getInt64Ty(T->getContext());
621 auto *Zero = ConstantInt::get(IdxType, 0);
622
623 Value *V = UndefValue::get(T);
624 uint64_t Offset = 0;
625 for (uint64_t i = 0; i < NumElements; i++) {
626 Value *Indices[2] = {
627 Zero,
628 ConstantInt::get(IdxType, i),
629 };
630 auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
631 Name + ".elt");
632 auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
633 Name + ".unpack");
634 V = IC.Builder->CreateInsertValue(V, L, i);
635 Offset += EltSize;
636 }
637
638 V->setName(Name);
639 return IC.replaceInstUsesWith(LI, V);
640 }
641
642 return nullptr;
643}
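A sketch of the struct case for a padding-free two-element struct (hypothetical names):

  ; before
  %s = load { i32, i32 }, { i32, i32 }* %p, align 4
  ; after: per-element loads recombined with insertvalue
  %e0.ptr = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
  %e0 = load i32, i32* %e0.ptr, align 4
  %v0 = insertvalue { i32, i32 } undef, i32 %e0, 0
  %e1.ptr = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 1
  %e1 = load i32, i32* %e1.ptr, align 4
  %s1 = insertvalue { i32, i32 } %v0, i32 %e1, 1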
644
645// If we can determine that all possible objects pointed to by the provided
646// pointer value are, not only dereferenceable, but also definitively less than
647// or equal to the provided maximum size, then return true. Otherwise, return
648// false (constant global values and allocas fall into this category).
649//
650// FIXME: This should probably live in ValueTracking (or similar).
651static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
652 const DataLayout &DL) {
653 SmallPtrSet<Value *, 4> Visited;
654 SmallVector<Value *, 4> Worklist(1, V);
655
656 do {
657 Value *P = Worklist.pop_back_val();
658 P = P->stripPointerCasts();
659
660 if (!Visited.insert(P).second)
661 continue;
662
663 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
664 Worklist.push_back(SI->getTrueValue());
665 Worklist.push_back(SI->getFalseValue());
666 continue;
667 }
668
669 if (PHINode *PN = dyn_cast<PHINode>(P)) {
670 for (Value *IncValue : PN->incoming_values())
671 Worklist.push_back(IncValue);
672 continue;
673 }
674
675 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
676 if (GA->isInterposable())
677 return false;
678 Worklist.push_back(GA->getAliasee());
679 continue;
680 }
681
682 // If we know how big this object is, and it is less than MaxSize, continue
683 // searching. Otherwise, return false.
684 if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
685 if (!AI->getAllocatedType()->isSized())
686 return false;
687
688 ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
689 if (!CS)
690 return false;
691
692 uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
693 // Make sure that, even if the multiplication below would wrap as an
694 // uint64_t, we still do the right thing.
695 if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
696 return false;
697 continue;
698 }
699
700 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
701 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
702 return false;
703
704 uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
705 if (InitSize > MaxSize)
706 return false;
707 continue;
708 }
709
710 return false;
711 } while (!Worklist.empty());
712
713 return true;
714}
715
716// If we're indexing into an object of a known size, and the outer index is
717// not a constant, but having any value but zero would lead to undefined
718// behavior, replace it with zero.
719//
720// For example, if we have:
721// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
722// ...
723// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
724// ... = load i32* %arrayidx, align 4
725// Then we know that we can replace %x in the GEP with i64 0.
726//
727// FIXME: We could fold any GEP index to zero that would cause UB if it were
728// not zero. Currently, we only handle the first such index. Also, we could
729// also search through non-zero constant indices if we kept track of the
730// offsets those indices implied.
731static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
732 Instruction *MemI, unsigned &Idx) {
733 if (GEPI->getNumOperands() < 2)
734 return false;
735
736 // Find the first non-zero index of a GEP. If all indices are zero, return
737 // one past the last index.
738 auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
739 unsigned I = 1;
740 for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
741 Value *V = GEPI->getOperand(I);
742 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
743 if (CI->isZero())
744 continue;
745
746 break;
747 }
748
749 return I;
750 };
751
752 // Skip through initial 'zero' indices, and find the corresponding pointer
753 // type. See if the next index is not a constant.
754 Idx = FirstNZIdx(GEPI);
755 if (Idx == GEPI->getNumOperands())
756 return false;
757 if (isa<Constant>(GEPI->getOperand(Idx)))
758 return false;
759
760 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
761 Type *AllocTy =
762 GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
763 if (!AllocTy || !AllocTy->isSized())
764 return false;
765 const DataLayout &DL = IC.getDataLayout();
766 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
767
768 // If there are more indices after the one we might replace with a zero, make
769 // sure they're all non-negative. If any of them are negative, the overall
770 // address being computed might be before the base address determined by the
771 // first non-zero index.
772 auto IsAllNonNegative = [&]() {
773 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
774 bool KnownNonNegative, KnownNegative;
775 IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
776 KnownNegative, 0, MemI);
777 if (KnownNonNegative)
778 continue;
779 return false;
780 }
781
782 return true;
783 };
784
785 // FIXME: If the GEP is not inbounds, and there are extra indices after the
786 // one we'll replace, those could cause the address computation to wrap
787 // (rendering the IsAllNonNegative() check below insufficient). We can do
788 // better, ignoring zero indices (and other indices we can prove small
789 // enough not to wrap).
790 if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
791 return false;
792
793 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
794 // also known to be dereferenceable.
795 return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
796 IsAllNonNegative();
797}
798
799// If we're indexing into an object with a variable index for the memory
800// access, but the object has only one element, we can assume that the index
801// will always be zero. If we replace the GEP, return it.
802template <typename T>
803static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
804 T &MemI) {
805 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
806 unsigned Idx;
807 if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
808 Instruction *NewGEPI = GEPI->clone();
809 NewGEPI->setOperand(Idx,
810 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
811 NewGEPI->insertBefore(GEPI);
812 MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
813 return NewGEPI;
814 }
815 }
816
817 return nullptr;
818}
819
820Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
821 Value *Op = LI.getOperand(0);
822
823 // Try to canonicalize the loaded type.
824 if (Instruction *Res = combineLoadToOperationType(*this, LI))
825 return Res;
826
827 // Attempt to improve the alignment.
828 unsigned KnownAlign = getOrEnforceKnownAlignment(
829 Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
830 unsigned LoadAlign = LI.getAlignment();
831 unsigned EffectiveLoadAlign =
832 LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
833
834 if (KnownAlign > EffectiveLoadAlign)
835 LI.setAlignment(KnownAlign);
836 else if (LoadAlign == 0)
837 LI.setAlignment(EffectiveLoadAlign);
838
839 // Replace GEP indices if possible.
840 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
841 Worklist.Add(NewGEPI);
842 return &LI;
843 }
844
845 if (Instruction *Res = unpackLoadToAggregate(*this, LI))
846 return Res;
847
848 // Do really simple store-to-load forwarding and load CSE, to catch cases
849 // where there are several consecutive memory accesses to the same location,
850 // separated by a few arithmetic operations.
851 BasicBlock::iterator BBI(LI);
852 bool IsLoadCSE = false;
853 if (Value *AvailableVal = FindAvailableLoadedValue(
854 &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
855 if (IsLoadCSE)
856 combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);
857
858 return replaceInstUsesWith(
859 LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
860 LI.getName() + ".cast"));
861 }
862
863 // None of the following transforms are legal for volatile/ordered atomic
864 // loads. Most of them do apply for unordered atomics.
865 if (!LI.isUnordered()) return nullptr;
866
867 // load(gep null, ...) -> unreachable
868 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
869 const Value *GEPI0 = GEPI->getOperand(0);
870 // TODO: Consider a target hook for valid address spaces for this xform.
871 if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
872 // Insert a new store to null instruction before the load to indicate
873 // that this code is not reachable. We do this instead of inserting
874 // an unreachable instruction directly because we cannot modify the
875 // CFG.
876 new StoreInst(UndefValue::get(LI.getType()),
877 Constant::getNullValue(Op->getType()), &LI);
878 return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
879 }
880 }
881
882 // load null/undef -> unreachable
883 // TODO: Consider a target hook for valid address spaces for this xform.
884 if (isa<UndefValue>(Op) ||
885 (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
886 // Insert a new store to null instruction before the load to indicate that
887 // this code is not reachable. We do this instead of inserting an
888 // unreachable instruction directly because we cannot modify the CFG.
889 new StoreInst(UndefValue::get(LI.getType()),
890 Constant::getNullValue(Op->getType()), &LI);
891 return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
892 }
893
894 if (Op->hasOneUse()) {
895 // Change select and PHI nodes to select values instead of addresses: this
896 // helps alias analysis out a lot, allows many other simplifications, and
897 // exposes redundancy in the code.
898 //
899 // Note that we cannot do the transformation unless we know that the
900 // introduced loads cannot trap! Something like this is valid as long as
901 // the condition is always false: load (select bool %C, int* null, int* %G),
902 // but it would not be valid if we transformed it to load from null
903 // unconditionally.
904 //
905 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
906 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
907 unsigned Align = LI.getAlignment();
908 if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
909 isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
910 LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
911 SI->getOperand(1)->getName()+".val");
912 LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
913 SI->getOperand(2)->getName()+".val");
914 assert(LI.isUnordered() && "implied by above");
915 V1->setAlignment(Align);
916 V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
917 V2->setAlignment(Align);
918 V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
919 return SelectInst::Create(SI->getCondition(), V1, V2);
920 }
921
922 // load (select (cond, null, P)) -> load P
923 if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
924 LI.getPointerAddressSpace() == 0) {
925 LI.setOperand(0, SI->getOperand(2));
926 return &LI;
927 }
928
929 // load (select (cond, P, null)) -> load P
930 if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
931 LI.getPointerAddressSpace() == 0) {
932 LI.setOperand(0, SI->getOperand(1));
933 return &LI;
934 }
935 }
936 }
937 return nullptr;
938}
939
940/// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
941///
942/// \returns underlying value that was "cast", or nullptr otherwise.
943///
944/// For example, if we have:
945///
946/// %E0 = extractelement <2 x double> %U, i32 0
947/// %V0 = insertvalue [2 x double] undef, double %E0, 0
948/// %E1 = extractelement <2 x double> %U, i32 1
949/// %V1 = insertvalue [2 x double] %V0, double %E1, 1
950///
951/// and the layout of a <2 x double> is isomorphic to a [2 x double],
952/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
953/// Note that %U may contain non-undef values where %V1 has undef.
954static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
955 Value *U = nullptr;
956 while (auto *IV = dyn_cast<InsertValueInst>(V)) {
957 auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
958 if (!E)
959 return nullptr;
960 auto *W = E->getVectorOperand();
961 if (!U)
962 U = W;
963 else if (U != W)
964 return nullptr;
965 auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
966 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
967 return nullptr;
968 V = IV->getAggregateOperand();
969 }
970 if (!isa<UndefValue>(V) ||!U)
971 return nullptr;
972
973 auto *UT = cast<VectorType>(U->getType());
974 auto *VT = V->getType();
975 // Check that types UT and VT are bitwise isomorphic.
976 const auto &DL = IC.getDataLayout();
977 if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
978 return nullptr;
979 }
980 if (auto *AT = dyn_cast<ArrayType>(VT)) {
981 if (AT->getNumElements() != UT->getNumElements())
982 return nullptr;
983 } else {
984 auto *ST = cast<StructType>(VT);
985 if (ST->getNumElements() != UT->getNumElements())
986 return nullptr;
987 for (const auto *EltT : ST->elements()) {
988 if (EltT != UT->getElementType())
989 return nullptr;
990 }
991 }
992 return U;
993}
994
995/// \brief Combine stores to match the type of value being stored.
996///
997/// The core idea here is that the memory does not have any intrinsic type and
998/// where we can we should match the type of a store to the type of value being
999/// stored.
1000///
1001/// However, this routine must never change the width of a store or the number of
1002/// stores as that would introduce a semantic change. This combine is expected to
1003/// be a semantic no-op which just allows stores to more closely model the types
1004/// of their incoming values.
1005///
1006/// Currently, we also refuse to change the precise type used for an atomic or
1007/// volatile store. This is debatable, and might be reasonable to change later.
1008/// However, it is risky in case some backend or other part of LLVM is relying
1009/// on the exact type stored to select appropriate atomic operations.
1010///
1011/// \returns true if the store was successfully combined away. This indicates
1012/// the caller must erase the store instruction. We have to let the caller erase
1013/// the store instruction as otherwise there is no way to signal whether it was
1014/// combined or not: IC.EraseInstFromFunction returns a null pointer.
1015static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
1016 // FIXME: We could probably with some care handle both volatile and ordered
1017 // atomic stores here but it isn't clear that this is important.
1018 if (!SI.isUnordered())
1019 return false;
1020
1021 // swifterror values can't be bitcasted.
1022 if (SI.getPointerOperand()->isSwiftError())
1023 return false;
1024
1025 Value *V = SI.getValueOperand();
1026
1027 // Fold away bit casts of the stored value by storing the original type.
1028 if (auto *BC = dyn_cast<BitCastInst>(V)) {
1029 V = BC->getOperand(0);
1030 if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
1031 combineStoreToNewValue(IC, SI, V);
1032 return true;
1033 }
1034 }
1035
1036 if (Value *U = likeBitCastFromVector(IC, V))
1037 if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
1038 combineStoreToNewValue(IC, SI, U);
1039 return true;
1040 }
1041
1042 // FIXME: We should also canonicalize stores of vectors when their elements
1043 // are cast to other types.
1044 return false;
1045}
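A sketch of the bitcast fold on the store side (hypothetical names):

  ; before: the stored value is just a bitcast
  %f = bitcast i32 %i to float
  store float %f, float* %p, align 4
  ; after: store the original integer through a cast of the address
  %p.cast = bitcast float* %p to i32*
  store i32 %i, i32* %p.cast, align 4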
1046
1047static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
1048 // FIXME: We could probably with some care handle both volatile and atomic
1049 // stores here but it isn't clear that this is important.
1050 if (!SI.isSimple())
1051 return false;
1052
1053 Value *V = SI.getValueOperand();
1054 Type *T = V->getType();
1055
1056 if (!T->isAggregateType())
1057 return false;
1058
1059 if (auto *ST = dyn_cast<StructType>(T)) {
1060 // If the struct only has one element, we unpack.
1061 unsigned Count = ST->getNumElements();
1062 if (Count == 1) {
1063 V = IC.Builder->CreateExtractValue(V, 0);
1064 combineStoreToNewValue(IC, SI, V);
1065 return true;
1066 }
1067
1068 // We don't want to break stores with padding here as we'd lose
1069 // the knowledge that padding exists for the rest of the pipeline.
1070 const DataLayout &DL = IC.getDataLayout();
1071 auto *SL = DL.getStructLayout(ST);
1072 if (SL->hasPadding())
1073 return false;
1074
1075 auto Align = SI.getAlignment();
1076 if (!Align)
1077 Align = DL.getABITypeAlignment(ST);
1078
1079 SmallString<16> EltName = V->getName();
1080 EltName += ".elt";
1081 auto *Addr = SI.getPointerOperand();
1082 SmallString<16> AddrName = Addr->getName();
1083 AddrName += ".repack";
1084
1085 auto *IdxType = Type::getInt32Ty(ST->getContext());
1086 auto *Zero = ConstantInt::get(IdxType, 0);
1087 for (unsigned i = 0; i < Count; i++) {
1088 Value *Indices[2] = {
1089 Zero,
1090 ConstantInt::get(IdxType, i),
1091 };
1092 auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
1093 AddrName);
1094 auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
1095 auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
1096 IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
1097 }
1098
1099 return true;
1100 }
1101
1102 if (auto *AT = dyn_cast<ArrayType>(T)) {
1103 // If the array only has one element, we unpack.
1104 auto NumElements = AT->getNumElements();
1105 if (NumElements == 1) {
1106 V = IC.Builder->CreateExtractValue(V, 0);
1107 combineStoreToNewValue(IC, SI, V);
1108 return true;
1109 }
1110
1111 // Bail out if the array is too large. Ideally we would like to optimize
1112 // arrays of arbitrary size but this has a terrible impact on compile time.
1113 // The threshold here is chosen arbitrarily, maybe needs a little bit of
1114 // tuning.
1115 if (NumElements > 1024)
1116 return false;
1117
1118 const DataLayout &DL = IC.getDataLayout();
1119 auto EltSize = DL.getTypeAllocSize(AT->getElementType());
1120 auto Align = SI.getAlignment();
1121 if (!Align)
1122 Align = DL.getABITypeAlignment(T);
1123
1124 SmallString<16> EltName = V->getName();
1125 EltName += ".elt";
1126 auto *Addr = SI.getPointerOperand();
1127 SmallString<16> AddrName = Addr->getName();
1128 AddrName += ".repack";
1129
1130 auto *IdxType = Type::getInt64Ty(T->getContext());
1131 auto *Zero = ConstantInt::get(IdxType, 0);
1132
1133 uint64_t Offset = 0;
1134 for (uint64_t i = 0; i < NumElements; i++) {
1135 Value *Indices[2] = {
1136 Zero,
1137 ConstantInt::get(IdxType, i),
1138 };
1139 auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
1140 AddrName);
1141 auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
1142 auto EltAlign = MinAlign(Align, Offset);
1143 IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
1144 Offset += EltSize;
1145 }
1146
1147 return true;
1148 }
1149
1150 return false;
1151}
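The store-side analogue of the aggregate unpacking shown for loads above (hypothetical names, padding-free struct):

  ; before
  store { i32, i32 } %v, { i32, i32 }* %p, align 4
  ; after: per-element extracts and stores
  %v.elt = extractvalue { i32, i32 } %v, 0
  %p.repack = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
  store i32 %v.elt, i32* %p.repack, align 4
  %v.elt1 = extractvalue { i32, i32 } %v, 1
  %p.repack1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 1
  store i32 %v.elt1, i32* %p.repack1, align 4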
1152
1153/// equivalentAddressValues - Test if A and B will obviously have the same
1154/// value. This includes recognizing that %t0 and %t1 will have the same
1155/// value in code like this:
1156/// %t0 = getelementptr \@a, 0, 3
1157/// store i32 0, i32* %t0
1158/// %t1 = getelementptr \@a, 0, 3
1159/// %t2 = load i32* %t1
1160///
1161static bool equivalentAddressValues(Value *A, Value *B) {
1162 // Test if the values are trivially equivalent.
1163 if (A == B) return true;
1164
1165 // Test if the values come from identical arithmetic instructions.
1166 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
1167 // it's only used to compare two uses within the same basic block, which
1168 // means that they'll always either have the same value or one of them
1169 // will have an undefined value.
1170 if (isa<BinaryOperator>(A) ||
1171 isa<CastInst>(A) ||
1172 isa<PHINode>(A) ||
1173 isa<GetElementPtrInst>(A))
1174 if (Instruction *BI = dyn_cast<Instruction>(B))
1175 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1176 return true;
1177
1178 // Otherwise they may not be equivalent.
1179 return false;
1180}
1181
1182Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
1183 Value *Val = SI.getOperand(0);
1184 Value *Ptr = SI.getOperand(1);
1185
1186 // Try to canonicalize the stored type.
1187 if (combineStoreToValueType(*this, SI))
1188 return eraseInstFromFunction(SI);
1189
1190 // Attempt to improve the alignment.
1191 unsigned KnownAlign = getOrEnforceKnownAlignment(
1192 Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
1193 unsigned StoreAlign = SI.getAlignment();
1194 unsigned EffectiveStoreAlign =
1195 StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
1196
1197 if (KnownAlign > EffectiveStoreAlign)
1198 SI.setAlignment(KnownAlign);
1199 else if (StoreAlign == 0)
1200 SI.setAlignment(EffectiveStoreAlign);
1201
1202 // Try to canonicalize the stored type.
1203 if (unpackStoreToAggregate(*this, SI))
1204 return eraseInstFromFunction(SI);
1205
1206 // Replace GEP indices if possible.
1207 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
1208 Worklist.Add(NewGEPI);
1209 return &SI;
1210 }
1211
1212 // Don't hack volatile/ordered stores.
1213 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1214 if (!SI.isUnordered()) return nullptr;
1215
1216 // If the RHS is an alloca with a single use, zapify the store, making the
1217 // alloca dead.
1218 if (Ptr->hasOneUse()) {
1219 if (isa<AllocaInst>(Ptr))
1220 return eraseInstFromFunction(SI);
1221 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1222 if (isa<AllocaInst>(GEP->getOperand(0))) {
1223 if (GEP->getOperand(0)->hasOneUse())
1224 return eraseInstFromFunction(SI);
1225 }
1226 }
1227 }
1228
1229 // Do really simple DSE, to catch cases where there are several consecutive
1230 // stores to the same location, separated by a few arithmetic operations. This
1231 // situation often occurs with bitfield accesses.
1232 BasicBlock::iterator BBI(SI);
1233 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1234 --ScanInsts) {
1235 --BBI;
1236 // Don't count debug info directives, lest they affect codegen,
1237 // and we skip pointer-to-pointer bitcasts, which are NOPs.
1238 if (isa<DbgInfoIntrinsic>(BBI) ||
1239 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1240 ScanInsts++;
1241 continue;
1242 }
1243
1244 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1245 // Prev store isn't volatile, and stores to the same location?
1246 if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
1247 SI.getOperand(1))) {
1248 ++NumDeadStore;
1249 ++BBI;
1250 eraseInstFromFunction(*PrevSI);
1251 continue;
1252 }
1253 break;
1254 }
1255
1256 // If this is a load, we have to stop. However, if the loaded value is from
1257 // the pointer we're loading and is producing the pointer we're storing,
1258 // then *this* store is dead (X = load P; store X -> P).
1259 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1260 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1261 assert(SI.isUnordered() && "can't eliminate ordering operation");
1262 return eraseInstFromFunction(SI);
1263 }
1264
1265 // Otherwise, this is a load from some other location. Stores before it
1266 // may not be dead.
1267 break;
1268 }
1269
1270 // Don't skip over loads or things that can modify memory.
1271 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
1272 break;
1273 }
1274
1275 // store X, null -> turns into 'unreachable' in SimplifyCFG
1276 if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
1277 if (!isa<UndefValue>(Val)) {
1278 SI.setOperand(0, UndefValue::get(Val->getType()));
1279 if (Instruction *U = dyn_cast<Instruction>(Val))
1280 Worklist.Add(U); // Dropped a use.
1281 }
1282 return nullptr; // Do not modify these!
1283 }
1284
1285 // store undef, Ptr -> noop
1286 if (isa<UndefValue>(Val))
1287 return eraseInstFromFunction(SI);
1288
1289 // If this store is the last instruction in the basic block (possibly
1290 // excepting debug info instructions), and if the block ends with an
1291 // unconditional branch, try to move it to the successor block.
1292 BBI = SI.getIterator();
1293 do {
1294 ++BBI;
1295 } while (isa<DbgInfoIntrinsic>(BBI) ||
1296 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
1297 if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
1298 if (BI->isUnconditional())
1299 if (SimplifyStoreAtEndOfBlock(SI))
1300 return nullptr; // xform done!
1301
1302 return nullptr;
1303}
1304
1305/// SimplifyStoreAtEndOfBlock - Turn things like:
1306/// if () { *P = v1; } else { *P = v2 }
1307/// into a phi node with a store in the successor.
1308///
1309/// Simplify things like:
1310/// *P = v1; if () { *P = v2; }
1311/// into a phi node with a store in the successor.
1312///
1313bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
1314 assert(SI.isUnordered() &&
1315 "this code has not been auditted for volatile or ordered store case");
1316
1317 BasicBlock *StoreBB = SI.getParent();
1318
1319 // Check to see if the successor block has exactly two incoming edges. If
1320 // so, see if the other predecessor contains a store to the same location.
1321 // if so, insert a PHI node (if needed) and move the stores down.
1322 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1323
1324 // Determine whether Dest has exactly two predecessors and, if so, compute
1325 // the other predecessor.
1326 pred_iterator PI = pred_begin(DestBB);
1327 BasicBlock *P = *PI;
1328 BasicBlock *OtherBB = nullptr;
1. 'OtherBB' initialized to a null pointer value
1329
1330 if (P != StoreBB)
2. Assuming 'P' is equal to 'StoreBB'
3. Taking false branch
1331 OtherBB = P;
1332
1333 if (++PI == pred_end(DestBB))
4. Assuming the condition is false
5. Taking false branch
1334 return false;
1335
1336 P = *PI;
1337 if (P != StoreBB) {
6. Assuming 'P' is equal to 'StoreBB'
7. Taking false branch
1338 if (OtherBB)
1339 return false;
1340 OtherBB = P;
1341 }
1342 if (++PI != pred_end(DestBB))
8. Assuming the condition is false
9. Taking false branch
1343 return false;
1344
1345 // Bail out if all the relevant blocks aren't distinct (this can happen,
1346 // for example, if SI is in an infinite loop)
1347 if (StoreBB == DestBB || OtherBB == DestBB)
10. Assuming 'StoreBB' is not equal to 'DestBB'
11. Assuming 'OtherBB' is not equal to 'DestBB'
12. Taking false branch
1348 return false;
1349
1350 // Verify that the other block ends in a branch and is not otherwise empty.
1351 BasicBlock::iterator BBI(OtherBB->getTerminator());
13. Called C++ object pointer is null
1352 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1353 if (!OtherBr || BBI == OtherBB->begin())
1354 return false;
1355
1356 // If the other block ends in an unconditional branch, check for the 'if then
1357 // else' case. There is an instruction before the branch.
1358 StoreInst *OtherStore = nullptr;
1359 if (OtherBr->isUnconditional()) {
1360 --BBI;
1361 // Skip over debugging info.
1362 while (isa<DbgInfoIntrinsic>(BBI) ||
1363 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1364 if (BBI==OtherBB->begin())
1365 return false;
1366 --BBI;
1367 }
1368 // If this isn't a store, isn't a store to the same location, or is not the
1369 // right kind of store, bail out.
1370 OtherStore = dyn_cast<StoreInst>(BBI);
1371 if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
1372 !SI.isSameOperationAs(OtherStore))
1373 return false;
1374 } else {
1375 // Otherwise, the other block ended with a conditional branch. If one of the
1376 // destinations is StoreBB, then we have the if/then case.
1377 if (OtherBr->getSuccessor(0) != StoreBB &&
1378 OtherBr->getSuccessor(1) != StoreBB)
1379 return false;
1380
1381 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1382 // if/then triangle. See if there is a store to the same ptr as SI that
1383 // lives in OtherBB.
1384 for (;; --BBI) {
1385 // Check to see if we find the matching store.
1386 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
1387 if (OtherStore->getOperand(1) != SI.getOperand(1) ||
1388 !SI.isSameOperationAs(OtherStore))
1389 return false;
1390 break;
1391 }
1392 // If we find something that may be using or overwriting the stored
1393 // value, or if we run out of instructions, we can't do the xform.
1394 if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
1395 BBI == OtherBB->begin())
1396 return false;
1397 }
1398
1399 // In order to eliminate the store in OtherBr, we have to
1400 // make sure nothing reads or overwrites the stored value in
1401 // StoreBB.
1402 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1403 // FIXME: This should really be AA driven.
1404 if (I->mayReadFromMemory() || I->mayWriteToMemory())
1405 return false;
1406 }
1407 }
1408
1409 // Insert a PHI node now if we need it.
1410 Value *MergedVal = OtherStore->getOperand(0);
1411 if (MergedVal != SI.getOperand(0)) {
1412 PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
1413 PN->addIncoming(SI.getOperand(0), SI.getParent());
1414 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
1415 MergedVal = InsertNewInstBefore(PN, DestBB->front());
1416 }
1417
1418 // Advance to a place where it is safe to insert the new store and
1419 // insert it.
1420 BBI = DestBB->getFirstInsertionPt();
1421 StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
1422 SI.isVolatile(),
1423 SI.getAlignment(),
1424 SI.getOrdering(),
1425 SI.getSynchScope());
1426 InsertNewInstBefore(NewSI, *BBI);
1427 NewSI->setDebugLoc(OtherStore->getDebugLoc());
1428
1429 // If the two stores had AA tags, merge them.
1430 AAMDNodes AATags;
1431 SI.getAAMetadata(AATags);
1432 if (AATags) {
1433 OtherStore->getAAMetadata(AATags, /* Merge = */ true);
1434 NewSI->setAAMetadata(AATags);
1435 }
1436
1437 // Nuke the old stores.
1438 eraseInstFromFunction(SI);
1439 eraseInstFromFunction(*OtherStore);
1440 return true;
1441}
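For reference, a minimal IR shape that drives the path reported above (hypothetical names): if both edges into DestBB come from StoreBB, for example because StoreBB ends in a conditional branch whose two targets are the same block, then neither assignment to OtherBB executes and line 1351 dereferences a null pointer. Whether such a block can actually reach this function through visitStoreInst, which only calls it when the store is followed by an unconditional branch, is not modeled by the analyzer here.

  define void @f(i32 %v, i32* %p, i1 %c) {
  store.bb:                                   ; StoreBB
    store i32 %v, i32* %p
    br i1 %c, label %dest.bb, label %dest.bb  ; two edges to the same successor
  dest.bb:                                    ; DestBB: both predecessors are StoreBB
    ret void
  }

One possible hardening, if this path is considered reachable, would be to bail out when the second predecessor also equals StoreBB (leaving OtherBB null) before taking OtherBB->getTerminator() at line 1351.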