LLVM 6.0.0svn
DeadStoreElimination.cpp
//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}
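
// For example, deleting a dead "store i32 %v, i32* %gep" here also deletes
// the %gep instruction (and, transitively, anything feeding it) once zeroing
// the store's operands has left it trivially dead.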

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}

/// Return the location read by the specified "hasMemoryWrite" instruction if
/// any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        // FIXME: Add memmove if it's also safe to transform.
        return true;
    }
  }

  // Don't shorten libcalls calls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
  return II && II->getIntrinsicID() == Intrinsic::memset;
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL) {
  // If we don't know the sizes of either access, then we can't do a comparison.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OW_Unknown;

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is not even called providing DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " <<
                    int64_t(EarlierOff + Earlier.Size) << ") Later [" <<
                    LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
      DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" <<
                      EarlierOff << ", " <<
                      int64_t(EarlierOff + Earlier.Size) <<
                      ") Composite Later [" <<
                      ILI->second << ", " << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + Earlier.Size) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
    DEBUG(dbgs() << "DSE: Partial overwrite an earlier load [" << EarlierOff
                 << ", " << int64_t(EarlierOff + Earlier.Size)
                 << ") by a later store [" << LaterOff << ", "
                 << int64_t(LaterOff + Later.Size) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
    assert(int64_t(LaterOff + Later.Size) <
               int64_t(EarlierOff + Earlier.Size) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}
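
// Illustration of the partial-overwrite tracking above: for an earlier write
// covering [0, 32) and two later stores covering [0, 16) and [16, 32), the
// interval map first records [0, 16); the second store then merges with it
// into the composite interval [0, 32), which covers the earlier write
// completely, so isOverwrite returns OW_Complete.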

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the store-block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the load-block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before LI if this is the FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI) {
        auto Res = AA->getModRefInfo(I, MemLoc);
        if (Res & MRI_Mod)
          return false;
      }
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
          "Should not hit the entry block because SI must be dominated by LI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}
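
// For example, with FirstI = "%v = load i32, i32* %p" and SecondI =
// "store i32 %v, i32* %p" in a block reachable from FirstI's block, the
// backward walk visits the store's block and its predecessors up to the
// load's block; the store is only treated as unmodified-between if no
// visited instruction may write to %p.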

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to F.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getArgOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      DEBUG(dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
                   << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                   << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));

        return A == MRI_ModRef || A == MRI_Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of
    // already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier
  // store was too small to get vector writes anyway then it's likely
  // a good idea to shorten it
  // Power of 2 vector writes are probably always a bad idea to optimize
  // as any store/memset/memcpy is likely using vector instructions so
  // shortening it to not vector size is likely to be slower
  MemIntrinsic *EarlierIntrinsic = cast<MemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
               << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *EarlierWrite
               << "\n  KILLER (offset " << LaterOffset << ", " << EarlierSize
               << ")\n");

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}
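
// For example, if an earlier "memset(p, 0, 32)" is overwritten by a later
// write covering [16, 32), IsOverwriteEnd is true and the memset's length is
// trimmed to 16 bytes; a begin-overwrite instead advances the destination
// with an inbounds GEP and shrinks the length accordingly.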

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite, *AA);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
    assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size);
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                   << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}
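
// Example of the first case above:
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p   ; removable: it stores back the value just loaded
// The second case removes zero stores into memory freshly returned by calloc,
// which is already zero-initialized.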

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
    if (Inst->mayThrow()) {
      LastThrowingInstIndex = CurInstNumber;
      continue;
    }

    // Check to see if Inst writes to memory.  If not, continue.
    if (!hasMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function
          // where the allocation doesn't escape before the last
          // throwing instruction; PointerMayBeCaptured
          // is a reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      // Also try to merge two stores if a later one only touches memory written
      // to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset,
                        DepWrite, IOL);
        if (OR == OW_Complete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                       << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          int64_t EarlierSize = DepLoc.Size;
          int64_t LaterSize = Loc.Size;
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand())) {
            // If the store we find is:
            //   a) partially overwritten by the store to 'Loc'
            //   b) the later store is fully contained in the earlier one and
            //   c) they both have a constant value
            // Merge the two stores, replacing the earlier store's value with a
            // merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and probably
            // some mem intrinsics (if needed)

            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                         << "\n  Later: " << *Inst
                         << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove earlier, wider, store
            size_t Idx = InstrOrdering.lookup(DepWrite);
            InstrOrdering.erase(DepWrite);
            InstrOrdering.insert(std::make_pair(SI, Idx));

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, &InstrOrdering);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
                                  &InstrOrdering);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }
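
      // Worked example of the merge above (little-endian): an earlier
      // "store i32 0" partially overwritten by a later "store i8 1" at byte
      // offset 1 gives BitOffsetDiff = 8, Mask = 0x0000FF00, and
      // Merged = 0x00000100, which becomes the new earlier store's value.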

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);

  return MadeChange;
}

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks.  Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}
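
// Usage sketch: with a standard LLVM build, this pass can be exercised via
// the legacy pass manager with "opt -dse -S input.ll" or via the new pass
// manager with "opt -passes=dse -S input.ll".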
Legacy wrapper pass to provide the GlobalsAAResult object.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Value * getValueOperand()
Definition: Instructions.h:395
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks &#39;this&#39; from the containing basic block and deletes it.
Definition: Instruction.cpp:69
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
Provides a lazy, caching interface for making common memory aliasing information queries, backed by LLVM&#39;s alias analysis passes.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA)
Return a Location stored to by the specified instruction.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:687
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
This is the interface for a simple mod/ref and alias analysis over globals.
void initializeDSELegacyPassPass(PassRegistry &)
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are no-alias. ...
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:865
This class represents a function call, abstracting a target machine&#39;s calling convention.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block...
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
StringRef getName(LibFunc F) const
void setDest(Value *Ptr)
Set the specified arguments of the instruction.
STATISTIC(NumFunctions, "Total number of functions")
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:238
F(f)
static bool handleFree(CallInst *F, AliasAnalysis *AA, MemoryDependenceResults *MD, DominatorTree *DT, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
Handle frees of entire structures whose dependency is a store to a field of that structure.
An instruction for reading from memory.
Definition: Instructions.h:164
static bool isPossibleSelfRead(Instruction *Inst, const MemoryLocation &InstStoreLoc, Instruction *DepWrite, const TargetLibraryInfo &TLI, AliasAnalysis &AA)
If &#39;Inst&#39; might be a self read (i.e.
#define op(i)
Value * getLength() const
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:290
The access modifies the value stored in memory.
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1488
static void findUnconditionalPreds(SmallVectorImpl< BasicBlock *> &Blocks, BasicBlock *BB, DominatorTree *DT)
Find all blocks that will unconditionally lead to the block BB and append them to F...
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
const CallInst * isFreeCall(const Value *I, const TargetLibraryInfo *TLI)
isFreeCall - Returns non-null if the value is a call to the builtin free()
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
AnalysisUsage & addRequired()
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr it the function does no...
Definition: BasicBlock.cpp:116
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:51
static OverwriteResult isOverwrite(const MemoryLocation &Later, const MemoryLocation &Earlier, const DataLayout &DL, const TargetLibraryInfo &TLI, int64_t &EarlierOff, int64_t &LaterOff, Instruction *DepWrite, InstOverlapIntervalsTy &IOL)
Return &#39;OW_Complete&#39; if a store to the &#39;Later&#39; location completely overwrites a store to the &#39;Earlier...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:361
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA, MemoryDependenceResults *MD, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
Remove dead stores to stack-allocated locations in the function end block.
unsigned getDefaultBlockScanLimit() const
Some methods limit the number of instructions they will examine.
An analysis that produces MemoryDependenceResults for a function.
The access references the value stored in memory.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:158
This file implements a class to represent arbitrary precision integral constant values and operations...
static void removeAccessedObjects(const MemoryLocation &LoadedLoc, SmallSetVector< Value *, 16 > &DeadStackObjects, const DataLayout &DL, AliasAnalysis *AA, const TargetLibraryInfo *TLI)
Check to see if the specified location may alias any of the stack objects in the DeadStackObjects set...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:86
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:142
static Value * getStoredPointerOperand(Instruction *I)
Return the pointer that is being written to.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
void setLength(Value *L)
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset...
bool has(LibFunc F) const
Tests whether a library function is available.
std::map< int64_t, int64_t > OverlapIntervalsTy
An instruction for storing to memory.
Definition: Instructions.h:306
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:140
Value * getOperand(unsigned i) const
Definition: User.h:154
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:211
const BasicBlock & getEntryBlock() const
Definition: Function.h:572
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:837
static bool runOnFunction(Function &F, bool PostInlining)
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:406
static bool isShortenableAtTheBeginning(Instruction *I)
Returns true if the beginning of this instruction can be safely shortened in length.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:54
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:153
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
static bool removePartiallyOverlappedStores(AliasAnalysis *AA, const DataLayout &DL, InstOverlapIntervalsTy &IOL)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values and users...
const Instruction & front() const
Definition: BasicBlock.h:264
A manager for alias analyses.
static ManagedStatic< OptionRegistry > OR
Definition: Options.cpp:31
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
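The returned pair makes the common visited-set idiom concise (a sketch; V is an assumed Value pointer):

  SmallPtrSet<const Value *, 16> Visited;
  if (Visited.insert(V).second) {
    // .second is true only on the first insertion, so V is processed once.
  }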
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI)
Does this instruction write some memory? This only returns true for things that we can analyze with other helpers below.
bool mayThrow() const
Return true if this instruction may throw an exception.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the pred_* functions.
Definition: Interval.h:113
static bool isShortenableAtTheEnd(Instruction *I)
Returns true if the end of this instruction can be safely shortened in length.
Represent the analysis usage information of a pass.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:426
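For instance:

  isPowerOf2_64(8);   // true: 8 == 1 << 3
  isPowerOf2_64(12);  // false
  isPowerOf2_64(0);   // false: the predicate requires a value > 0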
FunctionPass * createDeadStoreEliminationPass()
static bool memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, AliasAnalysis *AA)
Returns true if the memory which is accessed by the second instruction is not modified between the first and the second instruction.
std::underlying_type< E >::type Underlying(E Val)
Check that Val is in range for E, and return Val cast to E's underlying type.
Definition: BitmaskEnum.h:91
Analysis pass providing a never-invalidated alias analysis result.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:285
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
self_iterator getIterator()
Definition: ilist_node.h:82
INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false, false)
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:558
This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments it.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:159
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
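A typical query, sketched under the assumption that AI is an AllocaInst pointer for a stack object of interest:

  if (!PointerMayBeCaptured(AI, /*ReturnCaptures=*/false,
                            /*StoreCaptures=*/true)) {
    // No copy of the alloca's address escapes the function, so writes to it
    // that are never read back before the function returns are dead.
  }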
Dead Store Elimination
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDependenceResults instance...
Value * GetUnderlyingObject(Value *V, const DataLayout &DL, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value...
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
A memory dependence query can return one of three different answers.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
const Value * Ptr
The address of the start of the location.
Representation for a specific memory location.
static cl::opt< bool > EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", cl::init(true), cl::Hidden, cl::desc("Enable partial-overwrite tracking in DSE"))
MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc, bool isLoad, BasicBlock::iterator ScanIt, BasicBlock *BB, Instruction *QueryInst=nullptr, unsigned *Limit=nullptr)
Returns the instruction on which a memory location depends.
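Sketch of a backward scan from a store; SI, Loc, and MD are assumed placeholders for a StoreInst pointer, its MemoryLocation, and a MemoryDependenceResults reference:

  MemDepResult Dep = MD.getPointerDependencyFrom(Loc, /*isLoad=*/false,
                                                 SI->getIterator(),
                                                 SI->getParent(), SI);
  // Dep identifies the nearest preceding instruction within the block on
  // which the stored-to location depends, or a non-local/unknown result.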
static void deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI, MemoryDependenceResults &MD, const TargetLibraryInfo &TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering, SmallSetVector< Value *, 16 > *ValueSet=nullptr)
Delete this instruction.
static cl::opt< bool > EnablePartialStoreMerging("enable-dse-partial-store-merging", cl::init(true), cl::Hidden, cl::desc("Enable partial store merging in DSE"))
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:298
bool doesNotAccessMemory(ImmutableCallSite CS)
Checks if the specified call is known to never read or write memory.
This is the common base class for memset/memcpy/memmove.
Iterator for intrusive lists based on ilist_node.
unsigned getNumOperands() const
Definition: User.h:176
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
ValTy * getArgument(unsigned ArgNo) const
Definition: CallSite.h:186
iterator end()
Definition: BasicBlock.h:254
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
Module.h This file contains the declarations for the Module class.
Provides information about what library functions are available for the current target.
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:385
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
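For example, with Ctx standing for some LLVMContext in scope:

  // Scalar case: an i32 constant 42.
  Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
  // Passing a vector type instead, e.g. <4 x i32>, yields a splat of 42.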
void setPreservesCFG()
This function should be called by a pass iff it does not: add or remove basic blocks from the function, or modify terminator instructions in any way.
Definition: Pass.cpp:285
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI, AliasAnalysis *AA, MemoryDependenceResults *MD, const DataLayout &DL, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
void setOperand(unsigned i, Value *Val)
Definition: User.h:159
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:57
Class for arbitrary precision integers.
Definition: APInt.h:69
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc)
getModRefInfo (for call sites) - Return information about whether a particular call site modifies or reads the specified memory location.
bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are must-alias.
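Sketched usage for two stores; Earlier and Later are assumed StoreInst pointers and AA an AliasAnalysis pointer:

  if (AA->isMustAlias(MemoryLocation::get(Earlier),
                      MemoryLocation::get(Later))) {
    // Both stores are proven to write the very same address, which is the
    // precondition for treating the earlier one as overwritten.
  }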
Represents analyses that only rely on functions' control flow.
Definition: PassManager.h:114
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
Definition: APInt.h:600
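For example:

  // A 32-bit value with bits [8, 16) set, i.e. 0x0000FF00; loBit is
  // inclusive and hiBit exclusive.
  APInt Mask = APInt::getBitsSet(32, 8, 16);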
This class wraps the llvm.memcpy/memmove intrinsics.
static bool tryToShortenBegin(Instruction *EarlierWrite, OverlapIntervalsTy &IntervalMap, int64_t &EarlierStart, int64_t &EarlierSize)
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
static MemoryLocation getLocForRead(Instruction *Inst, const TargetLibraryInfo &TLI)
Return the location read by the specified "hasMemoryWrite" instruction if any.
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
This file provides utility analysis objects describing memory locations.
void preserveSet()
Mark an analysis set as preserved.
Definition: PassManager.h:189
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:108
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates zero-filled memory (such as calloc).
The access both references and modifies the value stored in memory.
#define I(x, y, z)
Definition: MD5.cpp:58
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:73
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
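The usual checked-downcast idiom (Inst is an assumed Instruction pointer):

  if (auto *SI = dyn_cast<StoreInst>(Inst)) {
    // Unlike cast<>, dyn_cast<> returns nullptr instead of asserting when
    // Inst is not actually a StoreInst.
    Value *Ptr = SI->getPointerOperand();
  }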
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:174
unsigned getAlignment() const
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition: DenseMap.h:181
Analysis pass providing the TargetLibraryInfo.
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instructions.
static GetElementPtrInst * CreateInBounds(Value *Ptr, ArrayRef< Value *> IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:897
static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA, MemoryDependenceResults *MD, DominatorTree *DT, const TargetLibraryInfo *TLI)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
const BasicBlock & front() const
Definition: Function.h:595
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction has no side effects.
Definition: Local.cpp:324
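A common cleanup step built on this predicate (a sketch; Inst and TLI are assumed to be an Instruction pointer and a TargetLibraryInfo pointer):

  if (isInstructionTriviallyDead(Inst, TLI)) {
    Inst->eraseFromParent();  // No uses and no side effects: safe to erase.
  }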
LLVM Value Representation.
Definition: Value.h:73
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that previously depended on it.
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
bool remove_if(UnaryPredicate P)
Remove items from the set vector based on a predicate function.
Definition: SetVector.h:200
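Sketch of predicate-based removal; DeadStackObjects and LoadedPtr are assumed placeholders for a SmallSetVector of Value pointers and a just-loaded address:

  DeadStackObjects.remove_if([&](Value *V) {
    // Stop tracking any object that is exactly the loaded address.
    return V == LoadedPtr;
  });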
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
static bool isRemovable(Instruction *I)
If the value of this instruction and the memory it writes to is unused, may we delete this instruction?
#define DEBUG(X)
Definition: Debug.h:118
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
A container for analyses that lazily runs them and caches their results.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:267
static bool isVolatile(Instruction *Inst)
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition: BasicBlock.cpp:120
This header defines various interfaces for pass management in LLVM.
bool isBigEndian() const
Definition: DataLayout.h:217
static uint64_t getPointerSize(const Value *V, const DataLayout &DL, const TargetLibraryInfo &TLI)
Value * getPointerOperand()
Definition: Instructions.h:398
Value * getRawDest() const
bool use_empty() const
Definition: Value.h:328
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset, int64_t &EarlierSize, int64_t LaterOffset, int64_t LaterSize, bool IsOverwriteEnd)
iterator_range< arg_iterator > args()
Definition: Function.h:621
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:66
static bool tryToShortenEnd(Instruction *EarlierWrite, OverlapIntervalsTy &IntervalMap, int64_t &EarlierStart, int64_t &EarlierSize)
uint64_t Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known...
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.