LLVM  7.0.0svn
DeadStoreElimination.cpp
//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
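
// For illustration, the basic-block-local case this pass targets:
//
//   store i32 0, i32* %p
//   store i32 1, i32* %p
//
// The first store is dead because the second overwrites %p before any read.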

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h" // for salvageDebugInfo
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));
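
// Both knobs default to on; since they are cl::Hidden they can still be
// toggled explicitly, e.g. (illustrative invocation):
//   opt -dse -enable-dse-partial-store-merging=false in.ll -S -o out.ll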

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
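/// For example, deleting a dead store whose address was computed by an
/// otherwise-unused GEP deletes that GEP too, and transitively any
/// instructions that only fed the GEP.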
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // Try to preserve debug information attached to the dead instruction.
    salvageDebugInfo(*DeadInst);

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {

  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    default:
      return MemoryLocation(); // Unhandled intrinsic.
    case Intrinsic::init_trampoline:
      return MemoryLocation(II->getArgOperand(0));
    case Intrinsic::lifetime_end: {
      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      return MemoryLocation(II->getArgOperand(1), Len);
    }
    }
  }
  if (auto CS = CallSite(Inst))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
    return MemoryLocation(CS.getArgument(0));
  return MemoryLocation();
}

/// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  // note: only get here for calls with analyzable writes - i.e. libcalls
  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        // FIXME: Add memmove if it's also safe to transform.
        return true;
    }
  }

  // Don't shorten libcalls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
  return II && II->getIntrinsicID() == Intrinsic::memset;
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  //TODO: factor this to reuse getLocForWrite
  MemoryLocation Loc = getLocForWrite(I);
  assert(Loc.Ptr &&
         "unable to find pointer written for analyzable instruction?");
  // TODO: most APIs don't expect const Value *
  return const_cast<Value*>(Loc.Ptr);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
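/// For example, given an earlier 'store i64' covering bytes [0, 8): a later
/// 'store i64' to the same address is OW_Complete, and a later 'store i32'
/// into bytes [2, 6) is a candidate for OW_PartialEarlierWithFullLater
/// (subject to the EnablePartialStoreMerging flag).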
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL) {
  // If we don't know the sizes of either access, then we can't do a comparison.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OW_Unknown;

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is never called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " <<
                    int64_t(EarlierOff + Earlier.Size) << ") Later [" <<
                    LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
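    // For example, two recorded later writes covering [0, 4) and [8, 12)
    // are stored as IM = {4 -> 0, 12 -> 8}.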
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
      DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" <<
                      EarlierOff << ", " <<
                      int64_t(EarlierOff + Earlier.Size) <<
                      ") Composite Later [" <<
                      ILI->second << ", " << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + Earlier.Size) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
    DEBUG(dbgs() << "DSE: Partial overwrite an earlier load [" << EarlierOff
                 << ", " << int64_t(EarlierOff + Earlier.Size)
                 << ") by a later store [" << LaterOff << ", "
                 << int64_t(LaterOff + Later.Size) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
    assert(int64_t(LaterOff + Later.Size) <
               int64_t(EarlierOff + Earlier.Size) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memmove(A <- B)
///   memmove(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr)
    return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
    return false;

  if (isa<MemCpyInst>(Inst)) {
    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
    // but in practice memcpy(A <- B) either means that A and B are disjoint or
    // are equal (i.e. there are no partial overlaps).  Given that, if we have:
    //
    //   memcpy/memmove(A <- B)  // DepWrite
    //   memcpy(A <- B)          // Inst
    //
    // with Inst reading/writing a size >= DepWrite's, we can reason as
    // follows:
    //
    //   - If A == B then both the copies are no-ops, so the DepWrite can be
    //     removed.
    //   - If A != B then A and B are disjoint locations in Inst.  Since
    //     Inst.size >= DepWrite.size, A and B are disjoint in DepWrite too.
    //     Therefore DepWrite can be removed.
    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
      return false;
  }

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
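/// For example, with a load of %p in %entry (FirstI) and a store to %p in a
/// later block (SecondI), the walk starts at the store's block and follows
/// predecessors backward to %entry, failing if any instruction in between
/// may write to the store's location.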
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the store-block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the load-block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before LI if this is the FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SI must be dominated by LI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
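/// For example, in:
///
///   store i32 0, i32* %field   ; %field points into %s, never read again
///   call void @free(i8* %s)
///
/// the store is dead because the object is freed immediately afterwards.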
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getArgOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
          !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      DEBUG(dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
                   << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                   << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI)));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of
    // already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier
  // store was too small to get vector writes anyway, then it's likely
  // a good idea to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to optimize,
  // as any store/memset/memcpy is likely using vector instructions, so
  // shortening it to a non-vector size is likely to be slower.
  MemIntrinsic *EarlierIntrinsic = cast<MemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
               << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *EarlierWrite
               << "\n  KILLER (offset " << LaterOffset << ", " << EarlierSize
               << ")\n");

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
    assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size);
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                   << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects.
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
    if (Inst->mayThrow()) {
      LastThrowingInstIndex = CurInstNumber;
      continue;
    }

    // Check to see if Inst writes to memory.  If not, continue.
    if (!hasAnalyzableMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
        break;
      MemoryLocation DepLoc = getLocForWrite(DepWrite);
      // If we didn't get a useful location, or if it doesn't have a known
      // size, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather than
      // instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function
          // where the allocation doesn't escape before the last
          // throwing instruction; PointerMayBeCaptured is a
          // reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      // Also try to merge two stores if a later one only touches memory written
      // to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset,
                        DepWrite, IOL);
        if (OR == OW_Complete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                       << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          int64_t EarlierSize = DepLoc.Size;
          int64_t LaterSize = Loc.Size;
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // If the store we find is:
            //   a) partially overwritten by the store to 'Loc',
            //   b) the later store is fully contained in the earlier one, and
            //   c) they both have a constant value,
            // merge the two stores, replacing the earlier store's value with a
            // merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and probably
            // some mem intrinsics (if needed)
            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store.
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                         << "\n  Later: " << *Inst
                         << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove the earlier, wider store.
            size_t Idx = InstrOrdering.lookup(DepWrite);
            InstrOrdering.erase(DepWrite);
            InstrOrdering.insert(std::make_pair(SI, Idx));

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, &InstrOrdering);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
                                  &InstrOrdering);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }
1253 
1254  // If this is a may-aliased store that is clobbering the store value, we
1255  // can keep searching past it for another must-aliased pointer that stores
1256  // to the same location. For example, in:
1257  // store -> P
1258  // store -> Q
1259  // store -> P
1260  // we can remove the first store to P even though we don't know if P and Q
1261  // alias.
1262  if (DepWrite == &BB.front()) break;
1263 
1264  // Can't look past this instruction if it might read 'Loc'.
1265  if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
1266  break;
1267 
1268  InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
1269  DepWrite->getIterator(), &BB,
1270  /*QueryInst=*/ nullptr, &Limit);
1271  }
1272  }
1273 
1275  MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);
1276 
1277  // If this block ends in a return, unwind, or unreachable, all allocas are
1278  // dead at its end, which means stores to them are also dead.
1279  if (BB.getTerminator()->getNumSuccessors() == 0)
1280  MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);
1281 
1282  return MadeChange;
1283 }

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks.  Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}
Legacy wrapper pass to provide the GlobalsAAResult object.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Value * getValueOperand()
Definition: Instructions.h:395
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks &#39;this&#39; from the containing basic block and deletes it.
Definition: Instruction.cpp:67
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
Provides a lazy, caching interface for making common memory aliasing information queries, backed by LLVM&#39;s alias analysis passes.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:687
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
This is the interface for a simple mod/ref and alias analysis over globals.
void initializeDSELegacyPassPass(PassRegistry &)
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are no-alias. ...
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:863
This class represents a function call, abstracting a target machine&#39;s calling convention.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block...
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
StringRef getName(LibFunc F) const
void setDest(Value *Ptr)
Set the specified arguments of the instruction.
STATISTIC(NumFunctions, "Total number of functions")
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:225
F(f)
static bool handleFree(CallInst *F, AliasAnalysis *AA, MemoryDependenceResults *MD, DominatorTree *DT, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
Handle frees of entire structures whose dependency is a store to a field of that structure.
An instruction for reading from memory.
Definition: Instructions.h:164
static bool isPossibleSelfRead(Instruction *Inst, const MemoryLocation &InstStoreLoc, Instruction *DepWrite, const TargetLibraryInfo &TLI, AliasAnalysis &AA)
If &#39;Inst&#39; might be a self read (i.e.
#define op(i)
Value * getLength() const
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:294
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1488
static void findUnconditionalPreds(SmallVectorImpl< BasicBlock *> &Blocks, BasicBlock *BB, DominatorTree *DT)
Find all blocks that will unconditionally lead to the block BB and append them to F...
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
const CallInst * isFreeCall(const Value *I, const TargetLibraryInfo *TLI)
isFreeCall - Returns non-null if the value is a call to the builtin free()
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:264
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
AnalysisUsage & addRequired()
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:134
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:51
static OverwriteResult isOverwrite(const MemoryLocation &Later, const MemoryLocation &Earlier, const DataLayout &DL, const TargetLibraryInfo &TLI, int64_t &EarlierOff, int64_t &LaterOff, Instruction *DepWrite, InstOverlapIntervalsTy &IOL)
Return &#39;OW_Complete&#39; if a store to the &#39;Later&#39; location completely overwrites a store to the &#39;Earlier...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:361
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA, MemoryDependenceResults *MD, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
Remove dead stores to stack-allocated locations in the function end block.
unsigned getDefaultBlockScanLimit() const
Some methods limit the number of instructions they will examine.
An analysis that produces MemoryDependenceResults for a function.
static MemoryLocation getLocForWrite(Instruction *Inst)
Return a Location stored to by the specified instruction.
unsigned getDestAlignment() const
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:158
This file implements a class to represent arbitrary precision integral constant values and operations...
static void removeAccessedObjects(const MemoryLocation &LoadedLoc, SmallSetVector< Value *, 16 > &DeadStackObjects, const DataLayout &DL, AliasAnalysis *AA, const TargetLibraryInfo *TLI)
Check to see if the specified location may alias any of the stack objects in the DeadStackObjects set...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:85
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:142
static Value * getStoredPointerOperand(Instruction *I)
Return the pointer that is being written to.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
void setLength(Value *L)
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset...
void salvageDebugInfo(Instruction &I)
Assuming the instruction I is going to be deleted, attempt to salvage any dbg.value intrinsics referr...
Definition: Local.cpp:1505
bool has(LibFunc F) const
Tests whether a library function is available.
std::map< int64_t, int64_t > OverlapIntervalsTy
An instruction for storing to memory.
Definition: Instructions.h:306
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:142
Value * getOperand(unsigned i) const
Definition: User.h:170
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:211
Value * getOperand(unsigned i_nocapture) const
const BasicBlock & getEntryBlock() const
Definition: Function.h:621
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:837
static bool runOnFunction(Function &F, bool PostInlining)
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:406
static bool isShortenableAtTheBeginning(Instruction *I)
Returns true if the beginning of this instruction can be safely shortened in length.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:55
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:153
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
static bool removePartiallyOverlappedStores(AliasAnalysis *AA, const DataLayout &DL, InstOverlapIntervalsTy &IOL)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const Instruction & front() const
Definition: BasicBlock.h:276
A manager for alias analyses.
static ManagedStatic< OptionRegistry > OR
Definition: Options.cpp:31
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
bool mayThrow() const
Return true if this instruction may throw an exception.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:113
static bool isShortenableAtTheEnd(Instruction *I)
Returns true if the end of this instruction can be safely shortened in length.
Represent the analysis usage information of a pass.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition: MathExtras.h:426
FunctionPass * createDeadStoreEliminationPass()
static bool memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, AliasAnalysis *AA)
Returns true if the memory which is accessed by the second instruction is not modified between the fi...
std::underlying_type< E >::type Underlying(E Val)
Check that Val is in range for E, and return Val cast to E's underlying type.
Definition: BitmaskEnum.h:91
Analysis pass providing a never-invalidated alias analysis result.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:285
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
self_iterator getIterator()
Definition: ilist_node.h:82
INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false, false) INITIALIZE_PASS_END(DSELegacyPass
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:567
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:159
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
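A small sketch of a capture query on a stack allocation (the wrapper name is illustrative):

#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Conservatively ask whether the alloca's address can escape the function.
// Here both returning the pointer and storing it anywhere count as captures.
static bool allocaMayEscape(const AllocaInst *AI) {
  return PointerMayBeCaptured(AI, /*ReturnCaptures=*/true,
                              /*StoreCaptures=*/true);
}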
Dead Store Elimination
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDependenceResults instance...
Value * GetUnderlyingObject(Value *V, const DataLayout &DL, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value...
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
A memory dependence query can return one of three different answers.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
const Value * Ptr
The address of the start of the location.
Representation for a specific memory location.
static cl::opt< bool > EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", cl::init(true), cl::Hidden, cl::desc("Enable partial-overwrite tracking in DSE"))
MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc, bool isLoad, BasicBlock::iterator ScanIt, BasicBlock *BB, Instruction *QueryInst=nullptr, unsigned *Limit=nullptr)
Returns the instruction on which a memory location depends.
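A hedged sketch of a backwards in-block scan from a store, assuming a MemoryDependenceResults instance is available:

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Return the in-block instruction that directly defines the stored location,
// or nullptr if the dependence is a clobber or non-local.
static Instruction *priorDefiningWrite(MemoryDependenceResults &MD,
                                       StoreInst *SI) {
  MemoryLocation Loc = MemoryLocation::get(SI);
  MemDepResult Dep = MD.getPointerDependencyFrom(
      Loc, /*isLoad=*/false, SI->getIterator(), SI->getParent(), SI);
  return Dep.isDef() ? Dep.getInst() : nullptr;
}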
static void deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI, MemoryDependenceResults &MD, const TargetLibraryInfo &TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering, SmallSetVector< Value *, 16 > *ValueSet=nullptr)
Delete this instruction.
static cl::opt< bool > EnablePartialStoreMerging("enable-dse-partial-store-merging", cl::init(true), cl::Hidden, cl::desc("Enable partial store merging in DSE"))
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:298
bool doesNotAccessMemory(ImmutableCallSite CS)
Checks if the specified call is known to never read or write memory.
This is the common base class for memset/memcpy/memmove.
Iterator for intrusive lists based on ilist_node.
unsigned getNumOperands() const
Definition: User.h:192
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
iterator end()
Definition: BasicBlock.h:266
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:862
Module.h This file contains the declarations for the Module class.
Provides information about what library functions are available for the current target.
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:383
static bool hasAnalyzableMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI)
Does this instruction write some memory? This only returns true for things that we can analyze with o...
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:611
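For illustration only, assuming Ty is an integer or vector-of-integer type:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// The constant 1 of type Ty; for a vector type, a splat of 1 in every lane.
static Constant *makeOne(Type *Ty) {
  return ConstantInt::get(Ty, 1, /*isSigned=*/false);
}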
void setPreservesCFG()
This function should be called by the pass if and only if it does not:
Definition: Pass.cpp:285
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI, AliasAnalysis *AA, MemoryDependenceResults *MD, const DataLayout &DL, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
void setOperand(unsigned i, Value *Val)
Definition: User.h:175
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:55
Class for arbitrary precision integers.
Definition: APInt.h:69
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc)
getModRefInfo (for call sites) - Return information about whether a particular call site modifies or ...
bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are must-alias.
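A minimal sketch comparing two store destinations (assumes an AliasAnalysis reference):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// True only when the two stores provably write the exact same location.
static bool sameDestination(AliasAnalysis &AA, StoreInst *A, StoreInst *B) {
  return AA.isMustAlias(MemoryLocation::get(A), MemoryLocation::get(B));
}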
Represents analyses that only rely on functions' control flow.
Definition: PassManager.h:114
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
Definition: APInt.h:600
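A one-line illustration; the variable name is arbitrary:

#include "llvm/ADT/APInt.h"

// 32-bit value with bits [8, 16) set, i.e. 0x0000FF00.
llvm::APInt ByteOneMask = llvm::APInt::getBitsSet(32, /*loBit=*/8, /*hiBit=*/16);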
LLVM_NODISCARD bool isModSet(const ModRefInfo MRI)
This class wraps the llvm.memcpy/memmove intrinsics.
static bool tryToShortenBegin(Instruction *EarlierWrite, OverlapIntervalsTy &IntervalMap, int64_t &EarlierStart, int64_t &EarlierSize)
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
static MemoryLocation getLocForRead(Instruction *Inst, const TargetLibraryInfo &TLI)
Return the location read by the specified "hasAnalyzableMemoryWrite" instruction if any...
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
This file provides utility analysis objects describing memory locations.
void preserveSet()
Mark an analysis set as preserved.
Definition: PassManager.h:189
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:108
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates zero-filled memory (such as...
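A hedged sketch; the wrapper name is illustrative:

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;

// Memory from a calloc-like call starts zero-filled, so a later store of
// zero into it may be redundant.
static bool comesZeroFilled(const Value *V, const TargetLibraryInfo *TLI) {
  return isCallocLikeFn(V, TLI, /*LookThroughBitCast=*/true);
}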
#define I(x, y, z)
Definition: MD5.cpp:58
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:73
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc...
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:174
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:181
Analysis pass providing the TargetLibraryInfo.
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
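A short sketch, with an illustrative wrapper name:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

// Collect every object Ptr may ultimately point at, looking through
// phis and selects as well as GEPs and casts.
static void collectBases(Value *Ptr, const DataLayout &DL,
                         SmallVectorImpl<Value *> &Bases) {
  GetUnderlyingObjects(Ptr, Bases, DL);
}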
static GetElementPtrInst * CreateInBounds(Value *Ptr, ArrayRef< Value *> IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:897
static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA, MemoryDependenceResults *MD, DominatorTree *DT, const TargetLibraryInfo *TLI)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
const BasicBlock & front() const
Definition: Function.h:644
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction has no side ef...
Definition: Local.cpp:346
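A hedged sketch of the usual pattern around this predicate:

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Erase I if its result is unused and it has no side effects.
static bool eraseIfTriviallyDead(Instruction *I,
                                 const TargetLibraryInfo *TLI) {
  if (!isInstructionTriviallyDead(I, TLI))
    return false;
  I->eraseFromParent();
  return true;
}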
LLVM Value Representation.
Definition: Value.h:73
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that pre...
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
bool remove_if(UnaryPredicate P)
Remove items from the set vector based on a predicate function.
Definition: SetVector.h:200
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
static bool isRemovable(Instruction *I)
If the value of this instruction and the memory it writes to is unused, may we delete this instructio...
#define DEBUG(X)
Definition: Debug.h:118
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
A container for analyses that lazily runs them and caches their results.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:254
static bool isVolatile(Instruction *Inst)
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:138
This header defines various interfaces for pass management in LLVM.
bool isBigEndian() const
Definition: DataLayout.h:222
static uint64_t getPointerSize(const Value *V, const DataLayout &DL, const TargetLibraryInfo &TLI)
Value * getPointerOperand()
Definition: Instructions.h:398
Value * getRawDest() const
bool use_empty() const
Definition: Value.h:328
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset, int64_t &EarlierSize, int64_t LaterOffset, int64_t LaterSize, bool IsOverwriteEnd)
iterator_range< arg_iterator > args()
Definition: Function.h:670
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:67
static bool tryToShortenEnd(Instruction *EarlierWrite, OverlapIntervalsTy &IntervalMap, int64_t &EarlierStart, int64_t &EarlierSize)
uint64_t Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known...
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
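A minimal sketch of a local dependency query (assumes a cached MemoryDependenceResults):

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
using namespace llvm;

// A clobber answer means something may overwrite the queried location
// between the dependency and I; a def answer pins down the exact writer.
static bool dependsOnClobber(MemoryDependenceResults &MD, Instruction *I) {
  return MD.getDependency(I).isClobber();
}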
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
LLVM_NODISCARD bool isRefSet(const ModRefInfo MRI)