LLVM  6.0.0svn
PromoteMemoryToRegister.cpp
Go to the documentation of this file.
1 //===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file promotes memory references to be register references. It promotes
11 // alloca instructions which only have loads and stores as uses. An alloca is
12 // transformed by using iterated dominator frontiers to place PHI nodes, then
13 // traversing the function in depth-first order to rewrite loads and stores as
14 // appropriate.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
29 #include "llvm/IR/CFG.h"
30 #include "llvm/IR/Constants.h"
31 #include "llvm/IR/DIBuilder.h"
32 #include "llvm/IR/DebugInfo.h"
33 #include "llvm/IR/DerivedTypes.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Metadata.h"
39 #include "llvm/IR/Module.h"
42 #include <algorithm>
43 using namespace llvm;
44 
#define DEBUG_TYPE "mem2reg"

// Pass-wide statistics, printed when LLVM is run with -stats.
STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
51 
53  // FIXME: If the memory unit is of pointer or integer type, we can permit
54  // assignments to subsections of the memory unit.
55  unsigned AS = AI->getType()->getAddressSpace();
56 
57  // Only allow direct and non-volatile loads and stores...
58  for (const User *U : AI->users()) {
59  if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
60  // Note that atomic loads can be transformed; atomic semantics do
61  // not have any meaning for a local alloca.
62  if (LI->isVolatile())
63  return false;
64  } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
65  if (SI->getOperand(0) == AI)
66  return false; // Don't allow a store OF the AI, only INTO the AI.
67  // Note that atomic stores can be transformed; atomic semantics do
68  // not have any meaning for a local alloca.
69  if (SI->isVolatile())
70  return false;
71  } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
72  if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
73  II->getIntrinsicID() != Intrinsic::lifetime_end)
74  return false;
75  } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
76  if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
77  return false;
78  if (!onlyUsedByLifetimeMarkers(BCI))
79  return false;
80  } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
81  if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
82  return false;
83  if (!GEPI->hasAllZeroIndices())
84  return false;
85  if (!onlyUsedByLifetimeMarkers(GEPI))
86  return false;
87  } else {
88  return false;
89  }
90  }
91 
92  return true;
93 }
94 
95 namespace {
96 
97 struct AllocaInfo {
98  SmallVector<BasicBlock *, 32> DefiningBlocks;
100 
101  StoreInst *OnlyStore;
102  BasicBlock *OnlyBlock;
103  bool OnlyUsedInOneBlock;
104 
105  Value *AllocaPointerVal;
107 
108  void clear() {
109  DefiningBlocks.clear();
110  UsingBlocks.clear();
111  OnlyStore = nullptr;
112  OnlyBlock = nullptr;
113  OnlyUsedInOneBlock = true;
114  AllocaPointerVal = nullptr;
115  DbgDeclares.clear();
116  }
117 
118  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
119  /// by the rest of the pass to reason about the uses of this alloca.
120  void AnalyzeAlloca(AllocaInst *AI) {
121  clear();
122 
123  // As we scan the uses of the alloca instruction, keep track of stores,
124  // and decide whether all of the loads and stores to the alloca are within
125  // the same basic block.
126  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
127  Instruction *User = cast<Instruction>(*UI++);
128 
129  if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
130  // Remember the basic blocks which define new values for the alloca
131  DefiningBlocks.push_back(SI->getParent());
132  AllocaPointerVal = SI->getOperand(0);
133  OnlyStore = SI;
134  } else {
135  LoadInst *LI = cast<LoadInst>(User);
136  // Otherwise it must be a load instruction, keep track of variable
137  // reads.
138  UsingBlocks.push_back(LI->getParent());
139  AllocaPointerVal = LI;
140  }
141 
142  if (OnlyUsedInOneBlock) {
143  if (!OnlyBlock)
144  OnlyBlock = User->getParent();
145  else if (OnlyBlock != User->getParent())
146  OnlyUsedInOneBlock = false;
147  }
148  }
149 
150  DbgDeclares = FindDbgAddrUses(AI);
151  }
152 };
153 
154 // Data package used by RenamePass()
155 class RenamePassData {
156 public:
157  typedef std::vector<Value *> ValVector;
158 
159  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V)
160  : BB(B), Pred(P), Values(std::move(V)) {}
161  BasicBlock *BB;
162  BasicBlock *Pred;
163  ValVector Values;
164 };
165 
166 /// \brief This assigns and keeps a per-bb relative ordering of load/store
167 /// instructions in the block that directly load or store an alloca.
168 ///
169 /// This functionality is important because it avoids scanning large basic
170 /// blocks multiple times when promoting many allocas in the same block.
171 class LargeBlockInfo {
172  /// \brief For each instruction that we track, keep the index of the
173  /// instruction.
174  ///
175  /// The index starts out as the number of the instruction from the start of
176  /// the block.
178 
179 public:
180 
181  /// This code only looks at accesses to allocas.
182  static bool isInterestingInstruction(const Instruction *I) {
183  return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
184  (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
185  }
186 
187  /// Get or calculate the index of the specified instruction.
188  unsigned getInstructionIndex(const Instruction *I) {
189  assert(isInterestingInstruction(I) &&
190  "Not a load/store to/from an alloca?");
191 
192  // If we already have this instruction number, return it.
194  if (It != InstNumbers.end())
195  return It->second;
196 
197  // Scan the whole block to get the instruction. This accumulates
198  // information for every interesting instruction in the block, in order to
199  // avoid gratuitus rescans.
200  const BasicBlock *BB = I->getParent();
201  unsigned InstNo = 0;
202  for (const Instruction &BBI : *BB)
203  if (isInterestingInstruction(&BBI))
204  InstNumbers[&BBI] = InstNo++;
205  It = InstNumbers.find(I);
206 
207  assert(It != InstNumbers.end() && "Didn't insert instruction?");
208  return It->second;
209  }
210 
211  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
212 
213  void clear() { InstNumbers.clear(); }
214 };
215 
216 struct PromoteMem2Reg {
217  /// The alloca instructions being promoted.
218  std::vector<AllocaInst *> Allocas;
219  DominatorTree &DT;
220  DIBuilder DIB;
221  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
222  AssumptionCache *AC;
223 
224  const SimplifyQuery SQ;
225  /// Reverse mapping of Allocas.
227 
228  /// \brief The PhiNodes we're adding.
229  ///
230  /// That map is used to simplify some Phi nodes as we iterate over it, so
231  /// it should have deterministic iterators. We could use a MapVector, but
232  /// since we already maintain a map from BasicBlock* to a stable numbering
233  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
235 
236  /// For each PHI node, keep track of which entry in Allocas it corresponds
237  /// to.
238  DenseMap<PHINode *, unsigned> PhiToAllocaMap;
239 
240  /// If we are updating an AliasSetTracker, then for each alloca that is of
241  /// pointer type, we keep track of what to copyValue to the inserted PHI
242  /// nodes here.
243  std::vector<Value *> PointerAllocaValues;
244 
245  /// For each alloca, we keep track of the dbg.declare intrinsic that
246  /// describes it, if any, so that we can convert it to a dbg.value
247  /// intrinsic if the alloca gets promoted.
248  SmallVector<TinyPtrVector<DbgInfoIntrinsic *>, 8> AllocaDbgDeclares;
249 
250  /// The set of basic blocks the renamer has already visited.
251  ///
253 
254  /// Contains a stable numbering of basic blocks to avoid non-determinstic
255  /// behavior.
257 
258  /// Lazily compute the number of predecessors a block has.
260 
261 public:
262  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
263  AssumptionCache *AC)
264  : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
265  DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
266  AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
267  nullptr, &DT, AC) {}
268 
269  void run();
270 
271 private:
272  void RemoveFromAllocasList(unsigned &AllocaIdx) {
273  Allocas[AllocaIdx] = Allocas.back();
274  Allocas.pop_back();
275  --AllocaIdx;
276  }
277 
278  unsigned getNumPreds(const BasicBlock *BB) {
279  unsigned &NP = BBNumPreds[BB];
280  if (NP == 0)
281  NP = std::distance(pred_begin(BB), pred_end(BB)) + 1;
282  return NP - 1;
283  }
284 
285  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
286  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
287  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
288  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
289  RenamePassData::ValVector &IncVals,
290  std::vector<RenamePassData> &Worklist);
291  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
292 };
293 
294 } // end of anonymous namespace
295 
296 /// Given a LoadInst LI this adds assume(LI != null) after it.
298  Function *AssumeIntrinsic =
299  Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
300  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
302  LoadNotNull->insertAfter(LI);
303  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
304  CI->insertAfter(LoadNotNull);
305  AC->registerAssumption(CI);
306 }
307 
309  // Knowing that this alloca is promotable, we know that it's safe to kill all
310  // instructions except for load and store.
311 
312  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
313  Instruction *I = cast<Instruction>(*UI);
314  ++UI;
315  if (isa<LoadInst>(I) || isa<StoreInst>(I))
316  continue;
317 
318  if (!I->getType()->isVoidTy()) {
319  // The only users of this bitcast/GEP instruction are lifetime intrinsics.
320  // Follow the use/def chain to erase them now instead of leaving it for
321  // dead code elimination later.
322  for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
323  Instruction *Inst = cast<Instruction>(*UUI);
324  ++UUI;
325  Inst->eraseFromParent();
326  }
327  }
328  I->eraseFromParent();
329  }
330 }
331 
332 /// \brief Rewrite as many loads as possible given a single store.
333 ///
334 /// When there is only a single store, we can use the domtree to trivially
335 /// replace all of the dominated loads with the stored value. Do so, and return
336 /// true if this has successfully promoted the alloca entirely. If this returns
337 /// false there were some loads which were not dominated by the single store
338 /// and thus must be phi-ed with undef. We fall back to the standard alloca
339 /// promotion algorithm in that case.
340 static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
341  LargeBlockInfo &LBI, const DataLayout &DL,
342  DominatorTree &DT, AssumptionCache *AC) {
343  StoreInst *OnlyStore = Info.OnlyStore;
344  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
345  BasicBlock *StoreBB = OnlyStore->getParent();
346  int StoreIndex = -1;
347 
348  // Clear out UsingBlocks. We will reconstruct it here if needed.
349  Info.UsingBlocks.clear();
350 
351  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
352  Instruction *UserInst = cast<Instruction>(*UI++);
353  if (!isa<LoadInst>(UserInst)) {
354  assert(UserInst == OnlyStore && "Should only have load/stores");
355  continue;
356  }
357  LoadInst *LI = cast<LoadInst>(UserInst);
358 
359  // Okay, if we have a load from the alloca, we want to replace it with the
360  // only value stored to the alloca. We can do this if the value is
361  // dominated by the store. If not, we use the rest of the mem2reg machinery
362  // to insert the phi nodes as needed.
363  if (!StoringGlobalVal) { // Non-instructions are always dominated.
364  if (LI->getParent() == StoreBB) {
365  // If we have a use that is in the same block as the store, compare the
366  // indices of the two instructions to see which one came first. If the
367  // load came before the store, we can't handle it.
368  if (StoreIndex == -1)
369  StoreIndex = LBI.getInstructionIndex(OnlyStore);
370 
371  if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
372  // Can't handle this load, bail out.
373  Info.UsingBlocks.push_back(StoreBB);
374  continue;
375  }
376 
377  } else if (LI->getParent() != StoreBB &&
378  !DT.dominates(StoreBB, LI->getParent())) {
379  // If the load and store are in different blocks, use BB dominance to
380  // check their relationships. If the store doesn't dom the use, bail
381  // out.
382  Info.UsingBlocks.push_back(LI->getParent());
383  continue;
384  }
385  }
386 
387  // Otherwise, we *can* safely rewrite this load.
388  Value *ReplVal = OnlyStore->getOperand(0);
389  // If the replacement value is the load, this must occur in unreachable
390  // code.
391  if (ReplVal == LI)
392  ReplVal = UndefValue::get(LI->getType());
393 
394  // If the load was marked as nonnull we don't want to lose
395  // that information when we erase this Load. So we preserve
396  // it with an assume.
397  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
398  !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
399  addAssumeNonNull(AC, LI);
400 
401  LI->replaceAllUsesWith(ReplVal);
402  LI->eraseFromParent();
403  LBI.deleteValue(LI);
404  }
405 
406  // Finally, after the scan, check to see if the store is all that is left.
407  if (!Info.UsingBlocks.empty())
408  return false; // If not, we'll have to fall back for the remainder.
409 
410  // Record debuginfo for the store and remove the declaration's
411  // debuginfo.
412  for (DbgInfoIntrinsic *DII : Info.DbgDeclares) {
413  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
414  ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
415  DII->eraseFromParent();
416  LBI.deleteValue(DII);
417  }
418  // Remove the (now dead) store and alloca.
419  Info.OnlyStore->eraseFromParent();
420  LBI.deleteValue(Info.OnlyStore);
421 
422  AI->eraseFromParent();
423  LBI.deleteValue(AI);
424  return true;
425 }
426 
427 /// Many allocas are only used within a single basic block. If this is the
428 /// case, avoid traversing the CFG and inserting a lot of potentially useless
429 /// PHI nodes by just performing a single linear pass over the basic block
430 /// using the Alloca.
431 ///
432 /// If we cannot promote this alloca (because it is read before it is written),
433 /// return false. This is necessary in cases where, due to control flow, the
434 /// alloca is undefined only on some control flow paths. e.g. code like
435 /// this is correct in LLVM IR:
436 /// // A is an alloca with no stores so far
437 /// for (...) {
438 /// int t = *A;
439 /// if (!first_iteration)
440 /// use(t);
441 /// *A = 42;
442 /// }
443 static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
444  LargeBlockInfo &LBI,
445  const DataLayout &DL,
446  DominatorTree &DT,
447  AssumptionCache *AC) {
448  // The trickiest case to handle is when we have large blocks. Because of this,
449  // this code is optimized assuming that large blocks happen. This does not
450  // significantly pessimize the small block case. This uses LargeBlockInfo to
451  // make it efficient to get the index of various operations in the block.
452 
453  // Walk the use-def list of the alloca, getting the locations of all stores.
454  typedef SmallVector<std::pair<unsigned, StoreInst *>, 64> StoresByIndexTy;
455  StoresByIndexTy StoresByIndex;
456 
457  for (User *U : AI->users())
458  if (StoreInst *SI = dyn_cast<StoreInst>(U))
459  StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
460 
461  // Sort the stores by their index, making it efficient to do a lookup with a
462  // binary search.
463  std::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());
464 
465  // Walk all of the loads from this alloca, replacing them with the nearest
466  // store above them, if any.
467  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
468  LoadInst *LI = dyn_cast<LoadInst>(*UI++);
469  if (!LI)
470  continue;
471 
472  unsigned LoadIdx = LBI.getInstructionIndex(LI);
473 
474  // Find the nearest store that has a lower index than this load.
475  StoresByIndexTy::iterator I =
476  std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
477  std::make_pair(LoadIdx,
478  static_cast<StoreInst *>(nullptr)),
479  less_first());
480  if (I == StoresByIndex.begin()) {
481  if (StoresByIndex.empty())
482  // If there are no stores, the load takes the undef value.
484  else
485  // There is no store before this load, bail out (load may be affected
486  // by the following stores - see main comment).
487  return false;
488  } else {
489  // Otherwise, there was a store before this load, the load takes its value.
490  // Note, if the load was marked as nonnull we don't want to lose that
491  // information when we erase it. So we preserve it with an assume.
492  Value *ReplVal = std::prev(I)->second->getOperand(0);
493  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
494  !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
495  addAssumeNonNull(AC, LI);
496 
497  LI->replaceAllUsesWith(ReplVal);
498  }
499 
500  LI->eraseFromParent();
501  LBI.deleteValue(LI);
502  }
503 
504  // Remove the (now dead) stores and alloca.
505  while (!AI->use_empty()) {
506  StoreInst *SI = cast<StoreInst>(AI->user_back());
507  // Record debuginfo for the store before removing it.
508  for (DbgInfoIntrinsic *DII : Info.DbgDeclares) {
509  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
510  ConvertDebugDeclareToDebugValue(DII, SI, DIB);
511  }
512  SI->eraseFromParent();
513  LBI.deleteValue(SI);
514  }
515 
516  AI->eraseFromParent();
517  LBI.deleteValue(AI);
518 
519  // The alloca's debuginfo can be removed as well.
520  for (DbgInfoIntrinsic *DII : Info.DbgDeclares) {
521  DII->eraseFromParent();
522  LBI.deleteValue(DII);
523  }
524 
525  ++NumLocalPromoted;
526  return true;
527 }
528 
529 void PromoteMem2Reg::run() {
530  Function &F = *DT.getRoot()->getParent();
531 
532  AllocaDbgDeclares.resize(Allocas.size());
533 
534  AllocaInfo Info;
535  LargeBlockInfo LBI;
536  ForwardIDFCalculator IDF(DT);
537 
538  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
539  AllocaInst *AI = Allocas[AllocaNum];
540 
541  assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
542  assert(AI->getParent()->getParent() == &F &&
543  "All allocas should be in the same function, which is same as DF!");
544 
546 
547  if (AI->use_empty()) {
548  // If there are no uses of the alloca, just delete it now.
549  AI->eraseFromParent();
550 
551  // Remove the alloca from the Allocas list, since it has been processed
552  RemoveFromAllocasList(AllocaNum);
553  ++NumDeadAlloca;
554  continue;
555  }
556 
557  // Calculate the set of read and write-locations for each alloca. This is
558  // analogous to finding the 'uses' and 'definitions' of each variable.
559  Info.AnalyzeAlloca(AI);
560 
561  // If there is only a single store to this value, replace any loads of
562  // it that are directly dominated by the definition with the value stored.
563  if (Info.DefiningBlocks.size() == 1) {
564  if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
565  // The alloca has been processed, move on.
566  RemoveFromAllocasList(AllocaNum);
567  ++NumSingleStore;
568  continue;
569  }
570  }
571 
572  // If the alloca is only read and written in one basic block, just perform a
573  // linear sweep over the block to eliminate it.
574  if (Info.OnlyUsedInOneBlock &&
575  promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
576  // The alloca has been processed, move on.
577  RemoveFromAllocasList(AllocaNum);
578  continue;
579  }
580 
581  // If we haven't computed a numbering for the BB's in the function, do so
582  // now.
583  if (BBNumbers.empty()) {
584  unsigned ID = 0;
585  for (auto &BB : F)
586  BBNumbers[&BB] = ID++;
587  }
588 
589  // Remember the dbg.declare intrinsic describing this alloca, if any.
590  if (!Info.DbgDeclares.empty())
591  AllocaDbgDeclares[AllocaNum] = Info.DbgDeclares;
592 
593  // Keep the reverse mapping of the 'Allocas' array for the rename pass.
594  AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
595 
596  // At this point, we're committed to promoting the alloca using IDF's, and
597  // the standard SSA construction algorithm. Determine which blocks need PHI
598  // nodes and see if we can optimize out some work by avoiding insertion of
599  // dead phi nodes.
600 
601 
602  // Unique the set of defining blocks for efficient lookup.
604  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());
605 
606  // Determine which blocks the value is live in. These are blocks which lead
607  // to uses.
608  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
609  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
610 
611  // At this point, we're committed to promoting the alloca using IDF's, and
612  // the standard SSA construction algorithm. Determine which blocks need phi
613  // nodes and see if we can optimize out some work by avoiding insertion of
614  // dead phi nodes.
615  IDF.setLiveInBlocks(LiveInBlocks);
616  IDF.setDefiningBlocks(DefBlocks);
618  IDF.calculate(PHIBlocks);
619  if (PHIBlocks.size() > 1)
620  std::sort(PHIBlocks.begin(), PHIBlocks.end(),
621  [this](BasicBlock *A, BasicBlock *B) {
622  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
623  });
624 
625  unsigned CurrentVersion = 0;
626  for (BasicBlock *BB : PHIBlocks)
627  QueuePhiNode(BB, AllocaNum, CurrentVersion);
628  }
629 
630  if (Allocas.empty())
631  return; // All of the allocas must have been trivial!
632 
633  LBI.clear();
634 
635  // Set the incoming values for the basic block to be null values for all of
636  // the alloca's. We do this in case there is a load of a value that has not
637  // been stored yet. In this case, it will get this null value.
638  //
639  RenamePassData::ValVector Values(Allocas.size());
640  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
641  Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());
642 
643  // Walks all basic blocks in the function performing the SSA rename algorithm
644  // and inserting the phi nodes we marked as necessary
645  //
646  std::vector<RenamePassData> RenamePassWorkList;
647  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values));
648  do {
649  RenamePassData RPD = std::move(RenamePassWorkList.back());
650  RenamePassWorkList.pop_back();
651  // RenamePass may add new worklist entries.
652  RenamePass(RPD.BB, RPD.Pred, RPD.Values, RenamePassWorkList);
653  } while (!RenamePassWorkList.empty());
654 
655  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
656  Visited.clear();
657 
658  // Remove the allocas themselves from the function.
659  for (Instruction *A : Allocas) {
660  // If there are any uses of the alloca instructions left, they must be in
661  // unreachable basic blocks that were not processed by walking the dominator
662  // tree. Just delete the users now.
663  if (!A->use_empty())
664  A->replaceAllUsesWith(UndefValue::get(A->getType()));
665  A->eraseFromParent();
666  }
667 
668  // Remove alloca's dbg.declare instrinsics from the function.
669  for (auto &Declares : AllocaDbgDeclares)
670  for (auto *DII : Declares)
671  DII->eraseFromParent();
672 
673  // Loop over all of the PHI nodes and see if there are any that we can get
674  // rid of because they merge all of the same incoming values. This can
675  // happen due to undef values coming into the PHI nodes. This process is
676  // iterative, because eliminating one PHI node can cause others to be removed.
677  bool EliminatedAPHI = true;
678  while (EliminatedAPHI) {
679  EliminatedAPHI = false;
680 
681  // Iterating over NewPhiNodes is deterministic, so it is safe to try to
682  // simplify and RAUW them as we go. If it was not, we could add uses to
683  // the values we replace with in a non-deterministic order, thus creating
684  // non-deterministic def->use chains.
685  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
686  I = NewPhiNodes.begin(),
687  E = NewPhiNodes.end();
688  I != E;) {
689  PHINode *PN = I->second;
690 
691  // If this PHI node merges one value and/or undefs, get the value.
692  if (Value *V = SimplifyInstruction(PN, SQ)) {
693  PN->replaceAllUsesWith(V);
694  PN->eraseFromParent();
695  NewPhiNodes.erase(I++);
696  EliminatedAPHI = true;
697  continue;
698  }
699  ++I;
700  }
701  }
702 
703  // At this point, the renamer has added entries to PHI nodes for all reachable
704  // code. Unfortunately, there may be unreachable blocks which the renamer
705  // hasn't traversed. If this is the case, the PHI nodes may not
706  // have incoming values for all predecessors. Loop over all PHI nodes we have
707  // created, inserting undef values if they are missing any incoming values.
708  //
709  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
710  I = NewPhiNodes.begin(),
711  E = NewPhiNodes.end();
712  I != E; ++I) {
713  // We want to do this once per basic block. As such, only process a block
714  // when we find the PHI that is the first entry in the block.
715  PHINode *SomePHI = I->second;
716  BasicBlock *BB = SomePHI->getParent();
717  if (&BB->front() != SomePHI)
718  continue;
719 
720  // Only do work here if there the PHI nodes are missing incoming values. We
721  // know that all PHI nodes that were inserted in a block will have the same
722  // number of incoming values, so we can just check any of them.
723  if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
724  continue;
725 
726  // Get the preds for BB.
728 
729  // Ok, now we know that all of the PHI nodes are missing entries for some
730  // basic blocks. Start by sorting the incoming predecessors for efficient
731  // access.
732  std::sort(Preds.begin(), Preds.end());
733 
734  // Now we loop through all BB's which have entries in SomePHI and remove
735  // them from the Preds list.
736  for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
737  // Do a log(n) search of the Preds list for the entry we want.
738  SmallVectorImpl<BasicBlock *>::iterator EntIt = std::lower_bound(
739  Preds.begin(), Preds.end(), SomePHI->getIncomingBlock(i));
740  assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
741  "PHI node has entry for a block which is not a predecessor!");
742 
743  // Remove the entry
744  Preds.erase(EntIt);
745  }
746 
747  // At this point, the blocks left in the preds list must have dummy
748  // entries inserted into every PHI nodes for the block. Update all the phi
749  // nodes in this block that we are inserting (there could be phis before
750  // mem2reg runs).
751  unsigned NumBadPreds = SomePHI->getNumIncomingValues();
752  BasicBlock::iterator BBI = BB->begin();
753  while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
754  SomePHI->getNumIncomingValues() == NumBadPreds) {
755  Value *UndefVal = UndefValue::get(SomePHI->getType());
756  for (BasicBlock *Pred : Preds)
757  SomePHI->addIncoming(UndefVal, Pred);
758  }
759  }
760 
761  NewPhiNodes.clear();
762 }
763 
764 /// \brief Determine which blocks the value is live in.
765 ///
766 /// These are blocks which lead to uses. Knowing this allows us to avoid
767 /// inserting PHI nodes into blocks which don't lead to uses (thus, the
768 /// inserted phi nodes would be dead).
769 void PromoteMem2Reg::ComputeLiveInBlocks(
770  AllocaInst *AI, AllocaInfo &Info,
771  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
772  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
773 
774  // To determine liveness, we must iterate through the predecessors of blocks
775  // where the def is live. Blocks are added to the worklist if we need to
776  // check their predecessors. Start with all the using blocks.
777  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
778  Info.UsingBlocks.end());
779 
780  // If any of the using blocks is also a definition block, check to see if the
781  // definition occurs before or after the use. If it happens before the use,
782  // the value isn't really live-in.
783  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
784  BasicBlock *BB = LiveInBlockWorklist[i];
785  if (!DefBlocks.count(BB))
786  continue;
787 
788  // Okay, this is a block that both uses and defines the value. If the first
789  // reference to the alloca is a def (store), then we know it isn't live-in.
790  for (BasicBlock::iterator I = BB->begin();; ++I) {
791  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
792  if (SI->getOperand(1) != AI)
793  continue;
794 
795  // We found a store to the alloca before a load. The alloca is not
796  // actually live-in here.
797  LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
798  LiveInBlockWorklist.pop_back();
799  --i;
800  --e;
801  break;
802  }
803 
804  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
805  if (LI->getOperand(0) != AI)
806  continue;
807 
808  // Okay, we found a load before a store to the alloca. It is actually
809  // live into this block.
810  break;
811  }
812  }
813  }
814 
815  // Now that we have a set of blocks where the phi is live-in, recursively add
816  // their predecessors until we find the full region the value is live.
817  while (!LiveInBlockWorklist.empty()) {
818  BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
819 
820  // The block really is live in here, insert it into the set. If already in
821  // the set, then it has already been processed.
822  if (!LiveInBlocks.insert(BB).second)
823  continue;
824 
825  // Since the value is live into BB, it is either defined in a predecessor or
826  // live into it to. Add the preds to the worklist unless they are a
827  // defining block.
828  for (BasicBlock *P : predecessors(BB)) {
829  // The value is not live into a predecessor if it defines the value.
830  if (DefBlocks.count(P))
831  continue;
832 
833  // Otherwise it is, add to the worklist.
834  LiveInBlockWorklist.push_back(P);
835  }
836  }
837 }
838 
839 /// \brief Queue a phi-node to be added to a basic-block for a specific Alloca.
840 ///
841 /// Returns true if there wasn't already a phi-node for that variable
842 bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
843  unsigned &Version) {
844  // Look up the basic-block in question.
845  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];
846 
847  // If the BB already has a phi node added for the i'th alloca then we're done!
848  if (PN)
849  return false;
850 
851  // Create a PhiNode using the dereferenced type... and add the phi-node to the
852  // BasicBlock.
853  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
854  Allocas[AllocaNo]->getName() + "." + Twine(Version++),
855  &BB->front());
856  ++NumPHIInsert;
857  PhiToAllocaMap[PN] = AllocaNo;
858  return true;
859 }
860 
861 /// \brief Recursively traverse the CFG of the function, renaming loads and
862 /// stores to the allocas which we are promoting.
863 ///
864 /// IncomingVals indicates what value each Alloca contains on exit from the
865 /// predecessor block Pred.
866 void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
867  RenamePassData::ValVector &IncomingVals,
868  std::vector<RenamePassData> &Worklist) {
869 NextIteration:
870  // If we are inserting any phi nodes into this BB, they will already be in the
871  // block.
872  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
873  // If we have PHI nodes to update, compute the number of edges from Pred to
874  // BB.
875  if (PhiToAllocaMap.count(APN)) {
876  // We want to be able to distinguish between PHI nodes being inserted by
877  // this invocation of mem2reg from those phi nodes that already existed in
878  // the IR before mem2reg was run. We determine that APN is being inserted
879  // because it is missing incoming edges. All other PHI nodes being
880  // inserted by this pass of mem2reg will have the same number of incoming
881  // operands so far. Remember this count.
882  unsigned NewPHINumOperands = APN->getNumOperands();
883 
884  unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
885  assert(NumEdges && "Must be at least one edge from Pred to BB!");
886 
887  // Add entries for all the phis.
888  BasicBlock::iterator PNI = BB->begin();
889  do {
890  unsigned AllocaNo = PhiToAllocaMap[APN];
891 
892  // Add N incoming values to the PHI node.
893  for (unsigned i = 0; i != NumEdges; ++i)
894  APN->addIncoming(IncomingVals[AllocaNo], Pred);
895 
896  // The currently active variable for this block is now the PHI.
897  IncomingVals[AllocaNo] = APN;
898  for (DbgInfoIntrinsic *DII : AllocaDbgDeclares[AllocaNo])
899  ConvertDebugDeclareToDebugValue(DII, APN, DIB);
900 
901  // Get the next phi node.
902  ++PNI;
903  APN = dyn_cast<PHINode>(PNI);
904  if (!APN)
905  break;
906 
907  // Verify that it is missing entries. If not, it is not being inserted
908  // by this mem2reg invocation so we want to ignore it.
909  } while (APN->getNumOperands() == NewPHINumOperands);
910  }
911  }
912 
913  // Don't revisit blocks.
914  if (!Visited.insert(BB).second)
915  return;
916 
917  for (BasicBlock::iterator II = BB->begin(); !isa<TerminatorInst>(II);) {
918  Instruction *I = &*II++; // get the instruction, increment iterator
919 
920  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
921  AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
922  if (!Src)
923  continue;
924 
925  DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
926  if (AI == AllocaLookup.end())
927  continue;
928 
929  Value *V = IncomingVals[AI->second];
930 
931  // If the load was marked as nonnull we don't want to lose
932  // that information when we erase this Load. So we preserve
933  // it with an assume.
934  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
935  !llvm::isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
936  addAssumeNonNull(AC, LI);
937 
938  // Anything using the load now uses the current value.
939  LI->replaceAllUsesWith(V);
940  BB->getInstList().erase(LI);
941  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
942  // Delete this instruction and mark the name as the current holder of the
943  // value
944  AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
945  if (!Dest)
946  continue;
947 
948  DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
949  if (ai == AllocaLookup.end())
950  continue;
951 
952  // what value were we writing?
953  IncomingVals[ai->second] = SI->getOperand(0);
954  // Record debuginfo for the store before removing it.
955  for (DbgInfoIntrinsic *DII : AllocaDbgDeclares[ai->second])
957  BB->getInstList().erase(SI);
958  }
959  }
960 
961  // 'Recurse' to our successors.
962  succ_iterator I = succ_begin(BB), E = succ_end(BB);
963  if (I == E)
964  return;
965 
966  // Keep track of the successors so we don't visit the same successor twice
967  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;
968 
969  // Handle the first successor without using the worklist.
970  VisitedSuccs.insert(*I);
971  Pred = BB;
972  BB = *I;
973  ++I;
974 
975  for (; I != E; ++I)
976  if (VisitedSuccs.insert(*I).second)
977  Worklist.emplace_back(*I, Pred, IncomingVals);
978 
979  goto NextIteration;
980 }
981 
983  AssumptionCache *AC) {
984  // If there is nothing to do, bail out...
985  if (Allocas.empty())
986  return;
987 
988  PromoteMem2Reg(Allocas, DT, AC).run();
989 }
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:69
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:158
iterator erase(iterator where)
Definition: ilist.h:280
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
iterator begin() const
Definition: ArrayRef.h:137
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
static void removeLifetimeIntrinsicUsers(AllocaInst *AI)
This class represents a function call, abstracting a target machine's calling convention.
This file contains the declarations for metadata subclasses.
A cache of .assume calls within a function.
STATISTIC(NumFunctions, "Total number of functions")
static CallInst * Create(Value *Func, ArrayRef< Value *> Args, ArrayRef< OperandBundleDef > Bundles=None, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
An instruction for reading from memory.
Definition: Instructions.h:164
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:31
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:207
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:336
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:361
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
void setDefiningBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
static StringRef getName(Value *V)
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:103
#define F(x, y, z)
Definition: MD5.cpp:55
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
This class represents a no-op cast from one type to another.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:194
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
An instruction for storing to memory.
Definition: Instructions.h:306
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(std::begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:878
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:428
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:140
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:975
Value * getOperand(unsigned i) const
Definition: User.h:154
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:106
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:141
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:837
#define P(N)
bool erase(const KeyT &Val)
Definition: DenseMap.h:268
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:116
void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===----------------------------------------------------------------------===// Dbg Intrinsic utilities ...
Definition: Local.cpp:1102
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
const Instruction & front() const
Definition: BasicBlock.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:363
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:113
const Instruction & back() const
Definition: BasicBlock.h:266
This instruction compares its operands according to the predicate given to the constructor.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:495
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:374
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI)
Given a LoadInst LI this adds assume(LI != null) after it.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1320
const AMDGPUAS & AS
iterator erase(const_iterator CI)
Definition: SmallVector.h:449
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:220
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:317
void setLiveInBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is live on entry to the block...
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:410
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Many allocas are only used within a single basic block.
void calculate(SmallVectorImpl< BasicBlock *> &IDFBlocks)
Calculate iterated dominance frontiers.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:239
Module.h This file contains the declarations for the Module class.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
This is the common base class for debug info intrinsics.
Definition: IntrinsicInst.h:67
iterator end() const
Definition: ArrayRef.h:138
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:110
unsigned getNumIncomingValues() const
Return the number of incoming edges.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:57
iterator_range< user_iterator > users()
Definition: Value.h:395
TinyPtrVector< DbgInfoIntrinsic * > FindDbgAddrUses(Value *V)
Finds all intrinsics declaring local variables as living in the memory that 'V' points to...
Definition: Local.cpp:1238
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:210
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:120
void registerAssumption(CallInst *CI)
Add an .assume intrinsic to this function's cache.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:108
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction...
Definition: Instruction.cpp:81
#define I(x, y, z)
Definition: MD5.cpp:58
iterator end()
Definition: DenseMap.h:79
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
Determine the iterated dominance frontier, given a set of defining blocks, and optionally, a set of live-in blocks.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
Definition: Value.h:371
const BasicBlock & front() const
Definition: Function.h:595
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Rewrite as many loads as possible given a single store.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:545
LLVM Value Representation.
Definition: Value.h:73
void PromoteMemToReg(ArrayRef< AllocaInst *> Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
NodeT * getRoot() const
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if the given value is known to be non-zero when defined.
void sort(Policy policy, RandomAccessIterator Start, RandomAccessIterator End, const Comparator &Comp=Comparator())
Definition: Parallel.h:199
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
const uint64_t Version
Definition: InstrProf.h:865
bool use_empty() const
Definition: Value.h:322
Function object to check whether the first component of a std::pair compares less than the first comp...
Definition: STLExtras.h:658
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:144
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:66
an instruction to allocate memory on the stack
Definition: Instructions.h:60
user_iterator user_end()
Definition: Value.h:379