LLVM  8.0.0svn
PromoteMemoryToRegister.cpp
Go to the documentation of this file.
1 //===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file promotes memory references to be register references. It promotes
11 // alloca instructions which only have loads and stores as uses. An alloca is
12 // transformed by using iterated dominator frontiers to place PHI nodes, then
13 // traversing the function in depth-first order to rewrite loads and stores as
14 // appropriate.
15 //
16 //===----------------------------------------------------------------------===//
17 
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>
55 
56 using namespace llvm;
57 
58 #define DEBUG_TYPE "mem2reg"
59 
60 STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
61 STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
62 STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
63 STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
64 
66  // FIXME: If the memory unit is of pointer or integer type, we can permit
67  // assignments to subsections of the memory unit.
68  unsigned AS = AI->getType()->getAddressSpace();
69 
70  // Only allow direct and non-volatile loads and stores...
71  for (const User *U : AI->users()) {
72  if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
73  // Note that atomic loads can be transformed; atomic semantics do
74  // not have any meaning for a local alloca.
75  if (LI->isVolatile())
76  return false;
77  } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
78  if (SI->getOperand(0) == AI)
79  return false; // Don't allow a store OF the AI, only INTO the AI.
80  // Note that atomic stores can be transformed; atomic semantics do
81  // not have any meaning for a local alloca.
82  if (SI->isVolatile())
83  return false;
84  } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
85  if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
86  II->getIntrinsicID() != Intrinsic::lifetime_end)
87  return false;
88  } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
89  if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
90  return false;
91  if (!onlyUsedByLifetimeMarkers(BCI))
92  return false;
93  } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
94  if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
95  return false;
96  if (!GEPI->hasAllZeroIndices())
97  return false;
98  if (!onlyUsedByLifetimeMarkers(GEPI))
99  return false;
100  } else {
101  return false;
102  }
103  }
104 
105  return true;
106 }
107 
108 namespace {
109 
110 struct AllocaInfo {
111  SmallVector<BasicBlock *, 32> DefiningBlocks;
112  SmallVector<BasicBlock *, 32> UsingBlocks;
113 
114  StoreInst *OnlyStore;
115  BasicBlock *OnlyBlock;
116  bool OnlyUsedInOneBlock;
117 
118  Value *AllocaPointerVal;
120 
121  void clear() {
122  DefiningBlocks.clear();
123  UsingBlocks.clear();
124  OnlyStore = nullptr;
125  OnlyBlock = nullptr;
126  OnlyUsedInOneBlock = true;
127  AllocaPointerVal = nullptr;
128  DbgDeclares.clear();
129  }
130 
131  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
132  /// by the rest of the pass to reason about the uses of this alloca.
133  void AnalyzeAlloca(AllocaInst *AI) {
134  clear();
135 
136  // As we scan the uses of the alloca instruction, keep track of stores,
137  // and decide whether all of the loads and stores to the alloca are within
138  // the same basic block.
139  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
140  Instruction *User = cast<Instruction>(*UI++);
141 
142  if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
143  // Remember the basic blocks which define new values for the alloca
144  DefiningBlocks.push_back(SI->getParent());
145  AllocaPointerVal = SI->getOperand(0);
146  OnlyStore = SI;
147  } else {
148  LoadInst *LI = cast<LoadInst>(User);
149  // Otherwise it must be a load instruction, keep track of variable
150  // reads.
151  UsingBlocks.push_back(LI->getParent());
152  AllocaPointerVal = LI;
153  }
154 
155  if (OnlyUsedInOneBlock) {
156  if (!OnlyBlock)
157  OnlyBlock = User->getParent();
158  else if (OnlyBlock != User->getParent())
159  OnlyUsedInOneBlock = false;
160  }
161  }
162 
163  DbgDeclares = FindDbgAddrUses(AI);
164  }
165 };
166 
/// Data package used by RenamePass().
///
/// One worklist entry for the iterative SSA-renaming walk: the block to
/// process, the CFG edge it was reached through, and the per-alloca
/// incoming values/locations along that edge.
struct RenamePassData {
  using ValVector = std::vector<Value *>;
  using LocationVector = std::vector<DebugLoc>;

  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
      : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}

  // Block to rename.
  BasicBlock *BB;
  // Predecessor we arrived from (the CFG edge being processed).
  BasicBlock *Pred;
  // Current value of each alloca on entry to BB, indexed like Allocas.
  ValVector Values;
  // Debug location associated with each incoming value.
  LocationVector Locations;
};
180 
181 /// This assigns and keeps a per-bb relative ordering of load/store
182 /// instructions in the block that directly load or store an alloca.
183 ///
184 /// This functionality is important because it avoids scanning large basic
185 /// blocks multiple times when promoting many allocas in the same block.
186 class LargeBlockInfo {
187  /// For each instruction that we track, keep the index of the
188  /// instruction.
189  ///
190  /// The index starts out as the number of the instruction from the start of
191  /// the block.
193 
194 public:
195 
196  /// This code only looks at accesses to allocas.
197  static bool isInterestingInstruction(const Instruction *I) {
198  return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
199  (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
200  }
201 
202  /// Get or calculate the index of the specified instruction.
203  unsigned getInstructionIndex(const Instruction *I) {
204  assert(isInterestingInstruction(I) &&
205  "Not a load/store to/from an alloca?");
206 
207  // If we already have this instruction number, return it.
209  if (It != InstNumbers.end())
210  return It->second;
211 
212  // Scan the whole block to get the instruction. This accumulates
213  // information for every interesting instruction in the block, in order to
214  // avoid gratuitus rescans.
215  const BasicBlock *BB = I->getParent();
216  unsigned InstNo = 0;
217  for (const Instruction &BBI : *BB)
218  if (isInterestingInstruction(&BBI))
219  InstNumbers[&BBI] = InstNo++;
220  It = InstNumbers.find(I);
221 
222  assert(It != InstNumbers.end() && "Didn't insert instruction?");
223  return It->second;
224  }
225 
226  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
227 
228  void clear() { InstNumbers.clear(); }
229 };
230 
231 struct PromoteMem2Reg {
232  /// The alloca instructions being promoted.
233  std::vector<AllocaInst *> Allocas;
234 
235  DominatorTree &DT;
236  DIBuilder DIB;
237 
238  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
239  AssumptionCache *AC;
240 
241  const SimplifyQuery SQ;
242 
243  /// Reverse mapping of Allocas.
245 
246  /// The PhiNodes we're adding.
247  ///
248  /// That map is used to simplify some Phi nodes as we iterate over it, so
249  /// it should have deterministic iterators. We could use a MapVector, but
250  /// since we already maintain a map from BasicBlock* to a stable numbering
251  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
253 
254  /// For each PHI node, keep track of which entry in Allocas it corresponds
255  /// to.
256  DenseMap<PHINode *, unsigned> PhiToAllocaMap;
257 
258  /// If we are updating an AliasSetTracker, then for each alloca that is of
259  /// pointer type, we keep track of what to copyValue to the inserted PHI
260  /// nodes here.
261  std::vector<Value *> PointerAllocaValues;
262 
263  /// For each alloca, we keep track of the dbg.declare intrinsic that
264  /// describes it, if any, so that we can convert it to a dbg.value
265  /// intrinsic if the alloca gets promoted.
267 
268  /// The set of basic blocks the renamer has already visited.
270 
271  /// Contains a stable numbering of basic blocks to avoid non-determinstic
272  /// behavior.
274 
275  /// Lazily compute the number of predecessors a block has.
277 
278 public:
279  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
280  AssumptionCache *AC)
281  : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
282  DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
283  AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
284  nullptr, &DT, AC) {}
285 
286  void run();
287 
288 private:
289  void RemoveFromAllocasList(unsigned &AllocaIdx) {
290  Allocas[AllocaIdx] = Allocas.back();
291  Allocas.pop_back();
292  --AllocaIdx;
293  }
294 
295  unsigned getNumPreds(const BasicBlock *BB) {
296  unsigned &NP = BBNumPreds[BB];
297  if (NP == 0)
298  NP = pred_size(BB) + 1;
299  return NP - 1;
300  }
301 
302  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
303  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
304  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
305  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
306  RenamePassData::ValVector &IncVals,
307  RenamePassData::LocationVector &IncLocs,
308  std::vector<RenamePassData> &Worklist);
309  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
310 };
311 
312 } // end anonymous namespace
313 
314 /// Given a LoadInst LI this adds assume(LI != null) after it.
316  Function *AssumeIntrinsic =
317  Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
318  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
320  LoadNotNull->insertAfter(LI);
321  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
322  CI->insertAfter(LoadNotNull);
323  AC->registerAssumption(CI);
324 }
325 
327  // Knowing that this alloca is promotable, we know that it's safe to kill all
328  // instructions except for load and store.
329 
330  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
331  Instruction *I = cast<Instruction>(*UI);
332  ++UI;
333  if (isa<LoadInst>(I) || isa<StoreInst>(I))
334  continue;
335 
336  if (!I->getType()->isVoidTy()) {
337  // The only users of this bitcast/GEP instruction are lifetime intrinsics.
338  // Follow the use/def chain to erase them now instead of leaving it for
339  // dead code elimination later.
340  for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
341  Instruction *Inst = cast<Instruction>(*UUI);
342  ++UUI;
343  Inst->eraseFromParent();
344  }
345  }
346  I->eraseFromParent();
347  }
348 }
349 
/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
///
/// \param Info  analysis results for \p AI; Info.OnlyStore must be set.
/// \param LBI   per-block instruction numbering, used for same-block
///              load-vs-store ordering queries.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
                                     LargeBlockInfo &LBI, const DataLayout &DL,
                                     DominatorTree &DT, AssumptionCache *AC) {
  StoreInst *OnlyStore = Info.OnlyStore;
  // A non-instruction stored value (constant/argument/global) dominates
  // everything, so the dominance checks below can be skipped.
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  // Lazily computed index of the store within its block (-1 = not yet).
  int StoreIndex = -1;

  // Clear out UsingBlocks. We will reconstruct it here if needed.
  Info.UsingBlocks.clear();

  // Iterate with a pre-incremented iterator because loads are erased
  // inside the loop.
  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (!isa<LoadInst>(UserInst)) {
      assert(UserInst == OnlyStore && "Should only have load/stores");
      continue;
    }
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca. We can do this if the value is
    // dominated by the store. If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) { // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first. If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
      } else if (LI->getParent() != StoreBB &&
                 !DT.dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationships. If the store doesn't dom the use, bail
        // out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = UndefValue::get(LI->getType());

    // If the load was marked as nonnull we don't want to lose
    // that information when we erase this Load. So we preserve
    // it with an assume.
    if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
        !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
      addAssumeNonNull(AC, LI);

    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Finally, after the scan, check to see if the store is all that is left.
  if (!Info.UsingBlocks.empty())
    return false; // If not, we'll have to fall back for the remainder.

  // Record debuginfo for the store and remove the declaration's
  // debuginfo.
  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
    DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
    ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
    DII->eraseFromParent();
    LBI.deleteValue(DII);
  }
  // Remove the (now dead) store and alloca.
  Info.OnlyStore->eraseFromParent();
  LBI.deleteValue(Info.OnlyStore);

  AI->eraseFromParent();
  LBI.deleteValue(AI);
  return true;
}
443 
444 /// Many allocas are only used within a single basic block. If this is the
445 /// case, avoid traversing the CFG and inserting a lot of potentially useless
446 /// PHI nodes by just performing a single linear pass over the basic block
447 /// using the Alloca.
448 ///
449 /// If we cannot promote this alloca (because it is read before it is written),
450 /// return false. This is necessary in cases where, due to control flow, the
451 /// alloca is undefined only on some control flow paths. e.g. code like
452 /// this is correct in LLVM IR:
453 /// // A is an alloca with no stores so far
454 /// for (...) {
455 /// int t = *A;
456 /// if (!first_iteration)
457 /// use(t);
458 /// *A = 42;
459 /// }
460 static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
461  LargeBlockInfo &LBI,
462  const DataLayout &DL,
463  DominatorTree &DT,
464  AssumptionCache *AC) {
465  // The trickiest case to handle is when we have large blocks. Because of this,
466  // this code is optimized assuming that large blocks happen. This does not
467  // significantly pessimize the small block case. This uses LargeBlockInfo to
468  // make it efficient to get the index of various operations in the block.
469 
470  // Walk the use-def list of the alloca, getting the locations of all stores.
471  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
472  StoresByIndexTy StoresByIndex;
473 
474  for (User *U : AI->users())
475  if (StoreInst *SI = dyn_cast<StoreInst>(U))
476  StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
477 
478  // Sort the stores by their index, making it efficient to do a lookup with a
479  // binary search.
480  llvm::sort(StoresByIndex, less_first());
481 
482  // Walk all of the loads from this alloca, replacing them with the nearest
483  // store above them, if any.
484  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
485  LoadInst *LI = dyn_cast<LoadInst>(*UI++);
486  if (!LI)
487  continue;
488 
489  unsigned LoadIdx = LBI.getInstructionIndex(LI);
490 
491  // Find the nearest store that has a lower index than this load.
492  StoresByIndexTy::iterator I =
493  std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
494  std::make_pair(LoadIdx,
495  static_cast<StoreInst *>(nullptr)),
496  less_first());
497  if (I == StoresByIndex.begin()) {
498  if (StoresByIndex.empty())
499  // If there are no stores, the load takes the undef value.
501  else
502  // There is no store before this load, bail out (load may be affected
503  // by the following stores - see main comment).
504  return false;
505  } else {
506  // Otherwise, there was a store before this load, the load takes its value.
507  // Note, if the load was marked as nonnull we don't want to lose that
508  // information when we erase it. So we preserve it with an assume.
509  Value *ReplVal = std::prev(I)->second->getOperand(0);
510  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
511  !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
512  addAssumeNonNull(AC, LI);
513 
514  // If the replacement value is the load, this must occur in unreachable
515  // code.
516  if (ReplVal == LI)
517  ReplVal = UndefValue::get(LI->getType());
518 
519  LI->replaceAllUsesWith(ReplVal);
520  }
521 
522  LI->eraseFromParent();
523  LBI.deleteValue(LI);
524  }
525 
526  // Remove the (now dead) stores and alloca.
527  while (!AI->use_empty()) {
528  StoreInst *SI = cast<StoreInst>(AI->user_back());
529  // Record debuginfo for the store before removing it.
530  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
531  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
532  ConvertDebugDeclareToDebugValue(DII, SI, DIB);
533  }
534  SI->eraseFromParent();
535  LBI.deleteValue(SI);
536  }
537 
538  AI->eraseFromParent();
539  LBI.deleteValue(AI);
540 
541  // The alloca's debuginfo can be removed as well.
542  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
543  DII->eraseFromParent();
544  LBI.deleteValue(DII);
545  }
546 
547  ++NumLocalPromoted;
548  return true;
549 }
550 
551 void PromoteMem2Reg::run() {
552  Function &F = *DT.getRoot()->getParent();
553 
554  AllocaDbgDeclares.resize(Allocas.size());
555 
556  AllocaInfo Info;
557  LargeBlockInfo LBI;
558  ForwardIDFCalculator IDF(DT);
559 
560  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
561  AllocaInst *AI = Allocas[AllocaNum];
562 
563  assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
564  assert(AI->getParent()->getParent() == &F &&
565  "All allocas should be in the same function, which is same as DF!");
566 
568 
569  if (AI->use_empty()) {
570  // If there are no uses of the alloca, just delete it now.
571  AI->eraseFromParent();
572 
573  // Remove the alloca from the Allocas list, since it has been processed
574  RemoveFromAllocasList(AllocaNum);
575  ++NumDeadAlloca;
576  continue;
577  }
578 
579  // Calculate the set of read and write-locations for each alloca. This is
580  // analogous to finding the 'uses' and 'definitions' of each variable.
581  Info.AnalyzeAlloca(AI);
582 
583  // If there is only a single store to this value, replace any loads of
584  // it that are directly dominated by the definition with the value stored.
585  if (Info.DefiningBlocks.size() == 1) {
586  if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
587  // The alloca has been processed, move on.
588  RemoveFromAllocasList(AllocaNum);
589  ++NumSingleStore;
590  continue;
591  }
592  }
593 
594  // If the alloca is only read and written in one basic block, just perform a
595  // linear sweep over the block to eliminate it.
596  if (Info.OnlyUsedInOneBlock &&
597  promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
598  // The alloca has been processed, move on.
599  RemoveFromAllocasList(AllocaNum);
600  continue;
601  }
602 
603  // If we haven't computed a numbering for the BB's in the function, do so
604  // now.
605  if (BBNumbers.empty()) {
606  unsigned ID = 0;
607  for (auto &BB : F)
608  BBNumbers[&BB] = ID++;
609  }
610 
611  // Remember the dbg.declare intrinsic describing this alloca, if any.
612  if (!Info.DbgDeclares.empty())
613  AllocaDbgDeclares[AllocaNum] = Info.DbgDeclares;
614 
615  // Keep the reverse mapping of the 'Allocas' array for the rename pass.
616  AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
617 
618  // At this point, we're committed to promoting the alloca using IDF's, and
619  // the standard SSA construction algorithm. Determine which blocks need PHI
620  // nodes and see if we can optimize out some work by avoiding insertion of
621  // dead phi nodes.
622 
623  // Unique the set of defining blocks for efficient lookup.
625  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());
626 
627  // Determine which blocks the value is live in. These are blocks which lead
628  // to uses.
629  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
630  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
631 
632  // At this point, we're committed to promoting the alloca using IDF's, and
633  // the standard SSA construction algorithm. Determine which blocks need phi
634  // nodes and see if we can optimize out some work by avoiding insertion of
635  // dead phi nodes.
636  IDF.setLiveInBlocks(LiveInBlocks);
637  IDF.setDefiningBlocks(DefBlocks);
639  IDF.calculate(PHIBlocks);
640  if (PHIBlocks.size() > 1)
641  llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
642  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
643  });
644 
645  unsigned CurrentVersion = 0;
646  for (BasicBlock *BB : PHIBlocks)
647  QueuePhiNode(BB, AllocaNum, CurrentVersion);
648  }
649 
650  if (Allocas.empty())
651  return; // All of the allocas must have been trivial!
652 
653  LBI.clear();
654 
655  // Set the incoming values for the basic block to be null values for all of
656  // the alloca's. We do this in case there is a load of a value that has not
657  // been stored yet. In this case, it will get this null value.
658  RenamePassData::ValVector Values(Allocas.size());
659  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
660  Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());
661 
662  // When handling debug info, treat all incoming values as if they have unknown
663  // locations until proven otherwise.
664  RenamePassData::LocationVector Locations(Allocas.size());
665 
666  // Walks all basic blocks in the function performing the SSA rename algorithm
667  // and inserting the phi nodes we marked as necessary
668  std::vector<RenamePassData> RenamePassWorkList;
669  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
670  std::move(Locations));
671  do {
672  RenamePassData RPD = std::move(RenamePassWorkList.back());
673  RenamePassWorkList.pop_back();
674  // RenamePass may add new worklist entries.
675  RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
676  } while (!RenamePassWorkList.empty());
677 
678  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
679  Visited.clear();
680 
681  // Remove the allocas themselves from the function.
682  for (Instruction *A : Allocas) {
683  // If there are any uses of the alloca instructions left, they must be in
684  // unreachable basic blocks that were not processed by walking the dominator
685  // tree. Just delete the users now.
686  if (!A->use_empty())
687  A->replaceAllUsesWith(UndefValue::get(A->getType()));
688  A->eraseFromParent();
689  }
690 
691  // Remove alloca's dbg.declare instrinsics from the function.
692  for (auto &Declares : AllocaDbgDeclares)
693  for (auto *DII : Declares)
694  DII->eraseFromParent();
695 
696  // Loop over all of the PHI nodes and see if there are any that we can get
697  // rid of because they merge all of the same incoming values. This can
698  // happen due to undef values coming into the PHI nodes. This process is
699  // iterative, because eliminating one PHI node can cause others to be removed.
700  bool EliminatedAPHI = true;
701  while (EliminatedAPHI) {
702  EliminatedAPHI = false;
703 
704  // Iterating over NewPhiNodes is deterministic, so it is safe to try to
705  // simplify and RAUW them as we go. If it was not, we could add uses to
706  // the values we replace with in a non-deterministic order, thus creating
707  // non-deterministic def->use chains.
708  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
709  I = NewPhiNodes.begin(),
710  E = NewPhiNodes.end();
711  I != E;) {
712  PHINode *PN = I->second;
713 
714  // If this PHI node merges one value and/or undefs, get the value.
715  if (Value *V = SimplifyInstruction(PN, SQ)) {
716  PN->replaceAllUsesWith(V);
717  PN->eraseFromParent();
718  NewPhiNodes.erase(I++);
719  EliminatedAPHI = true;
720  continue;
721  }
722  ++I;
723  }
724  }
725 
726  // At this point, the renamer has added entries to PHI nodes for all reachable
727  // code. Unfortunately, there may be unreachable blocks which the renamer
728  // hasn't traversed. If this is the case, the PHI nodes may not
729  // have incoming values for all predecessors. Loop over all PHI nodes we have
730  // created, inserting undef values if they are missing any incoming values.
731  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
732  I = NewPhiNodes.begin(),
733  E = NewPhiNodes.end();
734  I != E; ++I) {
735  // We want to do this once per basic block. As such, only process a block
736  // when we find the PHI that is the first entry in the block.
737  PHINode *SomePHI = I->second;
738  BasicBlock *BB = SomePHI->getParent();
739  if (&BB->front() != SomePHI)
740  continue;
741 
742  // Only do work here if there the PHI nodes are missing incoming values. We
743  // know that all PHI nodes that were inserted in a block will have the same
744  // number of incoming values, so we can just check any of them.
745  if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
746  continue;
747 
748  // Get the preds for BB.
750 
751  // Ok, now we know that all of the PHI nodes are missing entries for some
752  // basic blocks. Start by sorting the incoming predecessors for efficient
753  // access.
754  auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
755  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
756  };
757  llvm::sort(Preds, CompareBBNumbers);
758 
759  // Now we loop through all BB's which have entries in SomePHI and remove
760  // them from the Preds list.
761  for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
762  // Do a log(n) search of the Preds list for the entry we want.
764  Preds.begin(), Preds.end(), SomePHI->getIncomingBlock(i),
765  CompareBBNumbers);
766  assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
767  "PHI node has entry for a block which is not a predecessor!");
768 
769  // Remove the entry
770  Preds.erase(EntIt);
771  }
772 
773  // At this point, the blocks left in the preds list must have dummy
774  // entries inserted into every PHI nodes for the block. Update all the phi
775  // nodes in this block that we are inserting (there could be phis before
776  // mem2reg runs).
777  unsigned NumBadPreds = SomePHI->getNumIncomingValues();
778  BasicBlock::iterator BBI = BB->begin();
779  while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
780  SomePHI->getNumIncomingValues() == NumBadPreds) {
781  Value *UndefVal = UndefValue::get(SomePHI->getType());
782  for (BasicBlock *Pred : Preds)
783  SomePHI->addIncoming(UndefVal, Pred);
784  }
785  }
786 
787  NewPhiNodes.clear();
788 }
789 
790 /// Determine which blocks the value is live in.
791 ///
792 /// These are blocks which lead to uses. Knowing this allows us to avoid
793 /// inserting PHI nodes into blocks which don't lead to uses (thus, the
794 /// inserted phi nodes would be dead).
796  AllocaInst *AI, AllocaInfo &Info,
797  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
798  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
799  // To determine liveness, we must iterate through the predecessors of blocks
800  // where the def is live. Blocks are added to the worklist if we need to
801  // check their predecessors. Start with all the using blocks.
802  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
803  Info.UsingBlocks.end());
804 
805  // If any of the using blocks is also a definition block, check to see if the
806  // definition occurs before or after the use. If it happens before the use,
807  // the value isn't really live-in.
808  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
809  BasicBlock *BB = LiveInBlockWorklist[i];
810  if (!DefBlocks.count(BB))
811  continue;
812 
813  // Okay, this is a block that both uses and defines the value. If the first
814  // reference to the alloca is a def (store), then we know it isn't live-in.
815  for (BasicBlock::iterator I = BB->begin();; ++I) {
816  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
817  if (SI->getOperand(1) != AI)
818  continue;
819 
820  // We found a store to the alloca before a load. The alloca is not
821  // actually live-in here.
822  LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
823  LiveInBlockWorklist.pop_back();
824  --i;
825  --e;
826  break;
827  }
828 
829  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
830  if (LI->getOperand(0) != AI)
831  continue;
832 
833  // Okay, we found a load before a store to the alloca. It is actually
834  // live into this block.
835  break;
836  }
837  }
838  }
839 
840  // Now that we have a set of blocks where the phi is live-in, recursively add
841  // their predecessors until we find the full region the value is live.
842  while (!LiveInBlockWorklist.empty()) {
843  BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
844 
845  // The block really is live in here, insert it into the set. If already in
846  // the set, then it has already been processed.
847  if (!LiveInBlocks.insert(BB).second)
848  continue;
849 
850  // Since the value is live into BB, it is either defined in a predecessor or
851  // live into it to. Add the preds to the worklist unless they are a
852  // defining block.
853  for (BasicBlock *P : predecessors(BB)) {
854  // The value is not live into a predecessor if it defines the value.
855  if (DefBlocks.count(P))
856  continue;
857 
858  // Otherwise it is, add to the worklist.
859  LiveInBlockWorklist.push_back(P);
860  }
861  }
862 }
863 
864 /// Queue a phi-node to be added to a basic-block for a specific Alloca.
865 ///
866 /// Returns true if there wasn't already a phi-node for that variable
867 bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
868  unsigned &Version) {
869  // Look up the basic-block in question.
870  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];
871 
872  // If the BB already has a phi node added for the i'th alloca then we're done!
873  if (PN)
874  return false;
875 
876  // Create a PhiNode using the dereferenced type... and add the phi-node to the
877  // BasicBlock.
878  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
879  Allocas[AllocaNo]->getName() + "." + Twine(Version++),
880  &BB->front());
881  ++NumPHIInsert;
882  PhiToAllocaMap[PN] = AllocaNo;
883  return true;
884 }
885 
886 /// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
887 /// create a merged location incorporating \p DL, or to set \p DL directly.
889  bool ApplyMergedLoc) {
890  if (ApplyMergedLoc)
891  PN->applyMergedLocation(PN->getDebugLoc(), DL);
892  else
893  PN->setDebugLoc(DL);
894 }
895 
896 /// Recursively traverse the CFG of the function, renaming loads and
897 /// stores to the allocas which we are promoting.
898 ///
899 /// IncomingVals indicates what value each Alloca contains on exit from the
900 /// predecessor block Pred.
901 void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
902  RenamePassData::ValVector &IncomingVals,
903  RenamePassData::LocationVector &IncomingLocs,
904  std::vector<RenamePassData> &Worklist) {
905 NextIteration:
906  // If we are inserting any phi nodes into this BB, they will already be in the
907  // block.
908  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
909  // If we have PHI nodes to update, compute the number of edges from Pred to
910  // BB.
911  if (PhiToAllocaMap.count(APN)) {
912  // We want to be able to distinguish between PHI nodes being inserted by
913  // this invocation of mem2reg from those phi nodes that already existed in
914  // the IR before mem2reg was run. We determine that APN is being inserted
915  // because it is missing incoming edges. All other PHI nodes being
916  // inserted by this pass of mem2reg will have the same number of incoming
917  // operands so far. Remember this count.
918  unsigned NewPHINumOperands = APN->getNumOperands();
919 
920  unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
921  assert(NumEdges && "Must be at least one edge from Pred to BB!");
922 
923  // Add entries for all the phis.
924  BasicBlock::iterator PNI = BB->begin();
925  do {
926  unsigned AllocaNo = PhiToAllocaMap[APN];
927 
928  // Update the location of the phi node.
929  updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
930  APN->getNumIncomingValues() > 0);
931 
932  // Add N incoming values to the PHI node.
933  for (unsigned i = 0; i != NumEdges; ++i)
934  APN->addIncoming(IncomingVals[AllocaNo], Pred);
935 
936  // The currently active variable for this block is now the PHI.
937  IncomingVals[AllocaNo] = APN;
938  for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[AllocaNo])
939  ConvertDebugDeclareToDebugValue(DII, APN, DIB);
940 
941  // Get the next phi node.
942  ++PNI;
943  APN = dyn_cast<PHINode>(PNI);
944  if (!APN)
945  break;
946 
947  // Verify that it is missing entries. If not, it is not being inserted
948  // by this mem2reg invocation so we want to ignore it.
949  } while (APN->getNumOperands() == NewPHINumOperands);
950  }
951  }
952 
953  // Don't revisit blocks.
954  if (!Visited.insert(BB).second)
955  return;
956 
957  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
958  Instruction *I = &*II++; // get the instruction, increment iterator
959 
960  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
961  AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
962  if (!Src)
963  continue;
964 
965  DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
966  if (AI == AllocaLookup.end())
967  continue;
968 
969  Value *V = IncomingVals[AI->second];
970 
971  // If the load was marked as nonnull we don't want to lose
972  // that information when we erase this Load. So we preserve
973  // it with an assume.
974  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
975  !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
976  addAssumeNonNull(AC, LI);
977 
978  // Anything using the load now uses the current value.
979  LI->replaceAllUsesWith(V);
980  BB->getInstList().erase(LI);
981  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
982  // Delete this instruction and mark the name as the current holder of the
983  // value
984  AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
985  if (!Dest)
986  continue;
987 
988  DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
989  if (ai == AllocaLookup.end())
990  continue;
991 
992  // what value were we writing?
993  unsigned AllocaNo = ai->second;
994  IncomingVals[AllocaNo] = SI->getOperand(0);
995 
996  // Record debuginfo for the store before removing it.
997  IncomingLocs[AllocaNo] = SI->getDebugLoc();
998  for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[ai->second])
1000  BB->getInstList().erase(SI);
1001  }
1002  }
1003 
1004  // 'Recurse' to our successors.
1005  succ_iterator I = succ_begin(BB), E = succ_end(BB);
1006  if (I == E)
1007  return;
1008 
1009  // Keep track of the successors so we don't visit the same successor twice
1010  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;
1011 
1012  // Handle the first successor without using the worklist.
1013  VisitedSuccs.insert(*I);
1014  Pred = BB;
1015  BB = *I;
1016  ++I;
1017 
1018  for (; I != E; ++I)
1019  if (VisitedSuccs.insert(*I).second)
1020  Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);
1021 
1022  goto NextIteration;
1023 }
1024 
1026  AssumptionCache *AC) {
1027  // If there is nothing to do, bail out...
1028  if (Allocas.empty())
1029  return;
1030 
1031  PromoteMem2Reg(Allocas, DT, AC).run();
1032 }
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:68
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:158
iterator erase(iterator where)
Definition: ilist.h:267
This class represents lattice values for constants.
Definition: AllocatorList.h:24
iterator begin() const
Definition: ArrayRef.h:137
static void removeLifetimeIntrinsicUsers(AllocaInst *AI)
This class represents a function call, abstracting a target machine's calling convention.
A cache of @llvm.assume calls within a function.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:34
static CallInst * Create(Value *Func, ArrayRef< Value *> Args, ArrayRef< OperandBundleDef > Bundles=None, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
An instruction for reading from memory.
Definition: Instructions.h:168
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:31
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:265
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:269
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:365
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
void setDefiningBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
static StringRef getName(Value *V)
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:103
#define F(x, y, z)
Definition: MD5.cpp:55
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
This is the common base class for debug info intrinsics for variables.
Definition: IntrinsicInst.h:88
This class represents a no-op cast from one type to another.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:221
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1276
An instruction for storing to memory.
Definition: Instructions.h:310
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:430
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:145
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1021
Value * getOperand(unsigned i) const
Definition: User.h:170
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:1246
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:106
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:176
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:141
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:843
#define P(N)
bool erase(const KeyT &Val)
Definition: DenseMap.h:298
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:308
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL, bool ApplyMergedLoc)
Update the debug location of a phi.
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
static void ComputeLiveInBlocks(const SmallPtrSetImpl< BasicBlock *> &UsingBlocks, const SmallPtrSetImpl< BasicBlock *> &DefBlocks, SmallPtrSetImpl< BasicBlock *> &LiveInBlocks, PredIteratorCache &PredCache)
Given sets of UsingBlocks and DefBlocks, compute the set of LiveInBlocks.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:129
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
const Instruction & front() const
Definition: BasicBlock.h:281
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:113
const Instruction & back() const
Definition: BasicBlock.h:283
This instruction compares its operands according to the predicate given to the constructor.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:495
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI)
Given a LoadInst LI this adds assume(LI != null) after it.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1415
iterator erase(const_iterator CI)
Definition: SmallVector.h:445
size_t size() const
Definition: SmallVector.h:53
Compute iterated dominance frontiers using a linear time algorithm.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:220
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1110
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:334
void setLiveInBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is live on entry to the block...
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
TinyPtrVector< DbgVariableIntrinsic * > FindDbgAddrUses(Value *V)
Finds all intrinsics declaring local variables as living in the memory that 'V' points to...
Definition: Local.cpp:1473
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Many allocas are only used within a single basic block.
void calculate(SmallVectorImpl< BasicBlock *> &IDFBlocks)
Calculate iterated dominance frontiers.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:249
Module.h This file contains the declarations for the Module class.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:64
iterator end() const
Definition: ArrayRef.h:138
void applyMergedLocation(const DILocation *LocA, const DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
Definition: DebugInfo.cpp:689
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:125
unsigned getNumIncomingValues() const
Return the number of incoming edges.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:56
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
iterator_range< user_iterator > users()
Definition: Value.h:400
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:212
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:133
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:311
void registerAssumption(CallInst *CI)
Add an @llvm.assume intrinsic to this function&#39;s cache.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction...
Definition: Instruction.cpp:80
unsigned pred_size(const BasicBlock *BB)
Get the number of predecessors of BB.
Definition: CFG.h:122
#define I(x, y, z)
Definition: MD5.cpp:58
iterator end()
Definition: DenseMap.h:109
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
Determine the iterated dominance frontier, given a set of defining blocks, and optionally, a set of live-in blocks.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
Definition: Value.h:376
const BasicBlock & front() const
Definition: Function.h:663
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Rewrite as many loads as possible given a single store.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:566
LLVM Value Representation.
Definition: Value.h:73
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===---------------------------------------------------------------——===// Dbg Intrinsic utilities ...
Definition: Local.cpp:1277
void PromoteMemToReg(ArrayRef< AllocaInst *> Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
NodeT * getRoot() const
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
const uint64_t Version
Definition: InstrProf.h:895
bool use_empty() const
Definition: Value.h:323
Function object to check whether the first component of a std::pair compares less than the first comp...
Definition: STLExtras.h:954
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:144
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:67
an instruction to allocate memory on the stack
Definition: Instructions.h:60
user_iterator user_end()
Definition: Value.h:384