// Source extracted from the LLVM 9.0.0svn doxygen page for
// PromoteMemoryToRegister.cpp (kept here as a comment so the file
// remains valid C++).
1 //===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file promotes memory references to be register references. It promotes
10 // alloca instructions which only have loads and stores as uses. An alloca is
11 // transformed by using iterated dominator frontiers to place PHI nodes, then
12 // traversing the function in depth-first order to rewrite loads and stores as
13 // appropriate.
14 //
15 //===----------------------------------------------------------------------===//
16 
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>
54 
using namespace llvm;

// Tag used by LLVM_DEBUG / -debug-only to select this pass's debug output.
#define DEBUG_TYPE "mem2reg"

// Pass-wide counters, reported with -stats.
STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
63 
65  // FIXME: If the memory unit is of pointer or integer type, we can permit
66  // assignments to subsections of the memory unit.
67  unsigned AS = AI->getType()->getAddressSpace();
68 
69  // Only allow direct and non-volatile loads and stores...
70  for (const User *U : AI->users()) {
71  if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
72  // Note that atomic loads can be transformed; atomic semantics do
73  // not have any meaning for a local alloca.
74  if (LI->isVolatile())
75  return false;
76  } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
77  if (SI->getOperand(0) == AI)
78  return false; // Don't allow a store OF the AI, only INTO the AI.
79  // Note that atomic stores can be transformed; atomic semantics do
80  // not have any meaning for a local alloca.
81  if (SI->isVolatile())
82  return false;
83  } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
84  if (!II->isLifetimeStartOrEnd())
85  return false;
86  } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
87  if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
88  return false;
89  if (!onlyUsedByLifetimeMarkers(BCI))
90  return false;
91  } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
92  if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
93  return false;
94  if (!GEPI->hasAllZeroIndices())
95  return false;
96  if (!onlyUsedByLifetimeMarkers(GEPI))
97  return false;
98  } else {
99  return false;
100  }
101  }
102 
103  return true;
104 }
105 
106 namespace {
107 
108 struct AllocaInfo {
109  SmallVector<BasicBlock *, 32> DefiningBlocks;
110  SmallVector<BasicBlock *, 32> UsingBlocks;
111 
112  StoreInst *OnlyStore;
113  BasicBlock *OnlyBlock;
114  bool OnlyUsedInOneBlock;
115 
116  Value *AllocaPointerVal;
118 
119  void clear() {
120  DefiningBlocks.clear();
121  UsingBlocks.clear();
122  OnlyStore = nullptr;
123  OnlyBlock = nullptr;
124  OnlyUsedInOneBlock = true;
125  AllocaPointerVal = nullptr;
126  DbgDeclares.clear();
127  }
128 
129  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
130  /// by the rest of the pass to reason about the uses of this alloca.
131  void AnalyzeAlloca(AllocaInst *AI) {
132  clear();
133 
134  // As we scan the uses of the alloca instruction, keep track of stores,
135  // and decide whether all of the loads and stores to the alloca are within
136  // the same basic block.
137  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
138  Instruction *User = cast<Instruction>(*UI++);
139 
140  if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
141  // Remember the basic blocks which define new values for the alloca
142  DefiningBlocks.push_back(SI->getParent());
143  AllocaPointerVal = SI->getOperand(0);
144  OnlyStore = SI;
145  } else {
146  LoadInst *LI = cast<LoadInst>(User);
147  // Otherwise it must be a load instruction, keep track of variable
148  // reads.
149  UsingBlocks.push_back(LI->getParent());
150  AllocaPointerVal = LI;
151  }
152 
153  if (OnlyUsedInOneBlock) {
154  if (!OnlyBlock)
155  OnlyBlock = User->getParent();
156  else if (OnlyBlock != User->getParent())
157  OnlyUsedInOneBlock = false;
158  }
159  }
160 
161  DbgDeclares = FindDbgAddrUses(AI);
162  }
163 };
164 
165 /// Data package used by RenamePass().
166 struct RenamePassData {
167  using ValVector = std::vector<Value *>;
168  using LocationVector = std::vector<DebugLoc>;
169 
170  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
171  : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}
172 
173  BasicBlock *BB;
174  BasicBlock *Pred;
175  ValVector Values;
176  LocationVector Locations;
177 };
178 
179 /// This assigns and keeps a per-bb relative ordering of load/store
180 /// instructions in the block that directly load or store an alloca.
181 ///
182 /// This functionality is important because it avoids scanning large basic
183 /// blocks multiple times when promoting many allocas in the same block.
184 class LargeBlockInfo {
185  /// For each instruction that we track, keep the index of the
186  /// instruction.
187  ///
188  /// The index starts out as the number of the instruction from the start of
189  /// the block.
191 
192 public:
193 
194  /// This code only looks at accesses to allocas.
195  static bool isInterestingInstruction(const Instruction *I) {
196  return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
197  (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
198  }
199 
200  /// Get or calculate the index of the specified instruction.
201  unsigned getInstructionIndex(const Instruction *I) {
202  assert(isInterestingInstruction(I) &&
203  "Not a load/store to/from an alloca?");
204 
205  // If we already have this instruction number, return it.
207  if (It != InstNumbers.end())
208  return It->second;
209 
210  // Scan the whole block to get the instruction. This accumulates
211  // information for every interesting instruction in the block, in order to
212  // avoid gratuitus rescans.
213  const BasicBlock *BB = I->getParent();
214  unsigned InstNo = 0;
215  for (const Instruction &BBI : *BB)
216  if (isInterestingInstruction(&BBI))
217  InstNumbers[&BBI] = InstNo++;
218  It = InstNumbers.find(I);
219 
220  assert(It != InstNumbers.end() && "Didn't insert instruction?");
221  return It->second;
222  }
223 
224  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
225 
226  void clear() { InstNumbers.clear(); }
227 };
228 
229 struct PromoteMem2Reg {
230  /// The alloca instructions being promoted.
231  std::vector<AllocaInst *> Allocas;
232 
233  DominatorTree &DT;
234  DIBuilder DIB;
235 
236  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
237  AssumptionCache *AC;
238 
239  const SimplifyQuery SQ;
240 
241  /// Reverse mapping of Allocas.
243 
244  /// The PhiNodes we're adding.
245  ///
246  /// That map is used to simplify some Phi nodes as we iterate over it, so
247  /// it should have deterministic iterators. We could use a MapVector, but
248  /// since we already maintain a map from BasicBlock* to a stable numbering
249  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
251 
252  /// For each PHI node, keep track of which entry in Allocas it corresponds
253  /// to.
254  DenseMap<PHINode *, unsigned> PhiToAllocaMap;
255 
256  /// If we are updating an AliasSetTracker, then for each alloca that is of
257  /// pointer type, we keep track of what to copyValue to the inserted PHI
258  /// nodes here.
259  std::vector<Value *> PointerAllocaValues;
260 
261  /// For each alloca, we keep track of the dbg.declare intrinsic that
262  /// describes it, if any, so that we can convert it to a dbg.value
263  /// intrinsic if the alloca gets promoted.
265 
266  /// The set of basic blocks the renamer has already visited.
268 
269  /// Contains a stable numbering of basic blocks to avoid non-determinstic
270  /// behavior.
272 
273  /// Lazily compute the number of predecessors a block has.
275 
276 public:
277  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
278  AssumptionCache *AC)
279  : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
280  DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
281  AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
282  nullptr, &DT, AC) {}
283 
284  void run();
285 
286 private:
287  void RemoveFromAllocasList(unsigned &AllocaIdx) {
288  Allocas[AllocaIdx] = Allocas.back();
289  Allocas.pop_back();
290  --AllocaIdx;
291  }
292 
293  unsigned getNumPreds(const BasicBlock *BB) {
294  unsigned &NP = BBNumPreds[BB];
295  if (NP == 0)
296  NP = pred_size(BB) + 1;
297  return NP - 1;
298  }
299 
300  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
301  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
302  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
303  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
304  RenamePassData::ValVector &IncVals,
305  RenamePassData::LocationVector &IncLocs,
306  std::vector<RenamePassData> &Worklist);
307  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
308 };
309 
310 } // end anonymous namespace
311 
312 /// Given a LoadInst LI this adds assume(LI != null) after it.
314  Function *AssumeIntrinsic =
315  Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
316  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
318  LoadNotNull->insertAfter(LI);
319  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
320  CI->insertAfter(LoadNotNull);
321  AC->registerAssumption(CI);
322 }
323 
325  // Knowing that this alloca is promotable, we know that it's safe to kill all
326  // instructions except for load and store.
327 
328  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
329  Instruction *I = cast<Instruction>(*UI);
330  ++UI;
331  if (isa<LoadInst>(I) || isa<StoreInst>(I))
332  continue;
333 
334  if (!I->getType()->isVoidTy()) {
335  // The only users of this bitcast/GEP instruction are lifetime intrinsics.
336  // Follow the use/def chain to erase them now instead of leaving it for
337  // dead code elimination later.
338  for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
339  Instruction *Inst = cast<Instruction>(*UUI);
340  ++UUI;
341  Inst->eraseFromParent();
342  }
343  }
344  I->eraseFromParent();
345  }
346 }
347 
/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
                                     LargeBlockInfo &LBI, const DataLayout &DL,
                                     DominatorTree &DT, AssumptionCache *AC) {
  StoreInst *OnlyStore = Info.OnlyStore;
  // A stored value that is not an Instruction (constant, argument, global)
  // trivially dominates every load, so no dominance queries are needed.
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1; // Lazily-computed intra-block index of OnlyStore.

  // Clear out UsingBlocks.  We will reconstruct it here if needed.
  Info.UsingBlocks.clear();

  // Walk all users; the iterator is advanced before any erasure below so it
  // stays valid while loads are deleted.
  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (!isa<LoadInst>(UserInst)) {
      assert(UserInst == OnlyStore && "Should only have load/stores");
      continue;
    }
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca.  We can do this if the value is
    // dominated by the store.  If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) { // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first.  If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
      } else if (LI->getParent() != StoreBB &&
                 !DT.dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationships.  If the store doesn't dom the use, bail
        // out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = UndefValue::get(LI->getType());

    // If the load was marked as nonnull we don't want to lose
    // that information when we erase this Load.  So we preserve
    // it with an assume.
    if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
        !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
      addAssumeNonNull(AC, LI);

    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Finally, after the scan, check to see if the store is all that is left.
  if (!Info.UsingBlocks.empty())
    return false; // If not, we'll have to fall back for the remainder.

  // Record debuginfo for the store and remove the declaration's
  // debuginfo.
  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
    DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
    ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
    DII->eraseFromParent();
    LBI.deleteValue(DII);
  }
  // Remove the (now dead) store and alloca.
  Info.OnlyStore->eraseFromParent();
  LBI.deleteValue(Info.OnlyStore);

  AI->eraseFromParent();
  LBI.deleteValue(AI);
  return true;
}
441 
442 /// Many allocas are only used within a single basic block. If this is the
443 /// case, avoid traversing the CFG and inserting a lot of potentially useless
444 /// PHI nodes by just performing a single linear pass over the basic block
445 /// using the Alloca.
446 ///
447 /// If we cannot promote this alloca (because it is read before it is written),
448 /// return false. This is necessary in cases where, due to control flow, the
449 /// alloca is undefined only on some control flow paths. e.g. code like
450 /// this is correct in LLVM IR:
451 /// // A is an alloca with no stores so far
452 /// for (...) {
453 /// int t = *A;
454 /// if (!first_iteration)
455 /// use(t);
456 /// *A = 42;
457 /// }
458 static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
459  LargeBlockInfo &LBI,
460  const DataLayout &DL,
461  DominatorTree &DT,
462  AssumptionCache *AC) {
463  // The trickiest case to handle is when we have large blocks. Because of this,
464  // this code is optimized assuming that large blocks happen. This does not
465  // significantly pessimize the small block case. This uses LargeBlockInfo to
466  // make it efficient to get the index of various operations in the block.
467 
468  // Walk the use-def list of the alloca, getting the locations of all stores.
469  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
470  StoresByIndexTy StoresByIndex;
471 
472  for (User *U : AI->users())
473  if (StoreInst *SI = dyn_cast<StoreInst>(U))
474  StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
475 
476  // Sort the stores by their index, making it efficient to do a lookup with a
477  // binary search.
478  llvm::sort(StoresByIndex, less_first());
479 
480  // Walk all of the loads from this alloca, replacing them with the nearest
481  // store above them, if any.
482  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
483  LoadInst *LI = dyn_cast<LoadInst>(*UI++);
484  if (!LI)
485  continue;
486 
487  unsigned LoadIdx = LBI.getInstructionIndex(LI);
488 
489  // Find the nearest store that has a lower index than this load.
490  StoresByIndexTy::iterator I =
491  std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
492  std::make_pair(LoadIdx,
493  static_cast<StoreInst *>(nullptr)),
494  less_first());
495  if (I == StoresByIndex.begin()) {
496  if (StoresByIndex.empty())
497  // If there are no stores, the load takes the undef value.
499  else
500  // There is no store before this load, bail out (load may be affected
501  // by the following stores - see main comment).
502  return false;
503  } else {
504  // Otherwise, there was a store before this load, the load takes its value.
505  // Note, if the load was marked as nonnull we don't want to lose that
506  // information when we erase it. So we preserve it with an assume.
507  Value *ReplVal = std::prev(I)->second->getOperand(0);
508  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
509  !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
510  addAssumeNonNull(AC, LI);
511 
512  // If the replacement value is the load, this must occur in unreachable
513  // code.
514  if (ReplVal == LI)
515  ReplVal = UndefValue::get(LI->getType());
516 
517  LI->replaceAllUsesWith(ReplVal);
518  }
519 
520  LI->eraseFromParent();
521  LBI.deleteValue(LI);
522  }
523 
524  // Remove the (now dead) stores and alloca.
525  while (!AI->use_empty()) {
526  StoreInst *SI = cast<StoreInst>(AI->user_back());
527  // Record debuginfo for the store before removing it.
528  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
529  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
530  ConvertDebugDeclareToDebugValue(DII, SI, DIB);
531  }
532  SI->eraseFromParent();
533  LBI.deleteValue(SI);
534  }
535 
536  AI->eraseFromParent();
537  LBI.deleteValue(AI);
538 
539  // The alloca's debuginfo can be removed as well.
540  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
541  DII->eraseFromParent();
542  LBI.deleteValue(DII);
543  }
544 
545  ++NumLocalPromoted;
546  return true;
547 }
548 
549 void PromoteMem2Reg::run() {
550  Function &F = *DT.getRoot()->getParent();
551 
552  AllocaDbgDeclares.resize(Allocas.size());
553 
554  AllocaInfo Info;
555  LargeBlockInfo LBI;
556  ForwardIDFCalculator IDF(DT);
557 
558  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
559  AllocaInst *AI = Allocas[AllocaNum];
560 
561  assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
562  assert(AI->getParent()->getParent() == &F &&
563  "All allocas should be in the same function, which is same as DF!");
564 
566 
567  if (AI->use_empty()) {
568  // If there are no uses of the alloca, just delete it now.
569  AI->eraseFromParent();
570 
571  // Remove the alloca from the Allocas list, since it has been processed
572  RemoveFromAllocasList(AllocaNum);
573  ++NumDeadAlloca;
574  continue;
575  }
576 
577  // Calculate the set of read and write-locations for each alloca. This is
578  // analogous to finding the 'uses' and 'definitions' of each variable.
579  Info.AnalyzeAlloca(AI);
580 
581  // If there is only a single store to this value, replace any loads of
582  // it that are directly dominated by the definition with the value stored.
583  if (Info.DefiningBlocks.size() == 1) {
584  if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
585  // The alloca has been processed, move on.
586  RemoveFromAllocasList(AllocaNum);
587  ++NumSingleStore;
588  continue;
589  }
590  }
591 
592  // If the alloca is only read and written in one basic block, just perform a
593  // linear sweep over the block to eliminate it.
594  if (Info.OnlyUsedInOneBlock &&
595  promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
596  // The alloca has been processed, move on.
597  RemoveFromAllocasList(AllocaNum);
598  continue;
599  }
600 
601  // If we haven't computed a numbering for the BB's in the function, do so
602  // now.
603  if (BBNumbers.empty()) {
604  unsigned ID = 0;
605  for (auto &BB : F)
606  BBNumbers[&BB] = ID++;
607  }
608 
609  // Remember the dbg.declare intrinsic describing this alloca, if any.
610  if (!Info.DbgDeclares.empty())
611  AllocaDbgDeclares[AllocaNum] = Info.DbgDeclares;
612 
613  // Keep the reverse mapping of the 'Allocas' array for the rename pass.
614  AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
615 
616  // At this point, we're committed to promoting the alloca using IDF's, and
617  // the standard SSA construction algorithm. Determine which blocks need PHI
618  // nodes and see if we can optimize out some work by avoiding insertion of
619  // dead phi nodes.
620 
621  // Unique the set of defining blocks for efficient lookup.
623  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());
624 
625  // Determine which blocks the value is live in. These are blocks which lead
626  // to uses.
627  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
628  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
629 
630  // At this point, we're committed to promoting the alloca using IDF's, and
631  // the standard SSA construction algorithm. Determine which blocks need phi
632  // nodes and see if we can optimize out some work by avoiding insertion of
633  // dead phi nodes.
634  IDF.setLiveInBlocks(LiveInBlocks);
635  IDF.setDefiningBlocks(DefBlocks);
637  IDF.calculate(PHIBlocks);
638  if (PHIBlocks.size() > 1)
639  llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
640  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
641  });
642 
643  unsigned CurrentVersion = 0;
644  for (BasicBlock *BB : PHIBlocks)
645  QueuePhiNode(BB, AllocaNum, CurrentVersion);
646  }
647 
648  if (Allocas.empty())
649  return; // All of the allocas must have been trivial!
650 
651  LBI.clear();
652 
653  // Set the incoming values for the basic block to be null values for all of
654  // the alloca's. We do this in case there is a load of a value that has not
655  // been stored yet. In this case, it will get this null value.
656  RenamePassData::ValVector Values(Allocas.size());
657  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
658  Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());
659 
660  // When handling debug info, treat all incoming values as if they have unknown
661  // locations until proven otherwise.
662  RenamePassData::LocationVector Locations(Allocas.size());
663 
664  // Walks all basic blocks in the function performing the SSA rename algorithm
665  // and inserting the phi nodes we marked as necessary
666  std::vector<RenamePassData> RenamePassWorkList;
667  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
668  std::move(Locations));
669  do {
670  RenamePassData RPD = std::move(RenamePassWorkList.back());
671  RenamePassWorkList.pop_back();
672  // RenamePass may add new worklist entries.
673  RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
674  } while (!RenamePassWorkList.empty());
675 
676  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
677  Visited.clear();
678 
679  // Remove the allocas themselves from the function.
680  for (Instruction *A : Allocas) {
681  // If there are any uses of the alloca instructions left, they must be in
682  // unreachable basic blocks that were not processed by walking the dominator
683  // tree. Just delete the users now.
684  if (!A->use_empty())
685  A->replaceAllUsesWith(UndefValue::get(A->getType()));
686  A->eraseFromParent();
687  }
688 
689  // Remove alloca's dbg.declare instrinsics from the function.
690  for (auto &Declares : AllocaDbgDeclares)
691  for (auto *DII : Declares)
692  DII->eraseFromParent();
693 
694  // Loop over all of the PHI nodes and see if there are any that we can get
695  // rid of because they merge all of the same incoming values. This can
696  // happen due to undef values coming into the PHI nodes. This process is
697  // iterative, because eliminating one PHI node can cause others to be removed.
698  bool EliminatedAPHI = true;
699  while (EliminatedAPHI) {
700  EliminatedAPHI = false;
701 
702  // Iterating over NewPhiNodes is deterministic, so it is safe to try to
703  // simplify and RAUW them as we go. If it was not, we could add uses to
704  // the values we replace with in a non-deterministic order, thus creating
705  // non-deterministic def->use chains.
706  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
707  I = NewPhiNodes.begin(),
708  E = NewPhiNodes.end();
709  I != E;) {
710  PHINode *PN = I->second;
711 
712  // If this PHI node merges one value and/or undefs, get the value.
713  if (Value *V = SimplifyInstruction(PN, SQ)) {
714  PN->replaceAllUsesWith(V);
715  PN->eraseFromParent();
716  NewPhiNodes.erase(I++);
717  EliminatedAPHI = true;
718  continue;
719  }
720  ++I;
721  }
722  }
723 
724  // At this point, the renamer has added entries to PHI nodes for all reachable
725  // code. Unfortunately, there may be unreachable blocks which the renamer
726  // hasn't traversed. If this is the case, the PHI nodes may not
727  // have incoming values for all predecessors. Loop over all PHI nodes we have
728  // created, inserting undef values if they are missing any incoming values.
729  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
730  I = NewPhiNodes.begin(),
731  E = NewPhiNodes.end();
732  I != E; ++I) {
733  // We want to do this once per basic block. As such, only process a block
734  // when we find the PHI that is the first entry in the block.
735  PHINode *SomePHI = I->second;
736  BasicBlock *BB = SomePHI->getParent();
737  if (&BB->front() != SomePHI)
738  continue;
739 
740  // Only do work here if there the PHI nodes are missing incoming values. We
741  // know that all PHI nodes that were inserted in a block will have the same
742  // number of incoming values, so we can just check any of them.
743  if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
744  continue;
745 
746  // Get the preds for BB.
748 
749  // Ok, now we know that all of the PHI nodes are missing entries for some
750  // basic blocks. Start by sorting the incoming predecessors for efficient
751  // access.
752  auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
753  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
754  };
755  llvm::sort(Preds, CompareBBNumbers);
756 
757  // Now we loop through all BB's which have entries in SomePHI and remove
758  // them from the Preds list.
759  for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
760  // Do a log(n) search of the Preds list for the entry we want.
762  Preds.begin(), Preds.end(), SomePHI->getIncomingBlock(i),
763  CompareBBNumbers);
764  assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
765  "PHI node has entry for a block which is not a predecessor!");
766 
767  // Remove the entry
768  Preds.erase(EntIt);
769  }
770 
771  // At this point, the blocks left in the preds list must have dummy
772  // entries inserted into every PHI nodes for the block. Update all the phi
773  // nodes in this block that we are inserting (there could be phis before
774  // mem2reg runs).
775  unsigned NumBadPreds = SomePHI->getNumIncomingValues();
776  BasicBlock::iterator BBI = BB->begin();
777  while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
778  SomePHI->getNumIncomingValues() == NumBadPreds) {
779  Value *UndefVal = UndefValue::get(SomePHI->getType());
780  for (BasicBlock *Pred : Preds)
781  SomePHI->addIncoming(UndefVal, Pred);
782  }
783  }
784 
785  NewPhiNodes.clear();
786 }
787 
788 /// Determine which blocks the value is live in.
789 ///
790 /// These are blocks which lead to uses. Knowing this allows us to avoid
791 /// inserting PHI nodes into blocks which don't lead to uses (thus, the
792 /// inserted phi nodes would be dead).
794  AllocaInst *AI, AllocaInfo &Info,
795  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
796  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
797  // To determine liveness, we must iterate through the predecessors of blocks
798  // where the def is live. Blocks are added to the worklist if we need to
799  // check their predecessors. Start with all the using blocks.
800  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
801  Info.UsingBlocks.end());
802 
803  // If any of the using blocks is also a definition block, check to see if the
804  // definition occurs before or after the use. If it happens before the use,
805  // the value isn't really live-in.
806  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
807  BasicBlock *BB = LiveInBlockWorklist[i];
808  if (!DefBlocks.count(BB))
809  continue;
810 
811  // Okay, this is a block that both uses and defines the value. If the first
812  // reference to the alloca is a def (store), then we know it isn't live-in.
813  for (BasicBlock::iterator I = BB->begin();; ++I) {
814  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
815  if (SI->getOperand(1) != AI)
816  continue;
817 
818  // We found a store to the alloca before a load. The alloca is not
819  // actually live-in here.
820  LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
821  LiveInBlockWorklist.pop_back();
822  --i;
823  --e;
824  break;
825  }
826 
827  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
828  if (LI->getOperand(0) != AI)
829  continue;
830 
831  // Okay, we found a load before a store to the alloca. It is actually
832  // live into this block.
833  break;
834  }
835  }
836  }
837 
838  // Now that we have a set of blocks where the phi is live-in, recursively add
839  // their predecessors until we find the full region the value is live.
840  while (!LiveInBlockWorklist.empty()) {
841  BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
842 
843  // The block really is live in here, insert it into the set. If already in
844  // the set, then it has already been processed.
845  if (!LiveInBlocks.insert(BB).second)
846  continue;
847 
848  // Since the value is live into BB, it is either defined in a predecessor or
849  // live into it to. Add the preds to the worklist unless they are a
850  // defining block.
851  for (BasicBlock *P : predecessors(BB)) {
852  // The value is not live into a predecessor if it defines the value.
853  if (DefBlocks.count(P))
854  continue;
855 
856  // Otherwise it is, add to the worklist.
857  LiveInBlockWorklist.push_back(P);
858  }
859  }
860 }
861 
862 /// Queue a phi-node to be added to a basic-block for a specific Alloca.
863 ///
864 /// Returns true if there wasn't already a phi-node for that variable
865 bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
866  unsigned &Version) {
867  // Look up the basic-block in question.
868  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];
869 
870  // If the BB already has a phi node added for the i'th alloca then we're done!
871  if (PN)
872  return false;
873 
874  // Create a PhiNode using the dereferenced type... and add the phi-node to the
875  // BasicBlock.
876  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
877  Allocas[AllocaNo]->getName() + "." + Twine(Version++),
878  &BB->front());
879  ++NumPHIInsert;
880  PhiToAllocaMap[PN] = AllocaNo;
881  return true;
882 }
883 
884 /// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
885 /// create a merged location incorporating \p DL, or to set \p DL directly.
887  bool ApplyMergedLoc) {
888  if (ApplyMergedLoc)
889  PN->applyMergedLocation(PN->getDebugLoc(), DL);
890  else
891  PN->setDebugLoc(DL);
892 }
893 
894 /// Recursively traverse the CFG of the function, renaming loads and
895 /// stores to the allocas which we are promoting.
896 ///
897 /// IncomingVals indicates what value each Alloca contains on exit from the
898 /// predecessor block Pred.
899 void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
900  RenamePassData::ValVector &IncomingVals,
901  RenamePassData::LocationVector &IncomingLocs,
902  std::vector<RenamePassData> &Worklist) {
903 NextIteration:
904  // If we are inserting any phi nodes into this BB, they will already be in the
905  // block.
906  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
907  // If we have PHI nodes to update, compute the number of edges from Pred to
908  // BB.
909  if (PhiToAllocaMap.count(APN)) {
910  // We want to be able to distinguish between PHI nodes being inserted by
911  // this invocation of mem2reg from those phi nodes that already existed in
912  // the IR before mem2reg was run. We determine that APN is being inserted
913  // because it is missing incoming edges. All other PHI nodes being
914  // inserted by this pass of mem2reg will have the same number of incoming
915  // operands so far. Remember this count.
916  unsigned NewPHINumOperands = APN->getNumOperands();
917 
918  unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
919  assert(NumEdges && "Must be at least one edge from Pred to BB!");
920 
921  // Add entries for all the phis.
922  BasicBlock::iterator PNI = BB->begin();
923  do {
924  unsigned AllocaNo = PhiToAllocaMap[APN];
925 
926  // Update the location of the phi node.
927  updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
928  APN->getNumIncomingValues() > 0);
929 
930  // Add N incoming values to the PHI node.
931  for (unsigned i = 0; i != NumEdges; ++i)
932  APN->addIncoming(IncomingVals[AllocaNo], Pred);
933 
934  // The currently active variable for this block is now the PHI.
935  IncomingVals[AllocaNo] = APN;
936  for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[AllocaNo])
937  ConvertDebugDeclareToDebugValue(DII, APN, DIB);
938 
939  // Get the next phi node.
940  ++PNI;
941  APN = dyn_cast<PHINode>(PNI);
942  if (!APN)
943  break;
944 
945  // Verify that it is missing entries. If not, it is not being inserted
946  // by this mem2reg invocation so we want to ignore it.
947  } while (APN->getNumOperands() == NewPHINumOperands);
948  }
949  }
950 
951  // Don't revisit blocks.
952  if (!Visited.insert(BB).second)
953  return;
954 
955  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
956  Instruction *I = &*II++; // get the instruction, increment iterator
957 
958  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
959  AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
960  if (!Src)
961  continue;
962 
963  DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
964  if (AI == AllocaLookup.end())
965  continue;
966 
967  Value *V = IncomingVals[AI->second];
968 
969  // If the load was marked as nonnull we don't want to lose
970  // that information when we erase this Load. So we preserve
971  // it with an assume.
972  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
973  !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
974  addAssumeNonNull(AC, LI);
975 
976  // Anything using the load now uses the current value.
977  LI->replaceAllUsesWith(V);
978  BB->getInstList().erase(LI);
979  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
980  // Delete this instruction and mark the name as the current holder of the
981  // value
982  AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
983  if (!Dest)
984  continue;
985 
986  DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
987  if (ai == AllocaLookup.end())
988  continue;
989 
990  // what value were we writing?
991  unsigned AllocaNo = ai->second;
992  IncomingVals[AllocaNo] = SI->getOperand(0);
993 
994  // Record debuginfo for the store before removing it.
995  IncomingLocs[AllocaNo] = SI->getDebugLoc();
996  for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[ai->second])
998  BB->getInstList().erase(SI);
999  }
1000  }
1001 
1002  // 'Recurse' to our successors.
1003  succ_iterator I = succ_begin(BB), E = succ_end(BB);
1004  if (I == E)
1005  return;
1006 
1007  // Keep track of the successors so we don't visit the same successor twice
1008  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;
1009 
1010  // Handle the first successor without using the worklist.
1011  VisitedSuccs.insert(*I);
1012  Pred = BB;
1013  BB = *I;
1014  ++I;
1015 
1016  for (; I != E; ++I)
1017  if (VisitedSuccs.insert(*I).second)
1018  Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);
1019 
1020  goto NextIteration;
1021 }
1022 
1024  AssumptionCache *AC) {
1025  // If there is nothing to do, bail out...
1026  if (Allocas.empty())
1027  return;
1028 
1029  PromoteMem2Reg(Allocas, DT, AC).run();
1030 }
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:67
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:157
iterator erase(iterator where)
Definition: ilist.h:265
This class represents lattice values for constants.
Definition: AllocatorList.h:23
iterator begin() const
Definition: ArrayRef.h:136
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
static void removeLifetimeIntrinsicUsers(AllocaInst *AI)
This class represents a function call, abstracting a target machine's calling convention.
A cache of @llvm.assume calls within a function.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:33
An instruction for reading from memory.
Definition: Instructions.h:167
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:30
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:264
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:268
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:369
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:96
void setDefiningBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
static StringRef getName(Value *V)
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:102
#define F(x, y, z)
Definition: MD5.cpp:55
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
This is the common base class for debug info intrinsics for variables.
Definition: IntrinsicInst.h:87
This class represents a no-op cast from one type to another.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:234
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1281
An instruction for storing to memory.
Definition: Instructions.h:320
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:429
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1018
Value * getOperand(unsigned i) const
Definition: User.h:169
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:1251
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:105
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:176
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:140
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:873
#define P(N)
bool erase(const KeyT &Val)
Definition: DenseMap.h:298
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:321
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL, bool ApplyMergedLoc)
Update the debug location of a phi.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
static void ComputeLiveInBlocks(const SmallPtrSetImpl< BasicBlock *> &UsingBlocks, const SmallPtrSetImpl< BasicBlock *> &DefBlocks, SmallPtrSetImpl< BasicBlock *> &LiveInBlocks, PredIteratorCache &PredCache)
Given sets of UsingBlocks and DefBlocks, compute the set of LiveInBlocks.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
const Instruction & front() const
Definition: BasicBlock.h:280
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:112
const Instruction & back() const
Definition: BasicBlock.h:282
This instruction compares its operands according to the predicate given to the constructor.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:115
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:526
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:381
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI)
Given a LoadInst LI this adds assume(LI != null) after it.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1414
iterator erase(const_iterator CI)
Definition: SmallVector.h:437
size_t size() const
Definition: SmallVector.h:52
Compute iterated dominance frontiers using a linear time algorithm.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:219
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1115
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:333
void setLiveInBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is live on entry to the block...
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:417
TinyPtrVector< DbgVariableIntrinsic * > FindDbgAddrUses(Value *V)
Finds all intrinsics declaring local variables as living in the memory that 'V' points to...
Definition: Local.cpp:1484
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Many allocas are only used within a single basic block.
void calculate(SmallVectorImpl< BasicBlock *> &IDFBlocks)
Calculate iterated dominance frontiers.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:839
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:248
Module.h This file contains the declarations for the Module class.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
iterator end() const
Definition: ArrayRef.h:137
void applyMergedLocation(const DILocation *LocA, const DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
Definition: DebugInfo.cpp:688
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:124
unsigned getNumIncomingValues() const
Return the number of incoming edges.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:55
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
iterator_range< user_iterator > users()
Definition: Value.h:399
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:211
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:324
void registerAssumption(CallInst *CI)
Add an @llvm.assume intrinsic to this function&#39;s cache.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:106
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction...
Definition: Instruction.cpp:79
unsigned pred_size(const BasicBlock *BB)
Get the number of predecessors of BB.
Definition: CFG.h:121
#define I(x, y, z)
Definition: MD5.cpp:58
iterator end()
Definition: DenseMap.h:108
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:322
Determine the iterated dominance frontier, given a set of defining blocks, and optionally, a set of live-in blocks.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
Definition: Value.h:375
const BasicBlock & front() const
Definition: Function.h:662
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Rewrite as many loads as possible given a single store.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:565
LLVM Value Representation.
Definition: Value.h:72
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===----------------------------------------------------------------------===// Dbg Intrinsic utilities ...
Definition: Local.cpp:1288
void PromoteMemToReg(ArrayRef< AllocaInst *> Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
NodeT * getRoot() const
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
const uint64_t Version
Definition: InstrProf.h:894
bool use_empty() const
Definition: Value.h:322
Function object to check whether the first component of a std::pair compares less than the first comp...
Definition: STLExtras.h:959
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:43
const BasicBlock * getParent() const
Definition: Instruction.h:66
an instruction to allocate memory on the stack
Definition: Instructions.h:59
user_iterator user_end()
Definition: Value.h:383