// NOTE: This listing was recovered from the doxygen-generated page for
// llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp (LLVM 6.0.0svn).
//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references. It promotes
// alloca instructions which only have loads and stores as uses. An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
//===----------------------------------------------------------------------===//
17 
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>
55 
using namespace llvm;

#define DEBUG_TYPE "mem2reg"

// Statistics reported under -stats; each counter is incremented where the
// corresponding promotion path succeeds (see run() and the helpers below).
STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
64 
66  // FIXME: If the memory unit is of pointer or integer type, we can permit
67  // assignments to subsections of the memory unit.
68  unsigned AS = AI->getType()->getAddressSpace();
69 
70  // Only allow direct and non-volatile loads and stores...
71  for (const User *U : AI->users()) {
72  if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
73  // Note that atomic loads can be transformed; atomic semantics do
74  // not have any meaning for a local alloca.
75  if (LI->isVolatile())
76  return false;
77  } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
78  if (SI->getOperand(0) == AI)
79  return false; // Don't allow a store OF the AI, only INTO the AI.
80  // Note that atomic stores can be transformed; atomic semantics do
81  // not have any meaning for a local alloca.
82  if (SI->isVolatile())
83  return false;
84  } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
85  if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
86  II->getIntrinsicID() != Intrinsic::lifetime_end)
87  return false;
88  } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
89  if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
90  return false;
91  if (!onlyUsedByLifetimeMarkers(BCI))
92  return false;
93  } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
94  if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
95  return false;
96  if (!GEPI->hasAllZeroIndices())
97  return false;
98  if (!onlyUsedByLifetimeMarkers(GEPI))
99  return false;
100  } else {
101  return false;
102  }
103  }
104 
105  return true;
106 }
107 
108 namespace {
109 
110 struct AllocaInfo {
111  SmallVector<BasicBlock *, 32> DefiningBlocks;
112  SmallVector<BasicBlock *, 32> UsingBlocks;
113 
114  StoreInst *OnlyStore;
115  BasicBlock *OnlyBlock;
116  bool OnlyUsedInOneBlock;
117 
118  Value *AllocaPointerVal;
120 
121  void clear() {
122  DefiningBlocks.clear();
123  UsingBlocks.clear();
124  OnlyStore = nullptr;
125  OnlyBlock = nullptr;
126  OnlyUsedInOneBlock = true;
127  AllocaPointerVal = nullptr;
128  DbgDeclares.clear();
129  }
130 
131  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
132  /// by the rest of the pass to reason about the uses of this alloca.
133  void AnalyzeAlloca(AllocaInst *AI) {
134  clear();
135 
136  // As we scan the uses of the alloca instruction, keep track of stores,
137  // and decide whether all of the loads and stores to the alloca are within
138  // the same basic block.
139  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
140  Instruction *User = cast<Instruction>(*UI++);
141 
142  if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
143  // Remember the basic blocks which define new values for the alloca
144  DefiningBlocks.push_back(SI->getParent());
145  AllocaPointerVal = SI->getOperand(0);
146  OnlyStore = SI;
147  } else {
148  LoadInst *LI = cast<LoadInst>(User);
149  // Otherwise it must be a load instruction, keep track of variable
150  // reads.
151  UsingBlocks.push_back(LI->getParent());
152  AllocaPointerVal = LI;
153  }
154 
155  if (OnlyUsedInOneBlock) {
156  if (!OnlyBlock)
157  OnlyBlock = User->getParent();
158  else if (OnlyBlock != User->getParent())
159  OnlyUsedInOneBlock = false;
160  }
161  }
162 
163  DbgDeclares = FindDbgAddrUses(AI);
164  }
165 };
166 
167 // Data package used by RenamePass()
168 class RenamePassData {
169 public:
170  using ValVector = std::vector<Value *>;
171 
172  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V)
173  : BB(B), Pred(P), Values(std::move(V)) {}
174 
175  BasicBlock *BB;
176  BasicBlock *Pred;
177  ValVector Values;
178 };
179 
180 /// \brief This assigns and keeps a per-bb relative ordering of load/store
181 /// instructions in the block that directly load or store an alloca.
182 ///
183 /// This functionality is important because it avoids scanning large basic
184 /// blocks multiple times when promoting many allocas in the same block.
185 class LargeBlockInfo {
186  /// \brief For each instruction that we track, keep the index of the
187  /// instruction.
188  ///
189  /// The index starts out as the number of the instruction from the start of
190  /// the block.
192 
193 public:
194 
195  /// This code only looks at accesses to allocas.
196  static bool isInterestingInstruction(const Instruction *I) {
197  return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
198  (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
199  }
200 
201  /// Get or calculate the index of the specified instruction.
202  unsigned getInstructionIndex(const Instruction *I) {
203  assert(isInterestingInstruction(I) &&
204  "Not a load/store to/from an alloca?");
205 
206  // If we already have this instruction number, return it.
208  if (It != InstNumbers.end())
209  return It->second;
210 
211  // Scan the whole block to get the instruction. This accumulates
212  // information for every interesting instruction in the block, in order to
213  // avoid gratuitus rescans.
214  const BasicBlock *BB = I->getParent();
215  unsigned InstNo = 0;
216  for (const Instruction &BBI : *BB)
217  if (isInterestingInstruction(&BBI))
218  InstNumbers[&BBI] = InstNo++;
219  It = InstNumbers.find(I);
220 
221  assert(It != InstNumbers.end() && "Didn't insert instruction?");
222  return It->second;
223  }
224 
225  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
226 
227  void clear() { InstNumbers.clear(); }
228 };
229 
230 struct PromoteMem2Reg {
231  /// The alloca instructions being promoted.
232  std::vector<AllocaInst *> Allocas;
233 
234  DominatorTree &DT;
235  DIBuilder DIB;
236 
237  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
238  AssumptionCache *AC;
239 
240  const SimplifyQuery SQ;
241 
242  /// Reverse mapping of Allocas.
244 
245  /// \brief The PhiNodes we're adding.
246  ///
247  /// That map is used to simplify some Phi nodes as we iterate over it, so
248  /// it should have deterministic iterators. We could use a MapVector, but
249  /// since we already maintain a map from BasicBlock* to a stable numbering
250  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
252 
253  /// For each PHI node, keep track of which entry in Allocas it corresponds
254  /// to.
255  DenseMap<PHINode *, unsigned> PhiToAllocaMap;
256 
257  /// If we are updating an AliasSetTracker, then for each alloca that is of
258  /// pointer type, we keep track of what to copyValue to the inserted PHI
259  /// nodes here.
260  std::vector<Value *> PointerAllocaValues;
261 
262  /// For each alloca, we keep track of the dbg.declare intrinsic that
263  /// describes it, if any, so that we can convert it to a dbg.value
264  /// intrinsic if the alloca gets promoted.
265  SmallVector<TinyPtrVector<DbgInfoIntrinsic *>, 8> AllocaDbgDeclares;
266 
267  /// The set of basic blocks the renamer has already visited.
269 
270  /// Contains a stable numbering of basic blocks to avoid non-determinstic
271  /// behavior.
273 
274  /// Lazily compute the number of predecessors a block has.
276 
277 public:
278  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
279  AssumptionCache *AC)
280  : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
281  DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
282  AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
283  nullptr, &DT, AC) {}
284 
285  void run();
286 
287 private:
288  void RemoveFromAllocasList(unsigned &AllocaIdx) {
289  Allocas[AllocaIdx] = Allocas.back();
290  Allocas.pop_back();
291  --AllocaIdx;
292  }
293 
294  unsigned getNumPreds(const BasicBlock *BB) {
295  unsigned &NP = BBNumPreds[BB];
296  if (NP == 0)
297  NP = std::distance(pred_begin(BB), pred_end(BB)) + 1;
298  return NP - 1;
299  }
300 
301  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
302  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
303  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
304  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
305  RenamePassData::ValVector &IncVals,
306  std::vector<RenamePassData> &Worklist);
307  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
308 };
309 
310 } // end anonymous namespace
311 
312 /// Given a LoadInst LI this adds assume(LI != null) after it.
314  Function *AssumeIntrinsic =
315  Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
316  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
318  LoadNotNull->insertAfter(LI);
319  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
320  CI->insertAfter(LoadNotNull);
321  AC->registerAssumption(CI);
322 }
323 
325  // Knowing that this alloca is promotable, we know that it's safe to kill all
326  // instructions except for load and store.
327 
328  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
329  Instruction *I = cast<Instruction>(*UI);
330  ++UI;
331  if (isa<LoadInst>(I) || isa<StoreInst>(I))
332  continue;
333 
334  if (!I->getType()->isVoidTy()) {
335  // The only users of this bitcast/GEP instruction are lifetime intrinsics.
336  // Follow the use/def chain to erase them now instead of leaving it for
337  // dead code elimination later.
338  for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
339  Instruction *Inst = cast<Instruction>(*UUI);
340  ++UUI;
341  Inst->eraseFromParent();
342  }
343  }
344  I->eraseFromParent();
345  }
346 }
347 
348 /// \brief Rewrite as many loads as possible given a single store.
349 ///
350 /// When there is only a single store, we can use the domtree to trivially
351 /// replace all of the dominated loads with the stored value. Do so, and return
352 /// true if this has successfully promoted the alloca entirely. If this returns
353 /// false there were some loads which were not dominated by the single store
354 /// and thus must be phi-ed with undef. We fall back to the standard alloca
355 /// promotion algorithm in that case.
356 static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
357  LargeBlockInfo &LBI, const DataLayout &DL,
358  DominatorTree &DT, AssumptionCache *AC) {
359  StoreInst *OnlyStore = Info.OnlyStore;
360  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
361  BasicBlock *StoreBB = OnlyStore->getParent();
362  int StoreIndex = -1;
363 
364  // Clear out UsingBlocks. We will reconstruct it here if needed.
365  Info.UsingBlocks.clear();
366 
367  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
368  Instruction *UserInst = cast<Instruction>(*UI++);
369  if (!isa<LoadInst>(UserInst)) {
370  assert(UserInst == OnlyStore && "Should only have load/stores");
371  continue;
372  }
373  LoadInst *LI = cast<LoadInst>(UserInst);
374 
375  // Okay, if we have a load from the alloca, we want to replace it with the
376  // only value stored to the alloca. We can do this if the value is
377  // dominated by the store. If not, we use the rest of the mem2reg machinery
378  // to insert the phi nodes as needed.
379  if (!StoringGlobalVal) { // Non-instructions are always dominated.
380  if (LI->getParent() == StoreBB) {
381  // If we have a use that is in the same block as the store, compare the
382  // indices of the two instructions to see which one came first. If the
383  // load came before the store, we can't handle it.
384  if (StoreIndex == -1)
385  StoreIndex = LBI.getInstructionIndex(OnlyStore);
386 
387  if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
388  // Can't handle this load, bail out.
389  Info.UsingBlocks.push_back(StoreBB);
390  continue;
391  }
392  } else if (LI->getParent() != StoreBB &&
393  !DT.dominates(StoreBB, LI->getParent())) {
394  // If the load and store are in different blocks, use BB dominance to
395  // check their relationships. If the store doesn't dom the use, bail
396  // out.
397  Info.UsingBlocks.push_back(LI->getParent());
398  continue;
399  }
400  }
401 
402  // Otherwise, we *can* safely rewrite this load.
403  Value *ReplVal = OnlyStore->getOperand(0);
404  // If the replacement value is the load, this must occur in unreachable
405  // code.
406  if (ReplVal == LI)
407  ReplVal = UndefValue::get(LI->getType());
408 
409  // If the load was marked as nonnull we don't want to lose
410  // that information when we erase this Load. So we preserve
411  // it with an assume.
412  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
413  !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
414  addAssumeNonNull(AC, LI);
415 
416  LI->replaceAllUsesWith(ReplVal);
417  LI->eraseFromParent();
418  LBI.deleteValue(LI);
419  }
420 
421  // Finally, after the scan, check to see if the store is all that is left.
422  if (!Info.UsingBlocks.empty())
423  return false; // If not, we'll have to fall back for the remainder.
424 
425  // Record debuginfo for the store and remove the declaration's
426  // debuginfo.
427  for (DbgInfoIntrinsic *DII : Info.DbgDeclares) {
428  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
429  ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
430  DII->eraseFromParent();
431  LBI.deleteValue(DII);
432  }
433  // Remove the (now dead) store and alloca.
434  Info.OnlyStore->eraseFromParent();
435  LBI.deleteValue(Info.OnlyStore);
436 
437  AI->eraseFromParent();
438  LBI.deleteValue(AI);
439  return true;
440 }
441 
442 /// Many allocas are only used within a single basic block. If this is the
443 /// case, avoid traversing the CFG and inserting a lot of potentially useless
444 /// PHI nodes by just performing a single linear pass over the basic block
445 /// using the Alloca.
446 ///
447 /// If we cannot promote this alloca (because it is read before it is written),
448 /// return false. This is necessary in cases where, due to control flow, the
449 /// alloca is undefined only on some control flow paths. e.g. code like
450 /// this is correct in LLVM IR:
451 /// // A is an alloca with no stores so far
452 /// for (...) {
453 /// int t = *A;
454 /// if (!first_iteration)
455 /// use(t);
456 /// *A = 42;
457 /// }
458 static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
459  LargeBlockInfo &LBI,
460  const DataLayout &DL,
461  DominatorTree &DT,
462  AssumptionCache *AC) {
463  // The trickiest case to handle is when we have large blocks. Because of this,
464  // this code is optimized assuming that large blocks happen. This does not
465  // significantly pessimize the small block case. This uses LargeBlockInfo to
466  // make it efficient to get the index of various operations in the block.
467 
468  // Walk the use-def list of the alloca, getting the locations of all stores.
469  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
470  StoresByIndexTy StoresByIndex;
471 
472  for (User *U : AI->users())
473  if (StoreInst *SI = dyn_cast<StoreInst>(U))
474  StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
475 
476  // Sort the stores by their index, making it efficient to do a lookup with a
477  // binary search.
478  std::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());
479 
480  // Walk all of the loads from this alloca, replacing them with the nearest
481  // store above them, if any.
482  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
483  LoadInst *LI = dyn_cast<LoadInst>(*UI++);
484  if (!LI)
485  continue;
486 
487  unsigned LoadIdx = LBI.getInstructionIndex(LI);
488 
489  // Find the nearest store that has a lower index than this load.
490  StoresByIndexTy::iterator I =
491  std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
492  std::make_pair(LoadIdx,
493  static_cast<StoreInst *>(nullptr)),
494  less_first());
495  if (I == StoresByIndex.begin()) {
496  if (StoresByIndex.empty())
497  // If there are no stores, the load takes the undef value.
499  else
500  // There is no store before this load, bail out (load may be affected
501  // by the following stores - see main comment).
502  return false;
503  } else {
504  // Otherwise, there was a store before this load, the load takes its value.
505  // Note, if the load was marked as nonnull we don't want to lose that
506  // information when we erase it. So we preserve it with an assume.
507  Value *ReplVal = std::prev(I)->second->getOperand(0);
508  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
509  !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
510  addAssumeNonNull(AC, LI);
511 
512  LI->replaceAllUsesWith(ReplVal);
513  }
514 
515  LI->eraseFromParent();
516  LBI.deleteValue(LI);
517  }
518 
519  // Remove the (now dead) stores and alloca.
520  while (!AI->use_empty()) {
521  StoreInst *SI = cast<StoreInst>(AI->user_back());
522  // Record debuginfo for the store before removing it.
523  for (DbgInfoIntrinsic *DII : Info.DbgDeclares) {
524  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
525  ConvertDebugDeclareToDebugValue(DII, SI, DIB);
526  }
527  SI->eraseFromParent();
528  LBI.deleteValue(SI);
529  }
530 
531  AI->eraseFromParent();
532  LBI.deleteValue(AI);
533 
534  // The alloca's debuginfo can be removed as well.
535  for (DbgInfoIntrinsic *DII : Info.DbgDeclares) {
536  DII->eraseFromParent();
537  LBI.deleteValue(DII);
538  }
539 
540  ++NumLocalPromoted;
541  return true;
542 }
543 
544 void PromoteMem2Reg::run() {
545  Function &F = *DT.getRoot()->getParent();
546 
547  AllocaDbgDeclares.resize(Allocas.size());
548 
549  AllocaInfo Info;
550  LargeBlockInfo LBI;
551  ForwardIDFCalculator IDF(DT);
552 
553  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
554  AllocaInst *AI = Allocas[AllocaNum];
555 
556  assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
557  assert(AI->getParent()->getParent() == &F &&
558  "All allocas should be in the same function, which is same as DF!");
559 
561 
562  if (AI->use_empty()) {
563  // If there are no uses of the alloca, just delete it now.
564  AI->eraseFromParent();
565 
566  // Remove the alloca from the Allocas list, since it has been processed
567  RemoveFromAllocasList(AllocaNum);
568  ++NumDeadAlloca;
569  continue;
570  }
571 
572  // Calculate the set of read and write-locations for each alloca. This is
573  // analogous to finding the 'uses' and 'definitions' of each variable.
574  Info.AnalyzeAlloca(AI);
575 
576  // If there is only a single store to this value, replace any loads of
577  // it that are directly dominated by the definition with the value stored.
578  if (Info.DefiningBlocks.size() == 1) {
579  if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
580  // The alloca has been processed, move on.
581  RemoveFromAllocasList(AllocaNum);
582  ++NumSingleStore;
583  continue;
584  }
585  }
586 
587  // If the alloca is only read and written in one basic block, just perform a
588  // linear sweep over the block to eliminate it.
589  if (Info.OnlyUsedInOneBlock &&
590  promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
591  // The alloca has been processed, move on.
592  RemoveFromAllocasList(AllocaNum);
593  continue;
594  }
595 
596  // If we haven't computed a numbering for the BB's in the function, do so
597  // now.
598  if (BBNumbers.empty()) {
599  unsigned ID = 0;
600  for (auto &BB : F)
601  BBNumbers[&BB] = ID++;
602  }
603 
604  // Remember the dbg.declare intrinsic describing this alloca, if any.
605  if (!Info.DbgDeclares.empty())
606  AllocaDbgDeclares[AllocaNum] = Info.DbgDeclares;
607 
608  // Keep the reverse mapping of the 'Allocas' array for the rename pass.
609  AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
610 
611  // At this point, we're committed to promoting the alloca using IDF's, and
612  // the standard SSA construction algorithm. Determine which blocks need PHI
613  // nodes and see if we can optimize out some work by avoiding insertion of
614  // dead phi nodes.
615 
616  // Unique the set of defining blocks for efficient lookup.
618  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());
619 
620  // Determine which blocks the value is live in. These are blocks which lead
621  // to uses.
622  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
623  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
624 
625  // At this point, we're committed to promoting the alloca using IDF's, and
626  // the standard SSA construction algorithm. Determine which blocks need phi
627  // nodes and see if we can optimize out some work by avoiding insertion of
628  // dead phi nodes.
629  IDF.setLiveInBlocks(LiveInBlocks);
630  IDF.setDefiningBlocks(DefBlocks);
632  IDF.calculate(PHIBlocks);
633  if (PHIBlocks.size() > 1)
634  std::sort(PHIBlocks.begin(), PHIBlocks.end(),
635  [this](BasicBlock *A, BasicBlock *B) {
636  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
637  });
638 
639  unsigned CurrentVersion = 0;
640  for (BasicBlock *BB : PHIBlocks)
641  QueuePhiNode(BB, AllocaNum, CurrentVersion);
642  }
643 
644  if (Allocas.empty())
645  return; // All of the allocas must have been trivial!
646 
647  LBI.clear();
648 
649  // Set the incoming values for the basic block to be null values for all of
650  // the alloca's. We do this in case there is a load of a value that has not
651  // been stored yet. In this case, it will get this null value.
652  RenamePassData::ValVector Values(Allocas.size());
653  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
654  Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());
655 
656  // Walks all basic blocks in the function performing the SSA rename algorithm
657  // and inserting the phi nodes we marked as necessary
658  std::vector<RenamePassData> RenamePassWorkList;
659  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values));
660  do {
661  RenamePassData RPD = std::move(RenamePassWorkList.back());
662  RenamePassWorkList.pop_back();
663  // RenamePass may add new worklist entries.
664  RenamePass(RPD.BB, RPD.Pred, RPD.Values, RenamePassWorkList);
665  } while (!RenamePassWorkList.empty());
666 
667  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
668  Visited.clear();
669 
670  // Remove the allocas themselves from the function.
671  for (Instruction *A : Allocas) {
672  // If there are any uses of the alloca instructions left, they must be in
673  // unreachable basic blocks that were not processed by walking the dominator
674  // tree. Just delete the users now.
675  if (!A->use_empty())
676  A->replaceAllUsesWith(UndefValue::get(A->getType()));
677  A->eraseFromParent();
678  }
679 
680  // Remove alloca's dbg.declare instrinsics from the function.
681  for (auto &Declares : AllocaDbgDeclares)
682  for (auto *DII : Declares)
683  DII->eraseFromParent();
684 
685  // Loop over all of the PHI nodes and see if there are any that we can get
686  // rid of because they merge all of the same incoming values. This can
687  // happen due to undef values coming into the PHI nodes. This process is
688  // iterative, because eliminating one PHI node can cause others to be removed.
689  bool EliminatedAPHI = true;
690  while (EliminatedAPHI) {
691  EliminatedAPHI = false;
692 
693  // Iterating over NewPhiNodes is deterministic, so it is safe to try to
694  // simplify and RAUW them as we go. If it was not, we could add uses to
695  // the values we replace with in a non-deterministic order, thus creating
696  // non-deterministic def->use chains.
697  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
698  I = NewPhiNodes.begin(),
699  E = NewPhiNodes.end();
700  I != E;) {
701  PHINode *PN = I->second;
702 
703  // If this PHI node merges one value and/or undefs, get the value.
704  if (Value *V = SimplifyInstruction(PN, SQ)) {
705  PN->replaceAllUsesWith(V);
706  PN->eraseFromParent();
707  NewPhiNodes.erase(I++);
708  EliminatedAPHI = true;
709  continue;
710  }
711  ++I;
712  }
713  }
714 
715  // At this point, the renamer has added entries to PHI nodes for all reachable
716  // code. Unfortunately, there may be unreachable blocks which the renamer
717  // hasn't traversed. If this is the case, the PHI nodes may not
718  // have incoming values for all predecessors. Loop over all PHI nodes we have
719  // created, inserting undef values if they are missing any incoming values.
720  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
721  I = NewPhiNodes.begin(),
722  E = NewPhiNodes.end();
723  I != E; ++I) {
724  // We want to do this once per basic block. As such, only process a block
725  // when we find the PHI that is the first entry in the block.
726  PHINode *SomePHI = I->second;
727  BasicBlock *BB = SomePHI->getParent();
728  if (&BB->front() != SomePHI)
729  continue;
730 
731  // Only do work here if there the PHI nodes are missing incoming values. We
732  // know that all PHI nodes that were inserted in a block will have the same
733  // number of incoming values, so we can just check any of them.
734  if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
735  continue;
736 
737  // Get the preds for BB.
739 
740  // Ok, now we know that all of the PHI nodes are missing entries for some
741  // basic blocks. Start by sorting the incoming predecessors for efficient
742  // access.
743  std::sort(Preds.begin(), Preds.end());
744 
745  // Now we loop through all BB's which have entries in SomePHI and remove
746  // them from the Preds list.
747  for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
748  // Do a log(n) search of the Preds list for the entry we want.
750  Preds.begin(), Preds.end(), SomePHI->getIncomingBlock(i));
751  assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
752  "PHI node has entry for a block which is not a predecessor!");
753 
754  // Remove the entry
755  Preds.erase(EntIt);
756  }
757 
758  // At this point, the blocks left in the preds list must have dummy
759  // entries inserted into every PHI nodes for the block. Update all the phi
760  // nodes in this block that we are inserting (there could be phis before
761  // mem2reg runs).
762  unsigned NumBadPreds = SomePHI->getNumIncomingValues();
763  BasicBlock::iterator BBI = BB->begin();
764  while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
765  SomePHI->getNumIncomingValues() == NumBadPreds) {
766  Value *UndefVal = UndefValue::get(SomePHI->getType());
767  for (BasicBlock *Pred : Preds)
768  SomePHI->addIncoming(UndefVal, Pred);
769  }
770  }
771 
772  NewPhiNodes.clear();
773 }
774 
775 /// \brief Determine which blocks the value is live in.
776 ///
777 /// These are blocks which lead to uses. Knowing this allows us to avoid
778 /// inserting PHI nodes into blocks which don't lead to uses (thus, the
779 /// inserted phi nodes would be dead).
780 void PromoteMem2Reg::ComputeLiveInBlocks(
781  AllocaInst *AI, AllocaInfo &Info,
782  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
783  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
784  // To determine liveness, we must iterate through the predecessors of blocks
785  // where the def is live. Blocks are added to the worklist if we need to
786  // check their predecessors. Start with all the using blocks.
787  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
788  Info.UsingBlocks.end());
789 
790  // If any of the using blocks is also a definition block, check to see if the
791  // definition occurs before or after the use. If it happens before the use,
792  // the value isn't really live-in.
793  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
794  BasicBlock *BB = LiveInBlockWorklist[i];
795  if (!DefBlocks.count(BB))
796  continue;
797 
798  // Okay, this is a block that both uses and defines the value. If the first
799  // reference to the alloca is a def (store), then we know it isn't live-in.
800  for (BasicBlock::iterator I = BB->begin();; ++I) {
801  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
802  if (SI->getOperand(1) != AI)
803  continue;
804 
805  // We found a store to the alloca before a load. The alloca is not
806  // actually live-in here.
807  LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
808  LiveInBlockWorklist.pop_back();
809  --i;
810  --e;
811  break;
812  }
813 
814  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
815  if (LI->getOperand(0) != AI)
816  continue;
817 
818  // Okay, we found a load before a store to the alloca. It is actually
819  // live into this block.
820  break;
821  }
822  }
823  }
824 
825  // Now that we have a set of blocks where the phi is live-in, recursively add
826  // their predecessors until we find the full region the value is live.
827  while (!LiveInBlockWorklist.empty()) {
828  BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
829 
830  // The block really is live in here, insert it into the set. If already in
831  // the set, then it has already been processed.
832  if (!LiveInBlocks.insert(BB).second)
833  continue;
834 
835  // Since the value is live into BB, it is either defined in a predecessor or
836  // live into it to. Add the preds to the worklist unless they are a
837  // defining block.
838  for (BasicBlock *P : predecessors(BB)) {
839  // The value is not live into a predecessor if it defines the value.
840  if (DefBlocks.count(P))
841  continue;
842 
843  // Otherwise it is, add to the worklist.
844  LiveInBlockWorklist.push_back(P);
845  }
846  }
847 }
848 
849 /// \brief Queue a phi-node to be added to a basic-block for a specific Alloca.
850 ///
851 /// Returns true if there wasn't already a phi-node for that variable
852 bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
853  unsigned &Version) {
854  // Look up the basic-block in question.
855  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];
856 
857  // If the BB already has a phi node added for the i'th alloca then we're done!
858  if (PN)
859  return false;
860 
861  // Create a PhiNode using the dereferenced type... and add the phi-node to the
862  // BasicBlock.
863  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
864  Allocas[AllocaNo]->getName() + "." + Twine(Version++),
865  &BB->front());
866  ++NumPHIInsert;
867  PhiToAllocaMap[PN] = AllocaNo;
868  return true;
869 }
870 
871 /// \brief Recursively traverse the CFG of the function, renaming loads and
872 /// stores to the allocas which we are promoting.
873 ///
874 /// IncomingVals indicates what value each Alloca contains on exit from the
875 /// predecessor block Pred.
876 void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
877  RenamePassData::ValVector &IncomingVals,
878  std::vector<RenamePassData> &Worklist) {
879 NextIteration:
880  // If we are inserting any phi nodes into this BB, they will already be in the
881  // block.
882  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
883  // If we have PHI nodes to update, compute the number of edges from Pred to
884  // BB.
885  if (PhiToAllocaMap.count(APN)) {
886  // We want to be able to distinguish between PHI nodes being inserted by
887  // this invocation of mem2reg from those phi nodes that already existed in
888  // the IR before mem2reg was run. We determine that APN is being inserted
889  // because it is missing incoming edges. All other PHI nodes being
890  // inserted by this pass of mem2reg will have the same number of incoming
891  // operands so far. Remember this count.
892  unsigned NewPHINumOperands = APN->getNumOperands();
893 
894  unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
895  assert(NumEdges && "Must be at least one edge from Pred to BB!");
896 
897  // Add entries for all the phis.
898  BasicBlock::iterator PNI = BB->begin();
899  do {
900  unsigned AllocaNo = PhiToAllocaMap[APN];
901 
902  // Add N incoming values to the PHI node.
903  for (unsigned i = 0; i != NumEdges; ++i)
904  APN->addIncoming(IncomingVals[AllocaNo], Pred);
905 
906  // The currently active variable for this block is now the PHI.
907  IncomingVals[AllocaNo] = APN;
908  for (DbgInfoIntrinsic *DII : AllocaDbgDeclares[AllocaNo])
909  ConvertDebugDeclareToDebugValue(DII, APN, DIB);
910 
911  // Get the next phi node.
912  ++PNI;
913  APN = dyn_cast<PHINode>(PNI);
914  if (!APN)
915  break;
916 
917  // Verify that it is missing entries. If not, it is not being inserted
918  // by this mem2reg invocation so we want to ignore it.
919  } while (APN->getNumOperands() == NewPHINumOperands);
920  }
921  }
922 
923  // Don't revisit blocks.
924  if (!Visited.insert(BB).second)
925  return;
926 
927  for (BasicBlock::iterator II = BB->begin(); !isa<TerminatorInst>(II);) {
928  Instruction *I = &*II++; // get the instruction, increment iterator
929 
930  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
931  AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
932  if (!Src)
933  continue;
934 
935  DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
936  if (AI == AllocaLookup.end())
937  continue;
938 
939  Value *V = IncomingVals[AI->second];
940 
941  // If the load was marked as nonnull we don't want to lose
942  // that information when we erase this Load. So we preserve
943  // it with an assume.
944  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
945  !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
946  addAssumeNonNull(AC, LI);
947 
948  // Anything using the load now uses the current value.
949  LI->replaceAllUsesWith(V);
950  BB->getInstList().erase(LI);
951  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
952  // Delete this instruction and mark the name as the current holder of the
953  // value
954  AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
955  if (!Dest)
956  continue;
957 
958  DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
959  if (ai == AllocaLookup.end())
960  continue;
961 
962  // what value were we writing?
963  IncomingVals[ai->second] = SI->getOperand(0);
964  // Record debuginfo for the store before removing it.
965  for (DbgInfoIntrinsic *DII : AllocaDbgDeclares[ai->second])
967  BB->getInstList().erase(SI);
968  }
969  }
970 
971  // 'Recurse' to our successors.
972  succ_iterator I = succ_begin(BB), E = succ_end(BB);
973  if (I == E)
974  return;
975 
976  // Keep track of the successors so we don't visit the same successor twice
977  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;
978 
979  // Handle the first successor without using the worklist.
980  VisitedSuccs.insert(*I);
981  Pred = BB;
982  BB = *I;
983  ++I;
984 
985  for (; I != E; ++I)
986  if (VisitedSuccs.insert(*I).second)
987  Worklist.emplace_back(*I, Pred, IncomingVals);
988 
989  goto NextIteration;
990 }
991 
993  AssumptionCache *AC) {
994  // If there is nothing to do, bail out...
995  if (Allocas.empty())
996  return;
997 
998  PromoteMem2Reg(Allocas, DT, AC).run();
999 }
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:69
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:158
iterator erase(iterator where)
Definition: ilist.h:280
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
iterator begin() const
Definition: ArrayRef.h:137
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
static void removeLifetimeIntrinsicUsers(AllocaInst *AI)
This class represents a function call, abstracting a target machine's calling convention.
A cache of .assume calls within a function.
STATISTIC(NumFunctions, "Total number of functions")
static CallInst * Create(Value *Func, ArrayRef< Value *> Args, ArrayRef< OperandBundleDef > Bundles=None, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
An instruction for reading from memory.
Definition: Instructions.h:164
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:31
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:207
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:361
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
void setDefiningBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
static StringRef getName(Value *V)
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:103
#define F(x, y, z)
Definition: MD5.cpp:55
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
This class represents a no-op cast from one type to another.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:194
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:904
An instruction for storing to memory.
Definition: Instructions.h:306
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:430
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:140
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:980
Value * getOperand(unsigned i) const
Definition: User.h:154
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:874
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:106
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:141
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:837
#define P(N)
bool erase(const KeyT &Val)
Definition: DenseMap.h:268
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:116
void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===----------------------------------------------------------------===// Dbg Intrinsic utilities ...
Definition: Local.cpp:1134
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
const Instruction & front() const
Definition: BasicBlock.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:113
const Instruction & back() const
Definition: BasicBlock.h:266
This instruction compares its operands according to the predicate given to the constructor.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:495
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI)
Given a LoadInst LI this adds assume(LI != null) after it.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1320
const AMDGPUAS & AS
iterator erase(const_iterator CI)
Definition: SmallVector.h:449
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:220
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:317
void setLiveInBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is live on entry to the block...
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Many allocas are only used within a single basic block.
void calculate(SmallVectorImpl< BasicBlock *> &IDFBlocks)
Calculate iterated dominance frontiers.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:239
Module.h This file contains the declarations for the Module class.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
This is the common base class for debug info intrinsics.
Definition: IntrinsicInst.h:67
iterator end() const
Definition: ArrayRef.h:138
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:110
unsigned getNumIncomingValues() const
Return the number of incoming edges.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:57
iterator_range< user_iterator > users()
Definition: Value.h:401
TinyPtrVector< DbgInfoIntrinsic * > FindDbgAddrUses(Value *V)
Finds all intrinsics declaring local variables as living in the memory that 'V' points to...
Definition: Local.cpp:1270
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:210
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:120
void registerAssumption(CallInst *CI)
Add an .assume intrinsic to this function's cache.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:108
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction...
Definition: Instruction.cpp:81
#define I(x, y, z)
Definition: MD5.cpp:58
iterator end()
Definition: DenseMap.h:79
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
Determine the iterated dominance frontier, given a set of defining blocks, and optionally, a set of live-in blocks.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
Definition: Value.h:377
const BasicBlock & front() const
Definition: Function.h:595
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Rewrite as many loads as possible given a single store.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:556
LLVM Value Representation.
Definition: Value.h:73
void PromoteMemToReg(ArrayRef< AllocaInst *> Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
NodeT * getRoot() const
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if the given value is known to be non-zero when defined.
void sort(Policy policy, RandomAccessIterator Start, RandomAccessIterator End, const Comparator &Comp=Comparator())
Definition: Parallel.h:199
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
const uint64_t Version
Definition: InstrProf.h:867
bool use_empty() const
Definition: Value.h:328
Function object to check whether the first component of a std::pair compares less than the first comp...
Definition: STLExtras.h:649
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:144
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:66
an instruction to allocate memory on the stack
Definition: Instructions.h:60
user_iterator user_end()
Definition: Value.h:385