PromoteMemoryToRegister.cpp
1//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file promotes memory references to be register references. It promotes
10// alloca instructions which only have loads and stores as uses. An alloca is
11// transformed by using iterated dominator frontiers to place PHI nodes, then
12// traversing the function in depth-first order to rewrite loads and stores as
13// appropriate.
14//
15//===----------------------------------------------------------------------===//
16
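// Example (illustrative only): a minimal driver modeled on how the mem2reg
// pass wrapper collects promotable entry-block allocas and hands them to
// PromoteMemToReg(), which is defined at the bottom of this file. The helper
// name below is hypothetical.
//
//   static void promoteEntryBlockAllocas(Function &F, DominatorTree &DT,
//                                        AssumptionCache &AC) {
//     SmallVector<AllocaInst *, 8> PromotableAllocas;
//     for (Instruction &I : F.getEntryBlock())
//       if (auto *AI = dyn_cast<AllocaInst>(&I))
//         if (isAllocaPromotable(AI))
//           PromotableAllocas.push_back(AI);
//     if (!PromotableAllocas.empty())
//       PromoteMemToReg(PromotableAllocas, DT, &AC);
//   }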
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/Statistic.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/Analysis/AssumptionCache.h"
26#include "llvm/Analysis/InstructionSimplify.h"
27#include "llvm/Analysis/IteratedDominanceFrontier.h"
28#include "llvm/Analysis/ValueTracking.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/Constants.h"
33#include "llvm/IR/DIBuilder.h"
34#include "llvm/IR/DebugInfo.h"
35#include "llvm/IR/DebugProgramInstruction.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/InstrTypes.h"
39#include "llvm/IR/Instruction.h"
40#include "llvm/IR/Instructions.h"
41#include "llvm/IR/IntrinsicInst.h"
42#include "llvm/IR/Intrinsics.h"
43#include "llvm/IR/LLVMContext.h"
44#include "llvm/IR/Module.h"
45#include "llvm/IR/Operator.h"
46#include "llvm/IR/Type.h"
47#include "llvm/IR/User.h"
48#include "llvm/Support/Casting.h"
49#include "llvm/Transforms/Utils/Local.h"
50#include "llvm/Transforms/Utils/PromoteMemToReg.h"
51#include <algorithm>
52#include <cassert>
53#include <iterator>
54#include <utility>
55#include <vector>
56
57using namespace llvm;
58
59#define DEBUG_TYPE "mem2reg"
60
61STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
62STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
63STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
64STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
65
66bool llvm::isAllocaPromotable(const AllocaInst *AI) {
67 // Only allow direct and non-volatile loads and stores...
68 for (const User *U : AI->users()) {
69 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
70 // Note that atomic loads can be transformed; atomic semantics do
71 // not have any meaning for a local alloca.
72 if (LI->isVolatile() || LI->getType() != AI->getAllocatedType())
73 return false;
74 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
75 if (SI->getValueOperand() == AI ||
76 SI->getValueOperand()->getType() != AI->getAllocatedType())
77 return false; // Don't allow a store OF the AI, only INTO the AI.
78 // Note that atomic stores can be transformed; atomic semantics do
79 // not have any meaning for a local alloca.
80 if (SI->isVolatile())
81 return false;
82 } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
83 if (!II->isLifetimeStartOrEnd() && !II->isDroppable() &&
84 II->getIntrinsicID() != Intrinsic::fake_use)
85 return false;
86 } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
87 if (!onlyUsedByLifetimeMarkers(BCI))
88 return false;
89 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
90 if (!GEPI->hasAllZeroIndices())
91 return false;
92 if (!onlyUsedByLifetimeMarkers(GEPI))
93 return false;
94 } else if (const AddrSpaceCastInst *ASCI = dyn_cast<AddrSpaceCastInst>(U)) {
95 if (!onlyUsedByLifetimeMarkersOrDroppableInsts(ASCI))
96 return false;
97 } else {
98 return false;
99 }
100 }
101
102 return true;
103}
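// Illustrative sketch (not part of this file): building an alloca that passes
// the predicate above with IRBuilder. `BB` is an assumed basic block of a
// function under construction.
//
//   IRBuilder<> B(BB);
//   AllocaInst *AI = B.CreateAlloca(B.getInt32Ty());
//   B.CreateStore(B.getInt32(42), AI);               // whole-value store
//   Value *V = B.CreateLoad(B.getInt32Ty(), AI);     // direct, non-volatile
//   assert(isAllocaPromotable(AI));
//
// Storing the alloca's address itself, passing it to an arbitrary call, or a
// volatile access would make isAllocaPromotable() return false.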
104
105namespace {
106
107static void createDebugValue(DIBuilder &DIB, Value *NewValue,
108 DILocalVariable *Variable,
109 DIExpression *Expression, const DebugLoc &DL,
110 DbgVariableRecord *InsertBefore) {
111 // FIXME: Merge these two functions now that DIBuilder supports
112// DbgVariableRecords. We need the API to accept DbgVariableRecords as an
113 // insert point for that to work.
114 (void)DIB;
115 DbgVariableRecord::createDbgVariableRecord(NewValue, Variable, Expression, DL,
116 *InsertBefore);
117}
118
119/// Helper for updating assignment tracking debug info when promoting allocas.
120class AssignmentTrackingInfo {
121 /// DbgAssignIntrinsics linked to the alloca with at most one per variable
122 /// fragment. (i.e. it may not be a comprehensive set if there are multiple
123 /// dbg.assigns for one variable fragment).
124 SmallVector<DbgVariableRecord *> DVRAssigns;
125
126public:
127 void init(AllocaInst *AI) {
128 SmallSet<DebugVariable, 2> Vars;
129 for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(AI)) {
130 if (Vars.insert(DebugVariable(DVR)).second)
131 DVRAssigns.push_back(DVR);
132 }
133 }
134
135 /// Update assignment tracking debug info for the to-be-deleted store
136 /// \p ToDelete that stores to this alloca.
137 void updateForDeletedStore(
138 StoreInst *ToDelete, DIBuilder &DIB,
139 SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) const {
140 // There's nothing to do if the alloca doesn't have any variables using
141 // assignment tracking.
142 if (DVRAssigns.empty())
143 return;
144
145 // Insert a dbg.value where the linked dbg.assign is and remember to delete
146 // the dbg.assign later. Demoting to dbg.value isn't necessary for
147 // correctness but does reduce compile time and memory usage by reducing
148 // unnecessary function-local metadata. Remember that we've seen a
149 // dbg.assign for each variable fragment for the untracked store handling
150 // (after this loop).
151 SmallSet<DebugVariableAggregate, 2> VarHasDbgAssignForStore;
152 auto InsertValueForAssign = [&](auto *DbgAssign, auto *&AssignList) {
153 VarHasDbgAssignForStore.insert(DebugVariableAggregate(DbgAssign));
154 AssignList->insert(DbgAssign);
155 createDebugValue(DIB, DbgAssign->getValue(), DbgAssign->getVariable(),
156 DbgAssign->getExpression(), DbgAssign->getDebugLoc(),
157 DbgAssign);
158 };
159 for (auto *Assign : at::getDVRAssignmentMarkers(ToDelete))
160 InsertValueForAssign(Assign, DVRAssignsToDelete);
161
162 // It's possible for variables using assignment tracking to have no
163 // dbg.assign linked to this store. These are variables in DVRAssigns that
164 // are missing from VarHasDbgAssignForStore. Since there isn't a dbg.assign
165 // to mark the assignment - and the store is going to be deleted - insert a
166 // dbg.value to do that now. An untracked store may be either one that
167 // cannot be represented using assignment tracking (non-const offset or
168 // size) or one that is trackable but has had its DIAssignID attachment
169 // dropped accidentally.
170 auto ConvertUnlinkedAssignToValue = [&](DbgVariableRecord *Assign) {
171 if (VarHasDbgAssignForStore.contains(DebugVariableAggregate(Assign)))
172 return;
173 ConvertDebugDeclareToDebugValue(Assign, ToDelete, DIB);
174 };
175 for_each(DVRAssigns, ConvertUnlinkedAssignToValue);
176 }
177
178 /// Update assignment tracking debug info for the newly inserted PHI
179 /// \p NewPhi.
180 void updateForNewPhi(PHINode *NewPhi, DIBuilder &DIB) const {
181 // Regardless of the position of dbg.assigns relative to stores, the
182 // incoming values into a new PHI should be the same for the (imaginary)
183 // debug-phi.
184 for (auto *DVR : DVRAssigns)
185 ConvertDebugDeclareToDebugValue(DVR, NewPhi, DIB);
186 }
187
188 void clear() { DVRAssigns.clear(); }
189 bool empty() { return DVRAssigns.empty(); }
190};
191
192struct AllocaInfo {
193 using DPUserVec = SmallVector<DbgVariableRecord *, 1>;
194
195 SmallVector<BasicBlock *, 32> DefiningBlocks;
196 SmallVector<BasicBlock *, 32> UsingBlocks;
197
198 StoreInst *OnlyStore;
199 BasicBlock *OnlyBlock;
200 bool OnlyUsedInOneBlock;
201
202 /// Debug users of the alloca - does not include dbg.assign intrinsics.
203 DPUserVec DPUsers;
204 /// Helper to update assignment tracking debug info.
205 AssignmentTrackingInfo AssignmentTracking;
206
207 void clear() {
208 DefiningBlocks.clear();
209 UsingBlocks.clear();
210 OnlyStore = nullptr;
211 OnlyBlock = nullptr;
212 OnlyUsedInOneBlock = true;
213 DPUsers.clear();
214 AssignmentTracking.clear();
215 }
216
217 /// Scan the uses of the specified alloca, filling in the AllocaInfo used
218 /// by the rest of the pass to reason about the uses of this alloca.
219 void AnalyzeAlloca(AllocaInst *AI) {
220 clear();
221
222 // As we scan the uses of the alloca instruction, keep track of stores,
223 // and decide whether all of the loads and stores to the alloca are within
224 // the same basic block.
225 for (User *U : AI->users()) {
226 Instruction *User = cast<Instruction>(U);
227
228 if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
229 // Remember the basic blocks which define new values for the alloca
230 DefiningBlocks.push_back(SI->getParent());
231 OnlyStore = SI;
232 } else {
233 LoadInst *LI = cast<LoadInst>(User);
234 // Otherwise it must be a load instruction, keep track of variable
235 // reads.
236 UsingBlocks.push_back(LI->getParent());
237 }
238
239 if (OnlyUsedInOneBlock) {
240 if (!OnlyBlock)
241 OnlyBlock = User->getParent();
242 else if (OnlyBlock != User->getParent())
243 OnlyUsedInOneBlock = false;
244 }
245 }
246 SmallVector<DbgVariableRecord *> AllDPUsers;
247 findDbgUsers(AI, AllDPUsers);
248 std::copy_if(AllDPUsers.begin(), AllDPUsers.end(),
249 std::back_inserter(DPUsers),
250 [](DbgVariableRecord *DVR) { return !DVR->isDbgAssign(); });
251 AssignmentTracking.init(AI);
252 }
253};
254
255template <typename T> class VectorWithUndo {
256 SmallVector<T, 8> Vals;
257 SmallVector<std::pair<size_t, T>, 8> Undo;
258
259public:
260 void undo(size_t S) {
261 assert(S <= Undo.size());
262 while (S < Undo.size()) {
263 Vals[Undo.back().first] = Undo.back().second;
264 Undo.pop_back();
265 }
266 }
267
268 void resize(size_t Sz) { Vals.resize(Sz); }
269
270 size_t undoSize() const { return Undo.size(); }
271
272 const T &operator[](size_t Idx) const { return Vals[Idx]; }
273
274 void set(size_t Idx, const T &Val) {
275 if (Vals[Idx] == Val)
276 return;
277 Undo.emplace_back(Idx, Vals[Idx]);
278 Vals[Idx] = Val;
279 }
280
281 void init(size_t Idx, const T &Val) {
282 assert(Undo.empty());
283 Vals[Idx] = Val;
284 }
285};
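// Usage sketch (illustrative only): set() logs the previous value so a later
// undo() can roll the vector back to an earlier snapshot taken via undoSize().
//
//   VectorWithUndo<int> V;
//   V.resize(2);
//   V.init(0, 10);                 // initial state, not recorded for undo
//   size_t Snapshot = V.undoSize();
//   V.set(0, 20);                  // records (0, 10) on the undo stack
//   V.undo(Snapshot);              // V[0] is 10 again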
286
287/// Data package used by RenamePass().
288struct RenamePassData {
289 RenamePassData(BasicBlock *B, BasicBlock *P, size_t V, size_t L)
290 : BB(B), Pred(P), UndoVals(V), UndoLocs(L) {}
291
292 BasicBlock *BB;
293 BasicBlock *Pred;
294
295 size_t UndoVals;
296 size_t UndoLocs;
297};
298
299/// This assigns and keeps a per-bb relative ordering of load/store
300/// instructions in the block that directly load or store an alloca.
301///
302/// This functionality is important because it avoids scanning large basic
303/// blocks multiple times when promoting many allocas in the same block.
304class LargeBlockInfo {
305 /// For each instruction that we track, keep the index of the
306 /// instruction.
307 ///
308 /// The index starts out as the number of the instruction from the start of
309 /// the block.
310 DenseMap<const Instruction *, unsigned> InstNumbers;
311
312public:
313
314 /// This code only looks at accesses to allocas.
315 static bool isInterestingInstruction(const Instruction *I) {
316 return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
317 (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
318 }
319
320 /// Get or calculate the index of the specified instruction.
321 unsigned getInstructionIndex(const Instruction *I) {
322 assert(isInterestingInstruction(I) &&
323 "Not a load/store to/from an alloca?");
324
325 // If we already have this instruction number, return it.
326 DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
327 if (It != InstNumbers.end())
328 return It->second;
329
330 // Scan the whole block to get the instruction. This accumulates
331 // information for every interesting instruction in the block, in order to
332 // avoid gratuitous rescans.
333 const BasicBlock *BB = I->getParent();
334 unsigned InstNo = 0;
335 for (const Instruction &BBI : *BB)
336 if (isInterestingInstruction(&BBI))
337 InstNumbers[&BBI] = InstNo++;
338 It = InstNumbers.find(I);
339
340 assert(It != InstNumbers.end() && "Didn't insert instruction?");
341 return It->second;
342 }
343
344 void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
345
346 void clear() { InstNumbers.clear(); }
347};
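// Usage sketch (illustrative only; SI and LI are assumed store/load users of
// an alloca): the first query scans the parent block once and numbers every
// interesting instruction; later queries in the same block are map lookups.
//
//   LargeBlockInfo LBI;
//   unsigned StoreIdx = LBI.getInstructionIndex(SI); // scans SI's block once
//   unsigned LoadIdx = LBI.getInstructionIndex(LI);  // cached if same block
//   bool StoreIsBeforeLoad = StoreIdx < LoadIdx;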
348
349struct PromoteMem2Reg {
350 /// The alloca instructions being promoted.
351 std::vector<AllocaInst *> Allocas;
352
353 DominatorTree &DT;
354 DIBuilder DIB;
355
356 /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
357 AssumptionCache *AC;
358
359 const SimplifyQuery SQ;
360
361 /// Reverse mapping of Allocas.
362 DenseMap<AllocaInst *, unsigned> AllocaLookup;
363
364 /// The PhiNodes we're adding.
365 ///
366 /// That map is used to simplify some Phi nodes as we iterate over it, so
367 /// it should have deterministic iterators. We could use a MapVector, but
368 /// since basic blocks have numbers, using these is more efficient.
369 DenseMap<std::pair<unsigned, unsigned>, PHINode *> NewPhiNodes;
370
371 /// For each PHI node, keep track of which entry in Allocas it corresponds
372 /// to.
373 DenseMap<PHINode *, unsigned> PhiToAllocaMap;
374
375 /// For each alloca, we keep track of the dbg.declare record that
376 /// describes it, if any, so that we can convert it to a dbg.value
377 /// record if the alloca gets promoted.
378 SmallVector<AllocaInfo::DPUserVec, 8> AllocaDPUsers;
379
380 /// For each alloca, keep an instance of a helper class that gives us an easy
381 /// way to update assignment tracking debug info if the alloca is promoted.
382 SmallVector<AssignmentTrackingInfo, 8> AllocaATInfo;
383 /// A set of dbg.assigns to delete because they've been demoted to
384 /// dbg.values. Call cleanUpDbgAssigns to delete them.
385 SmallPtrSet<DbgVariableRecord *, 8> DVRAssignsToDelete;
386
387 /// The set of basic blocks the renamer has already visited.
388 BitVector Visited;
389
390 /// Lazily compute the number of predecessors a block has, indexed by block
391 /// number.
392 SmallVector<unsigned> BBNumPreds;
393
394 /// The state of incoming values for the current DFS step.
395 VectorWithUndo<Value *> IncomingVals;
396
397 /// The state of incoming locations for the current DFS step.
398 VectorWithUndo<DebugLoc> IncomingLocs;
399
400 // DFS work stack.
401 SmallVector<RenamePassData, 8> Worklist;
402
403 /// Whether the function has the no-signed-zeros-fp-math attribute set.
404 bool NoSignedZeros = false;
405
406public:
407 PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
408 AssumptionCache *AC)
409 : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
410 DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
411 AC(AC), SQ(DT.getRoot()->getDataLayout(),
412 nullptr, &DT, AC) {}
413
414 void run();
415
416private:
417 void RemoveFromAllocasList(unsigned &AllocaIdx) {
418 Allocas[AllocaIdx] = Allocas.back();
419 Allocas.pop_back();
420 --AllocaIdx;
421 }
422
423 unsigned getNumPreds(const BasicBlock *BB) {
424 // BBNumPreds is resized to getMaxBlockNumber() at the beginning.
425 unsigned &NP = BBNumPreds[BB->getNumber()];
426 if (NP == 0)
427 NP = pred_size(BB) + 1;
428 return NP - 1;
429 }
430
431 void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
432 const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
433 SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
434 void RenamePass(BasicBlock *BB, BasicBlock *Pred);
435 bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
436
437 /// Delete dbg.assigns that have been demoted to dbg.values.
438 void cleanUpDbgAssigns() {
439 for (auto *DVR : DVRAssignsToDelete)
440 DVR->eraseFromParent();
441 DVRAssignsToDelete.clear();
442 }
443
444 void pushToWorklist(BasicBlock *BB, BasicBlock *Pred) {
445 Worklist.emplace_back(BB, Pred, IncomingVals.undoSize(),
446 IncomingLocs.undoSize());
447 }
448
449 RenamePassData popFromWorklist() {
450 RenamePassData R = Worklist.back();
451 Worklist.pop_back();
452 IncomingVals.undo(R.UndoVals);
453 IncomingLocs.undo(R.UndoLocs);
454 return R;
455 }
456};
457
458} // end anonymous namespace
459
460/// Given a LoadInst LI this adds assume(LI != null) after it.
461static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
462 Function *AssumeIntrinsic =
463 Intrinsic::getOrInsertDeclaration(LI->getModule(), Intrinsic::assume);
464 ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
465 Constant::getNullValue(LI->getType()));
466 LoadNotNull->insertAfter(LI->getIterator());
467 CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
468 CI->insertAfter(LoadNotNull->getIterator());
469 AC->registerAssumption(cast<AssumeInst>(CI));
470}
471
472static void convertMetadataToAssumes(LoadInst *LI, Value *Val,
473 const DataLayout &DL, AssumptionCache *AC,
474 const DominatorTree *DT) {
475 if (isa<UndefValue>(Val) && LI->hasMetadata(LLVMContext::MD_noundef)) {
476 // Insert non-terminator unreachable.
477 LLVMContext &Ctx = LI->getContext();
478 new StoreInst(ConstantInt::getTrue(Ctx),
479 PoisonValue::get(PointerType::getUnqual(Ctx)),
480 /*isVolatile=*/false, Align(1), LI->getIterator());
481 return;
482 }
483
484 // If the load was marked as nonnull we don't want to lose that information
485 // when we erase this Load. So we preserve it with an assume. As !nonnull
486 // returns poison while assume violations are immediate undefined behavior,
487 // we can only do this if the value is known non-poison.
488 if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
489 LI->getMetadata(LLVMContext::MD_noundef) &&
490 !isKnownNonZero(Val, SimplifyQuery(DL, DT, AC, LI)))
491 addAssumeNonNull(AC, LI);
492}
493
494static void removeIntrinsicUsers(AllocaInst *AI) {
495 // Knowing that this alloca is promotable, we know that it's safe to kill all
496 // instructions except for load and store.
497
498 for (Use &U : llvm::make_early_inc_range(AI->uses())) {
499 Instruction *I = cast<Instruction>(U.getUser());
500 if (isa<LoadInst>(I) || isa<StoreInst>(I))
501 continue;
502
503 // Drop the use of AI in droppable instructions.
504 if (I->isDroppable()) {
505 I->dropDroppableUse(U);
506 continue;
507 }
508
509 if (!I->getType()->isVoidTy()) {
510 // The only users of this bitcast/GEP instruction are lifetime intrinsics.
511 // Follow the use/def chain to erase them now instead of leaving it for
512 // dead code elimination later.
513 for (Use &UU : llvm::make_early_inc_range(I->uses())) {
514 Instruction *Inst = cast<Instruction>(UU.getUser());
515
516 // Drop the use of I in droppable instructions.
517 if (Inst->isDroppable()) {
518 Inst->dropDroppableUse(UU);
519 continue;
520 }
521 Inst->eraseFromParent();
522 }
523 }
524 I->eraseFromParent();
525 }
526}
527
528/// Rewrite as many loads as possible given a single store.
529///
530/// When there is only a single store, we can use the domtree to trivially
531/// replace all of the dominated loads with the stored value. Do so, and return
532/// true if this has successfully promoted the alloca entirely. If this returns
533/// false there were some loads which were not dominated by the single store
534/// and thus must be phi-ed with undef. We fall back to the standard alloca
535/// promotion algorithm in that case.
536static bool rewriteSingleStoreAlloca(
537 AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL,
538 DominatorTree &DT, AssumptionCache *AC,
539 SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
540 StoreInst *OnlyStore = Info.OnlyStore;
541 Value *ReplVal = OnlyStore->getOperand(0);
542 // Loads may either load the stored value or uninitialized memory (undef).
543 // If the stored value may be poison, then replacing an uninitialized memory
544 // load with it would be incorrect. If the store dominates the load, we know
545 // it is always initialized.
546 bool RequireDominatingStore =
547 isa<Instruction>(ReplVal) || !isGuaranteedNotToBePoison(ReplVal);
548 BasicBlock *StoreBB = OnlyStore->getParent();
549 int StoreIndex = -1;
550
551 // Clear out UsingBlocks. We will reconstruct it here if needed.
552 Info.UsingBlocks.clear();
553
554 for (User *U : make_early_inc_range(AI->users())) {
555 Instruction *UserInst = cast<Instruction>(U);
556 if (UserInst == OnlyStore)
557 continue;
558 LoadInst *LI = cast<LoadInst>(UserInst);
559
560 // Okay, if we have a load from the alloca, we want to replace it with the
561 // only value stored to the alloca. We can do this if the value is
562 // dominated by the store. If not, we use the rest of the mem2reg machinery
563 // to insert the phi nodes as needed.
564 if (RequireDominatingStore) {
565 if (LI->getParent() == StoreBB) {
566 // If we have a use that is in the same block as the store, compare the
567 // indices of the two instructions to see which one came first. If the
568 // load came before the store, we can't handle it.
569 if (StoreIndex == -1)
570 StoreIndex = LBI.getInstructionIndex(OnlyStore);
571
572 if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
573 // Can't handle this load, bail out.
574 Info.UsingBlocks.push_back(StoreBB);
575 continue;
576 }
577 } else if (!DT.dominates(StoreBB, LI->getParent())) {
578 // If the load and store are in different blocks, use BB dominance to
579 // check their relationships. If the store doesn't dom the use, bail
580 // out.
581 Info.UsingBlocks.push_back(LI->getParent());
582 continue;
583 }
584 }
585
586 // Otherwise, we *can* safely rewrite this load.
587 // If the replacement value is the load, this must occur in unreachable
588 // code.
589 if (ReplVal == LI)
590 ReplVal = PoisonValue::get(LI->getType());
591
592 convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);
593 LI->replaceAllUsesWith(ReplVal);
594 LI->eraseFromParent();
595 LBI.deleteValue(LI);
596 }
597
598 // Finally, after the scan, check to see if the store is all that is left.
599 if (!Info.UsingBlocks.empty())
600 return false; // If not, we'll have to fall back for the remainder.
601
602 DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
603 // Update assignment tracking info for the store we're going to delete.
604 Info.AssignmentTracking.updateForDeletedStore(Info.OnlyStore, DIB,
605 DVRAssignsToDelete);
606
607 // Record debuginfo for the store and remove the declaration's
608 // debuginfo.
609 for (DbgVariableRecord *DbgItem : Info.DPUsers) {
610 if (DbgItem->isAddressOfVariable()) {
611 ConvertDebugDeclareToDebugValue(DbgItem, Info.OnlyStore, DIB);
612 DbgItem->eraseFromParent();
613 } else if (DbgItem->isValueOfVariable() &&
614 DbgItem->getExpression()->startsWithDeref()) {
615 InsertDebugValueAtStoreLoc(DbgItem, Info.OnlyStore, DIB);
616 DbgItem->eraseFromParent();
617 } else if (DbgItem->getExpression()->startsWithDeref()) {
618 DbgItem->eraseFromParent();
619 }
620 }
621
622 // Remove dbg.assigns linked to the alloca as these are now redundant.
623 at::deleteAssignmentMarkers(AI);
624
625 // Remove the (now dead) store and alloca.
626 Info.OnlyStore->eraseFromParent();
627 LBI.deleteValue(Info.OnlyStore);
628
629 AI->eraseFromParent();
630 return true;
631}
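// Source-level intuition for the single-store case (illustrative only):
//
//   int x;           // the alloca
//   x = f();         // the only store
//   use(x);          // dominated load: rewritten to the stored value
//   if (c)
//     use(x);        // also dominated: rewritten as well
//
// A load that is not dominated by the store (e.g. one that can execute before
// the store) is left in Info.UsingBlocks, and we fall back to the general
// PHI-placement path.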
632
633/// Many allocas are only used within a single basic block. If this is the
634/// case, avoid traversing the CFG and inserting a lot of potentially useless
635/// PHI nodes by just performing a single linear pass over the basic block
636/// using the Alloca.
637///
638/// If we cannot promote this alloca (because it is read before it is written),
639/// return false. This is necessary in cases where, due to control flow, the
640/// alloca is undefined only on some control flow paths. e.g. code like
641/// this is correct in LLVM IR:
642/// // A is an alloca with no stores so far
643/// for (...) {
644/// int t = *A;
645/// if (!first_iteration)
646/// use(t);
647/// *A = 42;
648/// }
649static bool promoteSingleBlockAlloca(
650 AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI,
651 const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC,
652 SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
653 // The trickiest case to handle is when we have large blocks. Because of this,
654 // this code is optimized assuming that large blocks happen. This does not
655 // significantly pessimize the small block case. This uses LargeBlockInfo to
656 // make it efficient to get the index of various operations in the block.
657
658 // Walk the use-def list of the alloca, getting the locations of all stores.
659 using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
660 StoresByIndexTy StoresByIndex;
661
662 for (User *U : AI->users())
663 if (StoreInst *SI = dyn_cast<StoreInst>(U))
664 StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
665
666 // Sort the stores by their index, making it efficient to do a lookup with a
667 // binary search.
668 llvm::sort(StoresByIndex, less_first());
669
670 // Walk all of the loads from this alloca, replacing them with the nearest
671 // store above them, if any.
672 for (User *U : make_early_inc_range(AI->users())) {
673 LoadInst *LI = dyn_cast<LoadInst>(U);
674 if (!LI)
675 continue;
676
677 unsigned LoadIdx = LBI.getInstructionIndex(LI);
678
679 // Find the nearest store that has a lower index than this load.
680 StoresByIndexTy::iterator I = llvm::lower_bound(
681 StoresByIndex,
682 std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
683 less_first());
684 Value *ReplVal;
685 if (I == StoresByIndex.begin()) {
686 if (StoresByIndex.empty())
687 // If there are no stores, the load takes the undef value.
688 ReplVal = UndefValue::get(LI->getType());
689 else
690 // There is no store before this load, bail out (load may be affected
691 // by the following stores - see main comment).
692 return false;
693 } else {
694 // Otherwise, there was a store before this load, the load takes its
695 // value.
696 ReplVal = std::prev(I)->second->getOperand(0);
697 }
698
699 convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);
700
701 // If the replacement value is the load, this must occur in unreachable
702 // code.
703 if (ReplVal == LI)
704 ReplVal = PoisonValue::get(LI->getType());
705
706 LI->replaceAllUsesWith(ReplVal);
707 LI->eraseFromParent();
708 LBI.deleteValue(LI);
709 }
710
711 // Remove the (now dead) stores and alloca.
712 DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
713 while (!AI->use_empty()) {
714 StoreInst *SI = cast<StoreInst>(AI->user_back());
715 // Update assignment tracking info for the store we're going to delete.
716 Info.AssignmentTracking.updateForDeletedStore(SI, DIB, DVRAssignsToDelete);
717 // Record debuginfo for the store before removing it.
718 for (DbgVariableRecord *DbgItem : Info.DPUsers) {
719 if (DbgItem->isAddressOfVariable()) {
720 ConvertDebugDeclareToDebugValue(DbgItem, SI, DIB);
721 }
722 }
723
724 SI->eraseFromParent();
725 LBI.deleteValue(SI);
726 }
727
728 // Remove dbg.assigns linked to the alloca as these are now redundant.
729 at::deleteAssignmentMarkers(AI);
730 AI->eraseFromParent();
731
732 // The alloca's debuginfo can be removed as well.
733 for (DbgVariableRecord *DbgItem : Info.DPUsers) {
734 if (DbgItem->isAddressOfVariable() ||
735 DbgItem->getExpression()->startsWithDeref())
736 DbgItem->eraseFromParent();
737 }
738
739 ++NumLocalPromoted;
740 return true;
741}
742
743void PromoteMem2Reg::run() {
744 Function &F = *DT.getRoot()->getParent();
745
746 AllocaATInfo.resize(Allocas.size());
747 AllocaDPUsers.resize(Allocas.size());
748
749 AllocaInfo Info;
750 LargeBlockInfo LBI;
751 ForwardIDFCalculator IDF(DT);
752
753 NoSignedZeros = F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool();
754
755 for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
756 AllocaInst *AI = Allocas[AllocaNum];
757
758 assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
759 assert(AI->getParent()->getParent() == &F &&
760 "All allocas should be in the same function, which is same as DF!");
761
762 removeIntrinsicUsers(AI);
763
764 if (AI->use_empty()) {
765 // If there are no uses of the alloca, just delete it now.
766 AI->eraseFromParent();
767
768 // Remove the alloca from the Allocas list, since it has been processed
769 RemoveFromAllocasList(AllocaNum);
770 ++NumDeadAlloca;
771 continue;
772 }
773
774 // Calculate the set of read and write-locations for each alloca. This is
775 // analogous to finding the 'uses' and 'definitions' of each variable.
776 Info.AnalyzeAlloca(AI);
777
778 // If there is only a single store to this value, replace any loads of
779 // it that are directly dominated by the definition with the value stored.
780 if (Info.DefiningBlocks.size() == 1) {
781 if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC,
782 &DVRAssignsToDelete)) {
783 // The alloca has been processed, move on.
784 RemoveFromAllocasList(AllocaNum);
785 ++NumSingleStore;
786 continue;
787 }
788 }
789
790 // If the alloca is only read and written in one basic block, just perform a
791 // linear sweep over the block to eliminate it.
792 if (Info.OnlyUsedInOneBlock &&
793 promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC,
794 &DVRAssignsToDelete)) {
795 // The alloca has been processed, move on.
796 RemoveFromAllocasList(AllocaNum);
797 continue;
798 }
799
800 // Initialize BBNumPreds lazily
801 if (BBNumPreds.empty())
802 BBNumPreds.resize(F.getMaxBlockNumber());
803
804 // Remember the dbg.declare record describing this alloca, if any.
805 if (!Info.AssignmentTracking.empty())
806 AllocaATInfo[AllocaNum] = Info.AssignmentTracking;
807 if (!Info.DPUsers.empty())
808 AllocaDPUsers[AllocaNum] = Info.DPUsers;
809
810 // Keep the reverse mapping of the 'Allocas' array for the rename pass.
811 AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
812
813 // Unique the set of defining blocks for efficient lookup.
814 SmallPtrSet<BasicBlock *, 32> DefBlocks(llvm::from_range,
815 Info.DefiningBlocks);
816
817 // Determine which blocks the value is live in. These are blocks which lead
818 // to uses.
819 SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
820 ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
821
822 // At this point, we're committed to promoting the alloca using IDF's, and
823 // the standard SSA construction algorithm. Determine which blocks need phi
824 // nodes and see if we can optimize out some work by avoiding insertion of
825 // dead phi nodes.
826 IDF.setLiveInBlocks(LiveInBlocks);
827 IDF.setDefiningBlocks(DefBlocks);
828 SmallVector<BasicBlock *, 32> PHIBlocks;
829 IDF.calculate(PHIBlocks);
830 llvm::sort(PHIBlocks, [](BasicBlock *A, BasicBlock *B) {
831 return A->getNumber() < B->getNumber();
832 });
833
834 unsigned CurrentVersion = 0;
835 for (BasicBlock *BB : PHIBlocks)
836 QueuePhiNode(BB, AllocaNum, CurrentVersion);
837 }
838
839 if (Allocas.empty()) {
840 cleanUpDbgAssigns();
841 return; // All of the allocas must have been trivial!
842 }
843 LBI.clear();
844
845 // Set the incoming values for the basic block to be null values for all of
846 // the alloca's. We do this in case there is a load of a value that has not
847 // been stored yet. In this case, it will get this null value.
848 IncomingVals.resize(Allocas.size());
849 for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
850 IncomingVals.init(i, UndefValue::get(Allocas[i]->getAllocatedType()));
851
852 // When handling debug info, treat all incoming values as if they have
853 // compiler-generated (empty) locations, representing the uninitialized
854 // alloca, until proven otherwise.
855 IncomingLocs.resize(Allocas.size());
856 for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
857 IncomingLocs.init(i, DebugLoc::getCompilerGenerated());
858
859 // The renamer uses the Visited set to avoid infinite loops.
860 Visited.resize(F.getMaxBlockNumber(), false);
861
862 // Add the entry block to the worklist, with a null predecessor.
863 pushToWorklist(&F.front(), nullptr);
864
865 do {
866 RenamePassData RPD = popFromWorklist();
867 RenamePass(RPD.BB, RPD.Pred);
868 } while (!Worklist.empty());
869
870 // Remove the allocas themselves from the function.
871 for (Instruction *A : Allocas) {
872 // Remove dbg.assigns linked to the alloca as these are now redundant.
873 at::deleteAssignmentMarkers(A);
874 // If there are any uses of the alloca instructions left, they must be in
875 // unreachable basic blocks that were not processed by walking the dominator
876 // tree. Just delete the users now.
877 if (!A->use_empty())
878 A->replaceAllUsesWith(PoisonValue::get(A->getType()));
879 A->eraseFromParent();
880 }
881
882 // Remove alloca's dbg.declare intrinsics from the function.
883 for (auto &DbgUsers : AllocaDPUsers) {
884 for (DbgVariableRecord *DbgItem : DbgUsers)
885 if (DbgItem->isAddressOfVariable() ||
886 DbgItem->getExpression()->startsWithDeref())
887 DbgItem->eraseFromParent();
888 }
889
890 // Loop over all of the PHI nodes and see if there are any that we can get
891 // rid of because they merge all of the same incoming values. This can
892 // happen due to undef values coming into the PHI nodes. This process is
893 // iterative, because eliminating one PHI node can cause others to be removed.
894 bool EliminatedAPHI = true;
895 while (EliminatedAPHI) {
896 EliminatedAPHI = false;
897
898 // Iterating over NewPhiNodes is deterministic, so it is safe to try to
899 // simplify and RAUW them as we go. If it was not, we could add uses to
900 // the values we replace with in a non-deterministic order, thus creating
901 // non-deterministic def->use chains.
902 for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
903 I = NewPhiNodes.begin(),
904 E = NewPhiNodes.end();
905 I != E;) {
906 PHINode *PN = I->second;
907
908 // If this PHI node merges one value and/or undefs, get the value.
909 if (Value *V = simplifyInstruction(PN, SQ)) {
910 PN->replaceAllUsesWith(V);
911 PN->eraseFromParent();
912 NewPhiNodes.erase(I++);
913 EliminatedAPHI = true;
914 continue;
915 }
916 ++I;
917 }
918 }
919
920 // At this point, the renamer has added entries to PHI nodes for all reachable
921 // code. Unfortunately, there may be unreachable blocks which the renamer
922 // hasn't traversed. If this is the case, the PHI nodes may not
923 // have incoming values for all predecessors. Loop over all PHI nodes we have
924 // created, inserting poison values if they are missing any incoming values.
925 for (const auto &PhiNode : NewPhiNodes) {
926 // We want to do this once per basic block. As such, only process a block
927 // when we find the PHI that is the first entry in the block.
928 PHINode *SomePHI = PhiNode.second;
929 BasicBlock *BB = SomePHI->getParent();
930 if (&BB->front() != SomePHI)
931 continue;
932
933 // Only do work here if the PHI nodes are missing incoming values. We
934 // know that all PHI nodes that were inserted in a block will have the same
935 // number of incoming values, so we can just check any of them.
936 if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
937 continue;
938
939 // Get the preds for BB.
940 SmallVector<BasicBlock *, 16> Preds(predecessors(BB));
941
942 // Ok, now we know that all of the PHI nodes are missing entries for some
943 // basic blocks. Start by sorting the incoming predecessors for efficient
944 // access.
945 auto CompareBBNumbers = [](BasicBlock *A, BasicBlock *B) {
946 return A->getNumber() < B->getNumber();
947 };
948 llvm::sort(Preds, CompareBBNumbers);
949
950 // Now we loop through all BB's which have entries in SomePHI and remove
951 // them from the Preds list.
952 for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
953 // Do a log(n) search of the Preds list for the entry we want.
954 SmallVectorImpl<BasicBlock *>::iterator EntIt = llvm::lower_bound(
955 Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
956 assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
957 "PHI node has entry for a block which is not a predecessor!");
958
959 // Remove the entry
960 Preds.erase(EntIt);
961 }
962
963 // At this point, the blocks left in the preds list must have dummy
964 // entries inserted into every PHI node for the block. Update all the phi
965 // nodes in this block that we are inserting (there could be phis before
966 // mem2reg runs).
967 unsigned NumBadPreds = SomePHI->getNumIncomingValues();
968 BasicBlock::iterator BBI = BB->begin();
969 while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
970 SomePHI->getNumIncomingValues() == NumBadPreds) {
971 Value *PoisonVal = PoisonValue::get(SomePHI->getType());
972 for (BasicBlock *Pred : Preds)
973 SomePHI->addIncoming(PoisonVal, Pred);
974 }
975 }
976
977 NewPhiNodes.clear();
978 cleanUpDbgAssigns();
979}
980
981/// Determine which blocks the value is live in.
982///
983/// These are blocks which lead to uses. Knowing this allows us to avoid
984/// inserting PHI nodes into blocks which don't lead to uses (thus, the
985/// inserted phi nodes would be dead).
986void PromoteMem2Reg::ComputeLiveInBlocks(
987 AllocaInst *AI, AllocaInfo &Info,
988 const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
989 SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
990 // To determine liveness, we must iterate through the predecessors of blocks
991 // where the def is live. Blocks are added to the worklist if we need to
992 // check their predecessors. Start with all the using blocks.
993 SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
994 Info.UsingBlocks.end());
995
996 // If any of the using blocks is also a definition block, check to see if the
997 // definition occurs before or after the use. If it happens before the use,
998 // the value isn't really live-in.
999 for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
1000 BasicBlock *BB = LiveInBlockWorklist[i];
1001 if (!DefBlocks.count(BB))
1002 continue;
1003
1004 // Okay, this is a block that both uses and defines the value. If the first
1005 // reference to the alloca is a def (store), then we know it isn't live-in.
1006 for (BasicBlock::iterator I = BB->begin();; ++I) {
1007 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1008 if (SI->getOperand(1) != AI)
1009 continue;
1010
1011 // We found a store to the alloca before a load. The alloca is not
1012 // actually live-in here.
1013 LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
1014 LiveInBlockWorklist.pop_back();
1015 --i;
1016 --e;
1017 break;
1018 }
1019
1020 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1021 // Okay, we found a load before a store to the alloca. It is actually
1022 // live into this block.
1023 if (LI->getOperand(0) == AI)
1024 break;
1025 }
1026 }
1027
1028 // Now that we have a set of blocks where the phi is live-in, recursively add
1029 // their predecessors until we find the full region the value is live.
1030 while (!LiveInBlockWorklist.empty()) {
1031 BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
1032
1033 // The block really is live in here, insert it into the set. If already in
1034 // the set, then it has already been processed.
1035 if (!LiveInBlocks.insert(BB).second)
1036 continue;
1037
1038 // Since the value is live into BB, it is either defined in a predecessor or
1039 // live into it too. Add the preds to the worklist unless they are
1040 // defining blocks.
1041 for (BasicBlock *P : predecessors(BB)) {
1042 // The value is not live into a predecessor if it defines the value.
1043 if (DefBlocks.count(P))
1044 continue;
1045
1046 // Otherwise it is, add to the worklist.
1047 LiveInBlockWorklist.push_back(P);
1048 }
1049 }
1050}
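// Worked example (illustrative only), in C-like terms for one alloca `a`:
//
//   a = 0;              // entry block: defines a
//   for (;;) {          // loop block: reads a first, then writes it
//     use(a);
//     a = next();
//     if (done()) break;
//   }
//   use(a);             // exit block: reads a
//
// The using blocks are {loop, exit}. The loop block stays live-in because its
// first access to the alloca is a load, and the predecessor walk stops at
// defining blocks (entry and loop), so LiveInBlocks = {loop, exit} and phi
// nodes are only considered where the value is actually live.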
1051
1052/// Queue a phi-node to be added to a basic-block for a specific Alloca.
1053///
1054/// Returns true if there wasn't already a phi-node for that variable
1055bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
1056 unsigned &Version) {
1057 // Look up the basic-block in question.
1058 PHINode *&PN = NewPhiNodes[std::make_pair(BB->getNumber(), AllocaNo)];
1059
1060 // If the BB already has a phi node added for the i'th alloca then we're done!
1061 if (PN)
1062 return false;
1063
1064 // Create a PhiNode using the dereferenced type... and add the phi-node to the
1065 // BasicBlock.
1066 PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
1067 Allocas[AllocaNo]->getName() + "." + Twine(Version++));
1068 PN->insertBefore(BB->begin());
1069 ++NumPHIInsert;
1070 PhiToAllocaMap[PN] = AllocaNo;
1071 return true;
1072}
1073
1074/// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
1075/// create a merged location incorporating \p DL, or to set \p DL directly.
1076static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL,
1077 bool ApplyMergedLoc) {
1078 if (ApplyMergedLoc)
1079 PN->applyMergedLocation(PN->getDebugLoc(), DL);
1080 else
1081 PN->setDebugLoc(DL);
1082}
1083
1084/// Recursively traverse the CFG of the function, renaming loads and
1085/// stores to the allocas which we are promoting.
1086///
1087/// IncomingVals indicates what value each Alloca contains on exit from the
1088/// predecessor block Pred.
1089void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred) {
1090 // If we are inserting any phi nodes into this BB, they will already be in the
1091 // block.
1092 if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
1093 // If we have PHI nodes to update, compute the number of edges from Pred to
1094 // BB.
1095 if (PhiToAllocaMap.count(APN)) {
1096 // We want to be able to distinguish between PHI nodes being inserted by
1097 // this invocation of mem2reg from those phi nodes that already existed in
1098 // the IR before mem2reg was run. We determine that APN is being inserted
1099 // because it is missing incoming edges. All other PHI nodes being
1100 // inserted by this pass of mem2reg will have the same number of incoming
1101 // operands so far. Remember this count.
1102 unsigned NewPHINumOperands = APN->getNumOperands();
1103
1104 unsigned NumEdges = llvm::count(successors(Pred), BB);
1105 assert(NumEdges && "Must be at least one edge from Pred to BB!");
1106
1107 // Add entries for all the phis.
1108 BasicBlock::iterator PNI = BB->begin();
1109 do {
1110 unsigned AllocaNo = PhiToAllocaMap[APN];
1111
1112 // Update the location of the phi node.
1113 updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
1114 APN->getNumIncomingValues() > 0);
1115
1116 // Add N incoming values to the PHI node.
1117 for (unsigned i = 0; i != NumEdges; ++i)
1118 APN->addIncoming(IncomingVals[AllocaNo], Pred);
1119
1120 // For a sequence like `return X > 0.0 ? X : -X`, this is expected to fold
1121 // to a fabs intrinsic. However, without the no-signed-zeros (nsz) flag on
1122 // the phi node generated at this stage, that folding does not happen. So,
1123 // we try to infer the nsz flag from the function attributes to enable the
1124 // folding.
1125 if (isa<FPMathOperator>(APN) && NoSignedZeros)
1126 APN->setHasNoSignedZeros(true);
1127
1128 // The currently active variable for this block is now the PHI.
1129 IncomingVals.set(AllocaNo, APN);
1130 AllocaATInfo[AllocaNo].updateForNewPhi(APN, DIB);
1131 for (DbgVariableRecord *DbgItem : AllocaDPUsers[AllocaNo])
1132 if (DbgItem->isAddressOfVariable())
1133 ConvertDebugDeclareToDebugValue(DbgItem, APN, DIB);
1134
1135 // Get the next phi node.
1136 ++PNI;
1137 APN = dyn_cast<PHINode>(PNI);
1138 if (!APN)
1139 break;
1140
1141 // Verify that it is missing entries. If not, it is not being inserted
1142 // by this mem2reg invocation so we want to ignore it.
1143 } while (APN->getNumOperands() == NewPHINumOperands);
1144 }
1145 }
1146
1147 // Don't revisit blocks.
1148 if (Visited.test(BB->getNumber()))
1149 return;
1150 Visited.set(BB->getNumber());
1151
1152 for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
1153 Instruction *I = &*II++; // get the instruction, increment iterator
1154
1155 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1156 AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
1157 if (!Src)
1158 continue;
1159
1160 DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
1161 if (AI == AllocaLookup.end())
1162 continue;
1163
1164 Value *V = IncomingVals[AI->second];
1165 convertMetadataToAssumes(LI, V, SQ.DL, AC, &DT);
1166
1167 // Anything using the load now uses the current value.
1168 LI->replaceAllUsesWith(V);
1169 LI->eraseFromParent();
1170 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1171 // Delete this instruction and mark the name as the current holder of the
1172 // value
1173 AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
1174 if (!Dest)
1175 continue;
1176
1177 DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
1178 if (ai == AllocaLookup.end())
1179 continue;
1180
1181 // what value were we writing?
1182 unsigned AllocaNo = ai->second;
1183 IncomingVals.set(AllocaNo, SI->getOperand(0));
1184
1185 // Record debuginfo for the store before removing it.
1186 IncomingLocs.set(AllocaNo, SI->getDebugLoc());
1187 AllocaATInfo[AllocaNo].updateForDeletedStore(SI, DIB,
1188 &DVRAssignsToDelete);
1189 for (DbgVariableRecord *DbgItem : AllocaDPUsers[ai->second])
1190 if (DbgItem->isAddressOfVariable())
1191 ConvertDebugDeclareToDebugValue(DbgItem, SI, DIB);
1192 SI->eraseFromParent();
1193 }
1194 }
1195
1196 // 'Recurse' to our successors.
1197
1198 // Keep track of the successors so we don't visit the same successor twice
1199 SmallPtrSet<BasicBlock *, 8> VisitedSuccs;
1200
1201 for (BasicBlock *S : reverse(successors(BB)))
1202 if (VisitedSuccs.insert(S).second)
1203 pushToWorklist(S, BB);
1204}
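// Renaming sketch (illustrative only), in C-like terms for one alloca `a`:
//
//   a = x;         // IncomingVals[a] becomes x; the store is erased
//   use(a);        // the load is erased and its uses rewritten to x
//   if (c)
//     a = y;       // IncomingVals[a] becomes y on this path only
//   use(a);        // at the merge point, the phi inserted for `a` receives x
//                  // and y from its predecessors and replaces this load
//
// pushToWorklist() snapshots the undo stacks of IncomingVals/IncomingLocs, so
// popFromWorklist() restores the state that was current at the end of the
// predecessor block before the popped block is renamed.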
1205
1206void llvm::PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
1207 AssumptionCache *AC) {
1208 // If there is nothing to do, bail out...
1209 if (Allocas.empty())
1210 return;
1211
1212 PromoteMem2Reg(Allocas, DT, AC).run();
1213}