//===- GVNHoist.cpp - Hoist scalar and load expressions -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass hoists expressions from branches to a common dominator. It uses
// GVN (global value numbering) to discover expressions computing the same
// values. The primary goals of code-hoisting are:
// 1. To reduce the code size.
// 2. In some cases, to reduce the critical path (by exposing more ILP).
//
// The algorithm factors out the reachability of values such that multiple
// queries to find reachability of values are fast. This is based on finding the
// ANTIC points in the CFG which do not change during hoisting. The ANTIC points
// are basically the dominance-frontiers in the inverse graph. So we introduce a
// data structure (CHI nodes) to keep track of values flowing out of a basic
// block. We only do this for values with multiple occurrences in the function
// as they are the potential hoistable candidates. This approach allows us to
// hoist instructions to a basic block with more than two successors, as well as
// deal with infinite loops in a trivial way.
//
// Limitations: This pass does not hoist fully redundant expressions because
// they are already handled by GVN-PRE. It is advisable to run gvn-hoist before
// and after gvn-pre because gvn-pre creates opportunities for more instructions
// to be hoisted.
//
// Hoisting may affect the performance in some cases. To mitigate that, hoisting
// is disabled in the following cases:
// 1. Scalars across calls.
// 2. GEPs when the corresponding load/store cannot be hoisted.
//===----------------------------------------------------------------------===//

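// Illustrative example (a minimal sketch, not taken from the test suite): in
// the IR below both successors of %entry compute the same address and load
// from it, so gvn-hoist can hoist one copy of the GEP and the load into
// %entry and remove the duplicates in %then and %else:
//
//   define i32 @example(ptr %p, i1 %c) {
//   entry:
//     br i1 %c, label %then, label %else
//   then:
//     %a0 = getelementptr inbounds i32, ptr %p, i64 1
//     %v0 = load i32, ptr %a0
//     br label %end
//   else:
//     %a1 = getelementptr inbounds i32, ptr %p, i64 1
//     %v1 = load i32, ptr %a1
//     br label %end
//   end:
//     %r = phi i32 [ %v0, %then ], [ %v1, %else ]
//     ret i32 %r
//   }
//
// After gvn-hoist the GEP and the load live in %entry; a later pass can then
// fold the now-trivial phi.
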
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "gvn-hoist"

STATISTIC(NumHoisted, "Number of instructions hoisted");
STATISTIC(NumRemoved, "Number of instructions removed");
STATISTIC(NumLoadsHoisted, "Number of loads hoisted");
STATISTIC(NumLoadsRemoved, "Number of loads removed");
STATISTIC(NumStoresHoisted, "Number of stores hoisted");
STATISTIC(NumStoresRemoved, "Number of stores removed");
STATISTIC(NumCallsHoisted, "Number of calls hoisted");
STATISTIC(NumCallsRemoved, "Number of calls removed");

static cl::opt<int>
    MaxHoistedThreshold("gvn-max-hoisted", cl::Hidden, cl::init(-1),
                        cl::desc("Max number of instructions to hoist "
                                 "(default unlimited = -1)"));

static cl::opt<int> MaxNumberOfBBSInPath(
    "gvn-hoist-max-bbs", cl::Hidden, cl::init(4),
    cl::desc("Max number of basic blocks on the path between "
             "hoisting locations (default = 4, unlimited = -1)"));

static cl::opt<int> MaxDepthInBB(
    "gvn-hoist-max-depth", cl::Hidden, cl::init(100),
    cl::desc("Hoist instructions from the beginning of the BB up to the "
             "maximum specified depth (default = 100, unlimited = -1)"));

static cl::opt<int>
    MaxChainLength("gvn-hoist-max-chain-length", cl::Hidden, cl::init(10),
                   cl::desc("Maximum length of dependent chains to hoist "
                            "(default = 10, unlimited = -1)"));

namespace llvm {

using BBSideEffectsSet = DenseMap<const BasicBlock *, bool>;
using SmallVecInsn = SmallVector<Instruction *, 4>;

// Each element of a hoisting list contains the basic block where to hoist and
// a list of instructions to be hoisted.
using HoistingPointInfo = std::pair<BasicBlock *, SmallVecInsn>;

using HoistingPointList = SmallVector<HoistingPointInfo, 4>;

// A map from a pair of VNs to all the instructions with those VNs.
using VNType = std::pair<unsigned, uintptr_t>;

using VNtoInsns = DenseMap<VNType, SmallVecInsn>;

// CHI keeps information about values flowing out of a basic block. It is
// similar to PHI but in the inverse graph, and used for outgoing values on each
// edge. For conciseness, it is computed only for instructions with multiple
// occurrences in the CFG because they are the only hoistable candidates.
//      A (CHI[{V, B, I1}, {V, C, I2}]
//     / \
//    /   \
//   B(I1)  C (I2)
// The value number for both I1 and I2 is V; the CHI node will save the
// instruction as well as the edge where the value is flowing to.
struct CHIArg {
  VNType VN;

  // Edge destination (shows the direction of flow), may not be where the I is.
  BasicBlock *Dest;

  // The instruction (VN) which uses the values flowing out of CHI.
  Instruction *I;

  bool operator==(const CHIArg &A) const { return VN == A.VN; }
  bool operator!=(const CHIArg &A) const { return !(*this == A); }
};
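
// As a concrete reading of the diagram above (illustrative only): if B and C
// both compute "%x = add i32 %a, %b" with value number V, then block A (the
// common dominator; a post-dominance frontier of B and C in the inverse graph)
// gets an empty CHI for V which is later filled with one CHIArg per outgoing
// edge, {V, B, I1} and {V, C, I2}. Because every successor edge of A ends up
// with an argument, the value is fully anticipable at A and the add can be
// hoisted into A, with the duplicate removed.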

using CHIIt = SmallVectorImpl<CHIArg>::iterator;
using CHIArgs = iterator_range<CHIIt>;
using OutValuesType = DenseMap<BasicBlock *, SmallVector<CHIArg, 2>>;
using InValuesType =
    DenseMap<BasicBlock *, SmallVector<std::pair<VNType, Instruction *>, 2>>;

// An invalid value number, used when inserting a single value number into
// VNtoInsns.
enum : uintptr_t { InvalidVN = ~(uintptr_t)2 };

// Records all scalar instructions that are candidates for code hoisting.
class InsnInfo {
  VNtoInsns VNtoScalars;

public:
  // Inserts I and its value number in VNtoScalars.
  void insert(Instruction *I, GVNPass::ValueTable &VN) {
    // Scalar instruction.
    unsigned V = VN.lookupOrAdd(I);
    VNtoScalars[{V, InvalidVN}].push_back(I);
  }

  const VNtoInsns &getVNTable() const { return VNtoScalars; }
};

// Records all load instructions that are candidates for code hoisting.
class LoadInfo {
  VNtoInsns VNtoLoads;

public:
  // Insert Load and the value number of its memory address in VNtoLoads.
  void insert(LoadInst *Load, GVNPass::ValueTable &VN) {
    if (Load->isSimple()) {
      unsigned V = VN.lookupOrAdd(Load->getPointerOperand());
      // With opaque pointers we may have loads from the same pointer with
      // different result types, which should be disambiguated.
      VNtoLoads[{V, (uintptr_t)Load->getType()}].push_back(Load);
    }
  }

  const VNtoInsns &getVNTable() const { return VNtoLoads; }
};

// Records all store instructions that are candidates for code hoisting.
class StoreInfo {
  VNtoInsns VNtoStores;

public:
  // Insert the Store and a hash number of the store address and the stored
  // value in VNtoStores.
  void insert(StoreInst *Store, GVNPass::ValueTable &VN) {
    if (!Store->isSimple())
      return;
    // Hash the store address and the stored value.
    Value *Ptr = Store->getPointerOperand();
    Value *Val = Store->getValueOperand();
    VNtoStores[{VN.lookupOrAdd(Ptr), VN.lookupOrAdd(Val)}].push_back(Store);
  }

  const VNtoInsns &getVNTable() const { return VNtoStores; }
};

// Records all call instructions that are candidates for code hoisting.
class CallInfo {
  VNtoInsns VNtoCallsScalars;
  VNtoInsns VNtoCallsLoads;
  VNtoInsns VNtoCallsStores;

public:
  // Insert Call and its value numbering in one of the VNtoCalls* containers.
  void insert(CallInst *Call, GVNPass::ValueTable &VN) {
    // A call that doesNotAccessMemory is handled as a Scalar,
    // onlyReadsMemory will be handled as a Load instruction,
    // all other calls will be handled as stores.
    unsigned V = VN.lookupOrAdd(Call);
    auto Entry = std::make_pair(V, InvalidVN);

    if (Call->doesNotAccessMemory())
      VNtoCallsScalars[Entry].push_back(Call);
    else if (Call->onlyReadsMemory())
      VNtoCallsLoads[Entry].push_back(Call);
    else
      VNtoCallsStores[Entry].push_back(Call);
  }

  const VNtoInsns &getScalarVNTable() const { return VNtoCallsScalars; }
  const VNtoInsns &getLoadVNTable() const { return VNtoCallsLoads; }
  const VNtoInsns &getStoreVNTable() const { return VNtoCallsStores; }
};
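
// For illustration (an assumption about typical inputs, not an exhaustive
// list): a call to a readnone intrinsic such as llvm.fabs is keyed like a
// scalar, a readonly call such as a nounwind strlen is keyed like a load, and
// any other call that may write memory is keyed like a store and therefore
// hoisted under the most conservative rules.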

// This pass hoists common computations across branches sharing a common
// dominator. The primary goal is to reduce the code size, and in some
// cases to reduce the critical path (by exposing more ILP).
class GVNHoist {
public:
  GVNHoist(DominatorTree *DT, PostDominatorTree *PDT, AliasAnalysis *AA,
           MemoryDependenceResults *MD, MemorySSA *MSSA)
      : DT(DT), PDT(PDT), AA(AA), MD(MD), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {
    MSSA->ensureOptimizedUses();
  }

  bool run(Function &F);

  // Copied from NewGVN.cpp
  // This function provides global ranking of operations so that we can place
  // them in a canonical order. Note that rank alone is not necessarily enough
  // for a complete ordering, as constants all have the same rank. However,
  // generally, we will simplify an operation with all constants so that it
  // doesn't matter what order they appear in.
  unsigned int rank(const Value *V) const;

private:
  GVNPass::ValueTable VN;
  DominatorTree *DT;
  PostDominatorTree *PDT;
  AliasAnalysis *AA;
  MemoryDependenceResults *MD;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
  DenseMap<const Value *, unsigned> DFSNumber;
  BBSideEffectsSet BBSideEffects;
  DenseSet<const BasicBlock *> HoistBarrier;
  SmallVector<BasicBlock *, 32> IDFBlocks;
  unsigned NumFuncArgs;
  const bool HoistingGeps = false;

  enum InsKind { Unknown, Scalar, Load, Store };

  // Return true when there is exception handling in BB.
  bool hasEH(const BasicBlock *BB);

  // Return true when I1 appears before I2 in the instructions of BB.
  bool firstInBB(const Instruction *I1, const Instruction *I2) {
    assert(I1->getParent() == I2->getParent());
    unsigned I1DFS = DFSNumber.lookup(I1);
    unsigned I2DFS = DFSNumber.lookup(I2);
    assert(I1DFS && I2DFS);
    return I1DFS < I2DFS;
  }

  // Return true when there are memory uses of Def in BB.
  bool hasMemoryUse(const Instruction *NewPt, MemoryDef *Def,
                    const BasicBlock *BB);

  bool hasEHhelper(const BasicBlock *BB, const BasicBlock *SrcBB,
                   int &NBBsOnAllPaths);

  // Return true when there is exception handling or a load of memory Def
  // between Def and NewPt. This function is only called for stores: Def is
  // the MemoryDef of the store to be hoisted.

  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOrLoadsOnPath(const Instruction *NewPt, MemoryDef *Def,
                          int &NBBsOnAllPaths);

  // Return true when there is exception handling between HoistPt and BB.
  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOnPath(const BasicBlock *HoistPt, const BasicBlock *SrcBB,
                   int &NBBsOnAllPaths);

  // Return true when it is safe to hoist a memory load or store U from OldPt
  // to NewPt.
  bool safeToHoistLdSt(const Instruction *NewPt, const Instruction *OldPt,
                       MemoryUseOrDef *U, InsKind K, int &NBBsOnAllPaths);

  // Return true when it is safe to hoist scalar instructions from BB to
  // HoistBB.
  bool safeToHoistScalar(const BasicBlock *HoistBB, const BasicBlock *BB,
                         int &NBBsOnAllPaths) {
    return !hasEHOnPath(HoistBB, BB, NBBsOnAllPaths);
  }

  // In the inverse CFG, the dominance frontier of basic block (BB) is the
  // point where ANTIC needs to be computed for instructions which are going
  // to be hoisted. Since this point does not change during gvn-hoist,
  // we compute it only once (on demand).
  // The idea is inspired by:
  // "Partial Redundancy Elimination in SSA Form"
  // ROBERT KENNEDY, SUN CHAN, SHIN-MING LIU, RAYMOND LO, PENG TU and FRED CHOW
  // They use a similar idea in the forward graph to find fully redundant and
  // partially redundant expressions, here it is used in the inverse graph to
  // find fully anticipable instructions at merge point (post-dominator in
  // the inverse CFG).
  // Returns the edge via which an instruction in BB will get its values.

  // Returns true when the values are flowing out to each edge.
  bool valueAnticipable(CHIArgs C, Instruction *TI) const;

  // Check if it is safe to hoist values tracked by CHI in the range
  // [Begin, End) and accumulate them in Safe.
  void checkSafety(CHIArgs C, BasicBlock *BB, InsKind K,
                   SmallVectorImpl<CHIArg> &Safe);

  using RenameStackType = DenseMap<VNType, SmallVector<Instruction *, 2>>;

  // Push all the VNs corresponding to BB into RenameStack.
  void fillRenameStack(BasicBlock *BB, InValuesType &ValueBBs,
                       RenameStackType &RenameStack);

  void fillChiArgs(BasicBlock *BB, OutValuesType &CHIBBs,
                   RenameStackType &RenameStack);

  // Walk the post-dominator tree top-down and use a stack for each value to
  // store the last value you see. When you hit a CHI from a given edge, the
  // value to use as the argument is at the top of the stack; add the value to
  // the CHI and pop.
  void insertCHI(InValuesType &ValueBBs, OutValuesType &CHIBBs) {
    auto Root = PDT->getNode(nullptr);
    if (!Root)
      return;
    // Depth first walk on PDom tree to fill the CHIargs at each PDF.
    for (auto *Node : depth_first(Root)) {
      BasicBlock *BB = Node->getBlock();
      if (!BB)
        continue;

      RenameStackType RenameStack;
      // Collect all values in BB and push to stack.
      fillRenameStack(BB, ValueBBs, RenameStack);

      // Fill outgoing values in each CHI corresponding to BB.
      fillChiArgs(BB, CHIBBs, RenameStack);
    }
  }

  // Walk all the CHI-nodes to find ones which have an empty entry and remove
  // them. Then collect all the instructions which are safe to hoist and see if
  // they form a list of anticipable values. OutValues contains CHIs
  // corresponding to each basic block.
  void findHoistableCandidates(OutValuesType &CHIBBs, InsKind K,
                               HoistingPointList &HPL);

  // Compute insertion points for each value which can be fully anticipated at
  // a dominator. HPL contains all such values.
  void computeInsertionPoints(const VNtoInsns &Map, HoistingPointList &HPL,
                              InsKind K) {
    // Sort VNs based on their rankings
    std::vector<VNType> Ranks;
    for (const auto &Entry : Map) {
      Ranks.push_back(Entry.first);
    }

    // TODO: Remove fully-redundant expressions.
    // Get instruction from the Map, assume that all the Instructions
    // with same VNs have same rank (this is an approximation).
    llvm::sort(Ranks, [this, &Map](const VNType &r1, const VNType &r2) {
      return (rank(*Map.lookup(r1).begin()) < rank(*Map.lookup(r2).begin()));
    });

    // - Sort VNs according to their rank, and start with lowest ranked VN
    // - Take a VN and for each instruction with same VN
    //   - Find the dominance frontier in the inverse graph (PDF)
    //   - Insert the chi-node at PDF
    // - Remove the chi-nodes with missing entries
    // - Remove values from CHI-nodes which do not truly flow out, e.g.,
    //   modified along the path.
    // - Collect the remaining values that are still anticipable
    ReverseIDFCalculator IDFs(*PDT);
    OutValuesType OutValue;
    InValuesType InValue;
    for (const auto &R : Ranks) {
      const SmallVecInsn &V = Map.lookup(R);
      if (V.size() < 2)
        continue;
      const VNType &VN = R;
      SmallPtrSet<BasicBlock *, 2> VNBlocks;
      for (const auto &I : V) {
        BasicBlock *BBI = I->getParent();
        if (!hasEH(BBI))
          VNBlocks.insert(BBI);
      }
      // Compute the Post Dominance Frontiers of each basic block.
      // The dominance frontier of a live block X in the reverse
      // control graph is the set of blocks upon which X is control
      // dependent. The following sequence computes the set of blocks
      // which currently have dead terminators that are control
      // dependence sources of a block which is in NewLiveBlocks.
      IDFs.setDefiningBlocks(VNBlocks);
      IDFBlocks.clear();
      IDFs.calculate(IDFBlocks);

      // Make a map of BB vs instructions to be hoisted.
      for (unsigned i = 0; i < V.size(); ++i) {
        InValue[V[i]->getParent()].push_back(std::make_pair(VN, V[i]));
      }
      // Insert empty CHI node for this VN. This is used to factor out
      // basic blocks where the ANTIC can potentially change.
      CHIArg EmptyChi = {VN, nullptr, nullptr};
      for (auto *IDFBB : IDFBlocks) {
        for (unsigned i = 0; i < V.size(); ++i) {
          // Ignore spurious PDFs.
          if (DT->properlyDominates(IDFBB, V[i]->getParent())) {
            OutValue[IDFBB].push_back(EmptyChi);
            LLVM_DEBUG(dbgs() << "\nInserting a CHI for BB: "
                              << IDFBB->getName() << ", for Insn: " << *V[i]);
          }
        }
      }
    }

    // Insert CHI args at each PDF to iterate on factored graph of
    // control dependence.
    insertCHI(InValue, OutValue);
    // Using the CHI args inserted at each PDF, find fully anticipable values.
    findHoistableCandidates(OutValue, K, HPL);
  }

  // Return true when all operands of Instr are available at insertion point
  // HoistPt. When limiting the number of hoisted expressions, one could hoist
  // a load without hoisting its access function. So before hoisting any
  // expression, make sure that all its operands are available at insert point.
  bool allOperandsAvailable(const Instruction *I,
                            const BasicBlock *HoistPt) const;

  // Same as allOperandsAvailable with recursive check for GEP operands.
  bool allGepOperandsAvailable(const Instruction *I,
                               const BasicBlock *HoistPt) const;

  // Make all operands of the GEP available.
  void makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                         const SmallVecInsn &InstructionsToHoist,
                         Instruction *Gep) const;

  void updateAlignment(Instruction *I, Instruction *Repl);

  // Remove all the instructions in Candidates and replace their usage with
  // Repl. Returns the number of instructions removed.
  unsigned rauw(const SmallVecInsn &Candidates, Instruction *Repl,
                MemoryUseOrDef *NewMemAcc);

  // Replace all Memory PHI usage with NewMemAcc.
  void raMPHIuw(MemoryUseOrDef *NewMemAcc);

  // Remove all other instructions and replace them with Repl.
  unsigned removeAndReplace(const SmallVecInsn &Candidates, Instruction *Repl,
                            BasicBlock *DestBB, bool MoveAccess);

  // In the case Repl is a load or a store, we make all their GEPs
  // available: GEPs are not hoisted by default to avoid hoisting the address
  // computations without the associated load or store.
  bool makeGepOperandsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                                const SmallVecInsn &InstructionsToHoist) const;

  std::pair<unsigned, unsigned> hoist(HoistingPointList &HPL);

  // Hoist all expressions. Returns the number of scalars hoisted
  // and the number of non-scalars hoisted.
  std::pair<unsigned, unsigned> hoistExpressions(Function &F);
};

bool GVNHoist::run(Function &F) {
  NumFuncArgs = F.arg_size();
  VN.setDomTree(DT);
  VN.setAliasAnalysis(AA);
  VN.setMemDep(MD);
  bool Res = false;
  // Perform DFS Numbering of instructions.
  unsigned BBI = 0;
  for (const BasicBlock *BB : depth_first(&F.getEntryBlock())) {
    DFSNumber[BB] = ++BBI;
    unsigned I = 0;
    for (const auto &Inst : *BB)
      DFSNumber[&Inst] = ++I;
  }

  int ChainLength = 0;

  // FIXME: use lazy evaluation of VN to avoid the fix-point computation.
  while (true) {
    if (MaxChainLength != -1 && ++ChainLength >= MaxChainLength)
      return Res;

    auto HoistStat = hoistExpressions(F);
    if (HoistStat.first + HoistStat.second == 0)
      return Res;

    if (HoistStat.second > 0)
      // To address a limitation of the current GVN, we need to rerun the
      // hoisting after we hoisted loads or stores in order to be able to
      // hoist all scalars dependent on the hoisted ld/st.
      VN.clear();

    Res = true;
  }

  return Res;
}

unsigned int GVNHoist::rank(const Value *V) const {
  // Prefer constants to undef to anything else.
  // Undef is a constant, so we have to check it first.
  // Prefer smaller constants to constantexprs.
  if (isa<ConstantExpr>(V))
    return 2;
  if (isa<UndefValue>(V))
    return 1;
  if (isa<Constant>(V))
    return 0;
  else if (auto *A = dyn_cast<Argument>(V))
    return 3 + A->getArgNo();

  // Need to shift the instruction DFS by number of arguments + 3 to account
  // for the constant and argument ranking above.
  auto Result = DFSNumber.lookup(V);
  if (Result > 0)
    return 4 + NumFuncArgs + Result;
  // Unreachable or something else, just return a really large number.
  return ~0;
}
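
// As a worked example (hypothetical numbers, assuming a function with two
// arguments): any plain constant ranks 0, undef ranks 1, a constantexpr ranks
// 2, the two arguments rank 3 and 4, and an instruction with DFS number N in
// its block ranks 4 + 2 + N. Lower-ranked VNs are considered first by
// computeInsertionPoints.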

bool GVNHoist::hasEH(const BasicBlock *BB) {
  auto It = BBSideEffects.find(BB);
  if (It != BBSideEffects.end())
    return It->second;

  if (BB->isEHPad() || BB->hasAddressTaken()) {
    BBSideEffects[BB] = true;
    return true;
  }

  if (BB->getTerminator()->mayThrow()) {
    BBSideEffects[BB] = true;
    return true;
  }

  BBSideEffects[BB] = false;
  return false;
}

bool GVNHoist::hasMemoryUse(const Instruction *NewPt, MemoryDef *Def,
                            const BasicBlock *BB) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return false;

  Instruction *OldPt = Def->getMemoryInst();
  const BasicBlock *OldBB = OldPt->getParent();
  const BasicBlock *NewBB = NewPt->getParent();
  bool ReachedNewPt = false;

  for (const MemoryAccess &MA : *Acc)
    if (const MemoryUse *MU = dyn_cast<MemoryUse>(&MA)) {
      Instruction *Insn = MU->getMemoryInst();

      // Do not check whether MU aliases Def when MU occurs after OldPt.
      if (BB == OldBB && firstInBB(OldPt, Insn))
        break;

      // Do not check whether MU aliases Def when MU occurs before NewPt.
      if (BB == NewBB) {
        if (!ReachedNewPt) {
          if (firstInBB(Insn, NewPt))
            continue;
          ReachedNewPt = true;
        }
      }
      if (MemorySSAUtil::defClobbersUseOrDef(Def, MU, *AA))
        return true;
    }

  return false;
}

bool GVNHoist::hasEHhelper(const BasicBlock *BB, const BasicBlock *SrcBB,
                           int &NBBsOnAllPaths) {
  // Stop walk once the limit is reached.
  if (NBBsOnAllPaths == 0)
    return true;

  // Impossible to hoist with exceptions on the path.
  if (hasEH(BB))
    return true;

  // No instruction after a hoist barrier in a basic block is ever selected
  // for hoisting, so instructions selected within a basic block that contains
  // a hoist barrier can still be hoisted.
  if ((BB != SrcBB) && HoistBarrier.count(BB))
    return true;

  return false;
}

bool GVNHoist::hasEHOrLoadsOnPath(const Instruction *NewPt, MemoryDef *Def,
                                  int &NBBsOnAllPaths) {
  const BasicBlock *NewBB = NewPt->getParent();
  const BasicBlock *OldBB = Def->getBlock();
  assert(DT->dominates(NewBB, OldBB) && "invalid path");
  assert(DT->dominates(Def->getDefiningAccess()->getBlock(), NewBB) &&
         "def does not dominate new hoisting point");

  // Walk all basic blocks reachable in depth-first iteration on the inverse
  // CFG from OldBB to NewBB. These blocks are all the blocks that may be
  // executed between the execution of NewBB and OldBB. Hoisting an expression
  // from OldBB into NewBB has to be safe on all execution paths.
  for (auto I = idf_begin(OldBB), E = idf_end(OldBB); I != E;) {
    const BasicBlock *BB = *I;
    if (BB == NewBB) {
      // Stop traversal when reaching HoistPt.
      I.skipChildren();
      continue;
    }

    if (hasEHhelper(BB, OldBB, NBBsOnAllPaths))
      return true;

    // Check that we do not move a store past loads.
    if (hasMemoryUse(NewPt, Def, BB))
      return true;

    // -1 is unlimited number of blocks on all paths.
    if (NBBsOnAllPaths != -1)
      --NBBsOnAllPaths;

    ++I;
  }

  return false;
}

bool GVNHoist::hasEHOnPath(const BasicBlock *HoistPt, const BasicBlock *SrcBB,
                           int &NBBsOnAllPaths) {
  assert(DT->dominates(HoistPt, SrcBB) && "Invalid path");

  // Walk all basic blocks reachable in depth-first iteration on
  // the inverse CFG from BBInsn to NewHoistPt. These blocks are all the
  // blocks that may be executed between the execution of NewHoistPt and
  // BBInsn. Hoisting an expression from BBInsn into NewHoistPt has to be safe
  // on all execution paths.
  for (auto I = idf_begin(SrcBB), E = idf_end(SrcBB); I != E;) {
    const BasicBlock *BB = *I;
    if (BB == HoistPt) {
      // Stop traversal when reaching NewHoistPt.
      I.skipChildren();
      continue;
    }

    if (hasEHhelper(BB, SrcBB, NBBsOnAllPaths))
      return true;

    // -1 is unlimited number of blocks on all paths.
    if (NBBsOnAllPaths != -1)
      --NBBsOnAllPaths;

    ++I;
  }

  return false;
}

bool GVNHoist::safeToHoistLdSt(const Instruction *NewPt,
                               const Instruction *OldPt, MemoryUseOrDef *U,
                               GVNHoist::InsKind K, int &NBBsOnAllPaths) {
  // In place hoisting is safe.
  if (NewPt == OldPt)
    return true;

  const BasicBlock *NewBB = NewPt->getParent();
  const BasicBlock *OldBB = OldPt->getParent();
  const BasicBlock *UBB = U->getBlock();

  // Check for dependences on the Memory SSA.
  MemoryAccess *D = U->getDefiningAccess();
  BasicBlock *DBB = D->getBlock();
  if (DT->properlyDominates(NewBB, DBB))
    // Cannot move the load or store to NewBB above its definition in DBB.
    return false;

  if (NewBB == DBB && !MSSA->isLiveOnEntryDef(D))
    if (auto *UD = dyn_cast<MemoryUseOrDef>(D))
      if (!firstInBB(UD->getMemoryInst(), NewPt))
        // Cannot move the load or store to NewPt above its definition in D.
        return false;

  // Check for unsafe hoistings due to side effects.
  if (K == InsKind::Store) {
    if (hasEHOrLoadsOnPath(NewPt, cast<MemoryDef>(U), NBBsOnAllPaths))
      return false;
  } else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
    return false;

  if (UBB == NewBB) {
    if (DT->properlyDominates(DBB, NewBB))
      return true;
    assert(UBB == DBB);
    assert(MSSA->locallyDominates(D, U));
  }

  // No side effects: it is safe to hoist.
  return true;
}
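
// A minimal illustration of the MemorySSA check above (assumed example, not
// from the test suite): if one successor of the hoisting point stores to %p
// before loading from it, the load's defining MemoryAccess is that store,
// whose block is properly dominated by the hoisting point, so safeToHoistLdSt
// refuses to move the load above its definition.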

bool GVNHoist::valueAnticipable(CHIArgs C, Instruction *TI) const {
  if (TI->getNumSuccessors() > (unsigned)size(C))
    return false; // Not enough args in this CHI.

  for (auto CHI : C) {
    // Find if all the edges have values flowing out of BB.
    if (!llvm::is_contained(successors(TI), CHI.Dest))
      return false;
  }
  return true;
}

void GVNHoist::checkSafety(CHIArgs C, BasicBlock *BB, GVNHoist::InsKind K,
                           SmallVectorImpl<CHIArg> &Safe) {
  int NumBBsOnAllPaths = MaxNumberOfBBSInPath;
  const Instruction *T = BB->getTerminator();
  for (auto CHI : C) {
    Instruction *Insn = CHI.I;
    if (!Insn) // No instruction was inserted in this CHI.
      continue;
    // If the Terminator is some kind of "exotic terminator" that produces a
    // value (such as InvokeInst, CallBrInst, or CatchSwitchInst) which the CHI
    // uses, it is not safe to hoist the use above the def.
    if (!T->use_empty() && is_contained(Insn->operands(), cast<const Value>(T)))
      continue;
    if (K == InsKind::Scalar) {
      if (safeToHoistScalar(BB, Insn->getParent(), NumBBsOnAllPaths))
        Safe.push_back(CHI);
    } else {
      if (MemoryUseOrDef *UD = MSSA->getMemoryAccess(Insn))
        if (safeToHoistLdSt(T, Insn, UD, K, NumBBsOnAllPaths))
          Safe.push_back(CHI);
    }
  }
}

void GVNHoist::fillRenameStack(BasicBlock *BB, InValuesType &ValueBBs,
                               GVNHoist::RenameStackType &RenameStack) {
  auto it1 = ValueBBs.find(BB);
  if (it1 != ValueBBs.end()) {
    // Iterate in reverse order to keep lower ranked values on the top.
    LLVM_DEBUG(dbgs() << "\nVisiting: " << BB->getName()
                      << " for pushing instructions on stack";);
    for (std::pair<VNType, Instruction *> &VI : reverse(it1->second)) {
      // Get the value of instruction I
      LLVM_DEBUG(dbgs() << "\nPushing on stack: " << *VI.second);
      RenameStack[VI.first].push_back(VI.second);
    }
  }
}

void GVNHoist::fillChiArgs(BasicBlock *BB, OutValuesType &CHIBBs,
                           GVNHoist::RenameStackType &RenameStack) {
  // For each *predecessor* (because Post-DOM) of BB check if it has a CHI
  for (auto *Pred : predecessors(BB)) {
    auto P = CHIBBs.find(Pred);
    if (P == CHIBBs.end()) {
      continue;
    }
    LLVM_DEBUG(dbgs() << "\nLooking at CHIs in: " << Pred->getName(););
    // A CHI is found (BB -> Pred is an edge in the CFG)
    // Pop the stack until Top(V) = Ve.
    auto &VCHI = P->second;
    for (auto It = VCHI.begin(), E = VCHI.end(); It != E;) {
      CHIArg &C = *It;
      if (!C.Dest) {
        auto si = RenameStack.find(C.VN);
        // The basic block containing the CHI must dominate the value we want
        // to track in the CHI. In the PDom walk, there can be values in the
        // stack which are not control dependent, e.g., in a nested loop.
        if (si != RenameStack.end() && si->second.size() &&
            DT->properlyDominates(Pred, si->second.back()->getParent())) {
          C.Dest = BB;                     // Assign the edge
          C.I = si->second.pop_back_val(); // Assign the argument
          LLVM_DEBUG(dbgs()
                     << "\nCHI Inserted in BB: " << C.Dest->getName() << *C.I
                     << ", VN: " << C.VN.first << ", " << C.VN.second);
        }
        // Move to next CHI of a different value
        It = std::find_if(It, VCHI.end(), [It](CHIArg &A) { return A != *It; });
      } else
        ++It;
    }
  }
}

void GVNHoist::findHoistableCandidates(OutValuesType &CHIBBs,
                                       GVNHoist::InsKind K,
                                       HoistingPointList &HPL) {
  auto cmpVN = [](const CHIArg &A, const CHIArg &B) { return A.VN < B.VN; };

  // CHIArgs now have the outgoing values, so check for anticipability and
  // accumulate hoistable candidates in HPL.
  for (std::pair<BasicBlock *, SmallVector<CHIArg, 2>> &A : CHIBBs) {
    BasicBlock *BB = A.first;
    SmallVectorImpl<CHIArg> &CHIs = A.second;
    // The vector of CHIs contains CHIs for different instructions.
    // Sort the args according to their VNs, such that identical
    // instructions are together.
    llvm::stable_sort(CHIs, cmpVN);
    auto TI = BB->getTerminator();
    auto B = CHIs.begin();
    // [PrevIt, PHIIt) form a range of CHIs which have identical VNs.
    auto PHIIt = llvm::find_if(CHIs, [B](CHIArg &A) { return A != *B; });
    auto PrevIt = CHIs.begin();
    while (PrevIt != PHIIt) {
      // Collect values which satisfy safety checks.
      SmallVector<CHIArg, 2> Safe;
      // We check for safety first because there might be multiple values in
      // the same path, some of which are not safe to be hoisted, but overall
      // each edge has at least one value which can be hoisted, making the
      // value anticipable along that path.
      checkSafety(make_range(PrevIt, PHIIt), BB, K, Safe);

      // List of safe values should be anticipable at TI.
      if (valueAnticipable(make_range(Safe.begin(), Safe.end()), TI)) {
        HPL.push_back({BB, SmallVecInsn()});
        SmallVecInsn &V = HPL.back().second;
        for (auto B : Safe)
          V.push_back(B.I);
      }

      // Check other VNs
      PrevIt = PHIIt;
      PHIIt = std::find_if(PrevIt, CHIs.end(),
                           [PrevIt](CHIArg &A) { return A != *PrevIt; });
    }
  }
}

bool GVNHoist::allOperandsAvailable(const Instruction *I,
                                    const BasicBlock *HoistPt) const {
  for (const Use &Op : I->operands())
    if (const auto *Inst = dyn_cast<Instruction>(&Op))
      if (!DT->dominates(Inst->getParent(), HoistPt))
        return false;

  return true;
}

bool GVNHoist::allGepOperandsAvailable(const Instruction *I,
                                       const BasicBlock *HoistPt) const {
  for (const Use &Op : I->operands())
    if (const auto *Inst = dyn_cast<Instruction>(&Op))
      if (!DT->dominates(Inst->getParent(), HoistPt)) {
        if (const GetElementPtrInst *GepOp =
                dyn_cast<GetElementPtrInst>(Inst)) {
          if (!allGepOperandsAvailable(GepOp, HoistPt))
            return false;
          // Gep is available if all operands of GepOp are available.
        } else {
          // Gep is not available if it has operands other than GEPs that are
          // defined in blocks not dominating HoistPt.
          return false;
        }
      }
  return true;
}

void GVNHoist::makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                                 const SmallVecInsn &InstructionsToHoist,
                                 Instruction *Gep) const {
  assert(allGepOperandsAvailable(Gep, HoistPt) && "GEP operands not available");

  Instruction *ClonedGep = Gep->clone();
  for (unsigned i = 0, e = Gep->getNumOperands(); i != e; ++i)
    if (Instruction *Op = dyn_cast<Instruction>(Gep->getOperand(i))) {
      // Check whether the operand is already available.
      if (DT->dominates(Op->getParent(), HoistPt))
        continue;

      // As a GEP can refer to other GEPs, recursively make all the operands
      // of this GEP available at HoistPt.
      if (GetElementPtrInst *GepOp = dyn_cast<GetElementPtrInst>(Op))
        makeGepsAvailable(ClonedGep, HoistPt, InstructionsToHoist, GepOp);
    }

  // Copy Gep and replace its uses in Repl with ClonedGep.
  ClonedGep->insertBefore(HoistPt->getTerminator());

  // Conservatively discard any optimization hints, they may differ on the
  // other paths.
  ClonedGep->dropUnknownNonDebugMetadata();

  // If we have optimization hints which agree with each other along different
  // paths, preserve them.
  for (const Instruction *OtherInst : InstructionsToHoist) {
    const GetElementPtrInst *OtherGep;
    if (auto *OtherLd = dyn_cast<LoadInst>(OtherInst))
      OtherGep = cast<GetElementPtrInst>(OtherLd->getPointerOperand());
    else
      OtherGep = cast<GetElementPtrInst>(
          cast<StoreInst>(OtherInst)->getPointerOperand());
    ClonedGep->andIRFlags(OtherGep);

    // Merge debug locations of GEPs, because the hoisted GEP replaces those
    // in branches. When cloning, ClonedGep preserves the debug location of
    // Gep, so Gep is skipped to avoid merging it twice.
    if (OtherGep != Gep) {
      ClonedGep->applyMergedLocation(ClonedGep->getDebugLoc(),
                                     OtherGep->getDebugLoc());
    }
  }

  // Replace uses of Gep with ClonedGep in Repl.
  Repl->replaceUsesOfWith(Gep, ClonedGep);
}

void GVNHoist::updateAlignment(Instruction *I, Instruction *Repl) {
  if (auto *ReplacementLoad = dyn_cast<LoadInst>(Repl)) {
    ReplacementLoad->setAlignment(
        std::min(ReplacementLoad->getAlign(), cast<LoadInst>(I)->getAlign()));
    ++NumLoadsRemoved;
  } else if (auto *ReplacementStore = dyn_cast<StoreInst>(Repl)) {
    ReplacementStore->setAlignment(
        std::min(ReplacementStore->getAlign(), cast<StoreInst>(I)->getAlign()));
    ++NumStoresRemoved;
  } else if (auto *ReplacementAlloca = dyn_cast<AllocaInst>(Repl)) {
    ReplacementAlloca->setAlignment(std::max(ReplacementAlloca->getAlign(),
                                             cast<AllocaInst>(I)->getAlign()));
  } else if (isa<CallInst>(Repl)) {
    ++NumCallsRemoved;
  }
}

unsigned GVNHoist::rauw(const SmallVecInsn &Candidates, Instruction *Repl,
                        MemoryUseOrDef *NewMemAcc) {
  unsigned NR = 0;
  for (Instruction *I : Candidates) {
    if (I != Repl) {
      ++NR;
      updateAlignment(I, Repl);
      if (NewMemAcc) {
        // Update the uses of the old MSSA access with NewMemAcc.
        MemoryAccess *OldMA = MSSA->getMemoryAccess(I);
        OldMA->replaceAllUsesWith(NewMemAcc);
        MSSAUpdater->removeMemoryAccess(OldMA);
      }

      combineMetadataForCSE(Repl, I, true);
      Repl->andIRFlags(I);
      I->replaceAllUsesWith(Repl);
      // Also invalidate the Alias Analysis cache.
      MD->removeInstruction(I);
      I->eraseFromParent();
    }
  }
  return NR;
}

void GVNHoist::raMPHIuw(MemoryUseOrDef *NewMemAcc) {
  SmallPtrSet<MemoryPhi *, 4> UsePhis;
  for (User *U : NewMemAcc->users())
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(U))
      UsePhis.insert(Phi);

  for (MemoryPhi *Phi : UsePhis) {
    auto In = Phi->incoming_values();
    if (llvm::all_of(In, [&](Use &U) { return U == NewMemAcc; })) {
      Phi->replaceAllUsesWith(NewMemAcc);
      MSSAUpdater->removeMemoryAccess(Phi);
    }
  }
}

unsigned GVNHoist::removeAndReplace(const SmallVecInsn &Candidates,
                                    Instruction *Repl, BasicBlock *DestBB,
                                    bool MoveAccess) {
  MemoryUseOrDef *NewMemAcc = MSSA->getMemoryAccess(Repl);
  if (MoveAccess && NewMemAcc) {
    // The definition of this ld/st will not change: ld/st hoisting is
    // legal when the ld/st is not moved past its current definition.
    MSSAUpdater->moveToPlace(NewMemAcc, DestBB, MemorySSA::BeforeTerminator);
  }

  // Replace all other instructions with Repl with memory access NewMemAcc.
  unsigned NR = rauw(Candidates, Repl, NewMemAcc);

  // Remove MemorySSA phi nodes with the same arguments.
  if (NewMemAcc)
    raMPHIuw(NewMemAcc);
  return NR;
}

bool GVNHoist::makeGepOperandsAvailable(
    Instruction *Repl, BasicBlock *HoistPt,
    const SmallVecInsn &InstructionsToHoist) const {
  // Check whether the GEP of a ld/st can be synthesized at HoistPt.
  GetElementPtrInst *Gep = nullptr;
  Instruction *Val = nullptr;
  if (auto *Ld = dyn_cast<LoadInst>(Repl)) {
    Gep = dyn_cast<GetElementPtrInst>(Ld->getPointerOperand());
  } else if (auto *St = dyn_cast<StoreInst>(Repl)) {
    Gep = dyn_cast<GetElementPtrInst>(St->getPointerOperand());
    Val = dyn_cast<Instruction>(St->getValueOperand());
    // Check that the stored value is available.
    if (Val) {
      if (isa<GetElementPtrInst>(Val)) {
        // Check whether we can compute the GEP at HoistPt.
        if (!allGepOperandsAvailable(Val, HoistPt))
          return false;
      } else if (!DT->dominates(Val->getParent(), HoistPt))
        return false;
    }
  }

  // Check whether we can compute the Gep at HoistPt.
  if (!Gep || !allGepOperandsAvailable(Gep, HoistPt))
    return false;

  makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Gep);

  if (Val && isa<GetElementPtrInst>(Val))
    makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Val);

  return true;
}

std::pair<unsigned, unsigned> GVNHoist::hoist(HoistingPointList &HPL) {
  unsigned NI = 0, NL = 0, NS = 0, NC = 0, NR = 0;
  for (const HoistingPointInfo &HP : HPL) {
    // Find out whether we already have one of the instructions in HoistPt,
    // in which case we do not have to move it.
    BasicBlock *DestBB = HP.first;
    const SmallVecInsn &InstructionsToHoist = HP.second;
    Instruction *Repl = nullptr;
    for (Instruction *I : InstructionsToHoist)
      if (I->getParent() == DestBB)
        // If there are two instructions in HoistPt to be hoisted in place:
        // update Repl to be the first one, such that we can rename the uses
        // of the second based on the first.
        if (!Repl || firstInBB(I, Repl))
          Repl = I;

    // Keep track of whether we moved the instruction so we know whether we
    // should move the MemoryAccess.
    bool MoveAccess = true;
    if (Repl) {
      // Repl is already in HoistPt: it remains in place.
      assert(allOperandsAvailable(Repl, DestBB) &&
             "instruction depends on operands that are not available");
      MoveAccess = false;
    } else {
      // When we do not find Repl in HoistPt, select the first in the list
      // and move it to HoistPt.
      Repl = InstructionsToHoist.front();

      // We can move Repl in HoistPt only when all operands are available.
      // The order in which hoistings are done may influence the availability
      // of operands.
      if (!allOperandsAvailable(Repl, DestBB)) {
        // When HoistingGeps there is nothing more we can do to make the
        // operands available: just continue.
        if (HoistingGeps)
          continue;

        // When not HoistingGeps we need to copy the GEPs.
        if (!makeGepOperandsAvailable(Repl, DestBB, InstructionsToHoist))
          continue;
      }

      // Move the instruction to the end of HoistPt.
      Instruction *Last = DestBB->getTerminator();
      MD->removeInstruction(Repl);
      Repl->moveBefore(Last);

      DFSNumber[Repl] = DFSNumber[Last]++;
    }

    // Drop debug location as per debug info update guide.
    Repl->dropLocation();
    NR += removeAndReplace(InstructionsToHoist, Repl, DestBB, MoveAccess);

    if (isa<LoadInst>(Repl))
      ++NL;
    else if (isa<StoreInst>(Repl))
      ++NS;
    else if (isa<CallInst>(Repl))
      ++NC;
    else // Scalar
      ++NI;
  }

  if (MSSA && VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  NumHoisted += NL + NS + NC + NI;
  NumRemoved += NR;
  NumLoadsHoisted += NL;
  NumStoresHoisted += NS;
  NumCallsHoisted += NC;
  return {NI, NL + NC + NS};
}

std::pair<unsigned, unsigned> GVNHoist::hoistExpressions(Function &F) {
  InsnInfo II;
  LoadInfo LI;
  StoreInfo SI;
  CallInfo CI;
  for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
    int InstructionNb = 0;
    for (Instruction &I1 : *BB) {
      // If I1 cannot guarantee progress, subsequent instructions
      // in BB cannot be hoisted anyway.
      if (!isGuaranteedToTransferExecutionToSuccessor(&I1)) {
        HoistBarrier.insert(BB);
        break;
      }
      // Only hoist the first instructions in BB up to MaxDepthInBB. Hoisting
      // deeper may increase the register pressure and compilation time.
      if (MaxDepthInBB != -1 && InstructionNb++ >= MaxDepthInBB)
        break;

      // Do not value number terminator instructions.
      if (I1.isTerminator())
        break;

      if (auto *Load = dyn_cast<LoadInst>(&I1))
        LI.insert(Load, VN);
      else if (auto *Store = dyn_cast<StoreInst>(&I1))
        SI.insert(Store, VN);
      else if (auto *Call = dyn_cast<CallInst>(&I1)) {
        if (auto *Intr = dyn_cast<IntrinsicInst>(Call)) {
          if (isa<DbgInfoIntrinsic>(Intr) ||
              Intr->getIntrinsicID() == Intrinsic::assume ||
              Intr->getIntrinsicID() == Intrinsic::sideeffect)
            continue;
        }
        if (Call->mayHaveSideEffects())
          break;

        if (Call->isConvergent())
          break;

        CI.insert(Call, VN);
      } else if (HoistingGeps || !isa<GetElementPtrInst>(&I1))
        // Do not hoist scalars past calls that may write to memory because
        // that could result in spills later. GEPs are handled separately.
        // TODO: We can relax this for targets like AArch64 as they have more
        // registers than X86.
        II.insert(&I1, VN);
    }
  }

  HoistingPointList HPL;
  computeInsertionPoints(II.getVNTable(), HPL, InsKind::Scalar);
  computeInsertionPoints(LI.getVNTable(), HPL, InsKind::Load);
  computeInsertionPoints(SI.getVNTable(), HPL, InsKind::Store);
  computeInsertionPoints(CI.getScalarVNTable(), HPL, InsKind::Scalar);
  computeInsertionPoints(CI.getLoadVNTable(), HPL, InsKind::Load);
  computeInsertionPoints(CI.getStoreVNTable(), HPL, InsKind::Store);
  return hoist(HPL);
}

} // end namespace llvm

PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) {
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  GVNHoist G(&DT, &PDT, &AA, &MD, &MSSA);
  if (!G.run(F))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<MemorySSAAnalysis>();
  return PA;
}
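
// Usage sketch (assumed, for illustration): the pass is exposed to the new
// pass manager as "gvn-hoist", e.g.
//
//   opt -passes=gvn-hoist -S input.ll
//
// or programmatically:
//
//   FunctionPassManager FPM;
//   FPM.addPass(GVNHoistPass());
//
// with the usual PassBuilder-registered analysis managers supplying
// DominatorTree, PostDominatorTree, AA, MemoryDependence and MemorySSA.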