//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionPrecedenceTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
STATISTIC(NumPRELoadMoved2CEPred,
          "Number of loads moved to predecessor of a critical edge in PRE");

STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached gvn-max-block-speculations cut-off "
          "preventing further exploration");

static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
                                            cl::init(true));
static cl::opt<bool>
GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
                                cl::init(false));
static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100),
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

// This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600),
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

static cl::opt<uint32_t> MaxNumVisitedInsts(
    "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
    cl::desc("Max number of visited instructions when trying to find "
             "dominating value of select dependency (default = 100)"));

static cl::opt<uint32_t> MaxNumInsnsPerBlock(
    "gvn-max-num-insns", cl::Hidden, cl::init(100),
    cl::desc("Max number of instructions to scan in each basic block in GVN "
             "(default = 100)"));

struct llvm::GVNPass::Expression {
  uint32_t opcode;
  bool commutative = false;
  // The type is not necessarily the result type of the expression, it may be
  // any additional type needed to disambiguate the expression.
  Type *type = nullptr;
  SmallVector<uint32_t, 4> varargs;

  AttributeList attrs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    if ((!attrs.isEmpty() || !other.attrs.isEmpty()) &&
        !attrs.intersectWith(type->getContext(), other.attrs).has_value())
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

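// Note: the opcode values ~0U and ~1U are reserved as the DenseMap empty and
// tombstone keys (see DenseMapInfo below), which is why operator== above
// treats two expressions with either opcode as trivially equal and why the
// default-constructed opcode is the neutral ~2U.
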
namespace llvm {

template <> struct DenseMapInfo<GVNPass::Expression> {
  static inline GVNPass::Expression getEmptyKey() { return ~0U; }
  static inline GVNPass::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVNPass::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVNPass::Expression &LHS,
                      const GVNPass::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum class ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal,  // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
    SelectVal, // A pointer select which is loaded from and for which the load
               // can be replaced by a value select.
  };

  /// Val - The value that is live out of the block.
  Value *Val;
  /// Kind of the live-out value.
  ValType Kind;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset = 0;
  /// V1, V2 - The dominating non-clobbered values of SelectVal.
  Value *V1 = nullptr, *V2 = nullptr;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = V;
    Res.Kind = ValType::SimpleVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = MI;
    Res.Kind = ValType::MemIntrin;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = Load;
    Res.Kind = ValType::LoadVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val = nullptr;
    Res.Kind = ValType::UndefVal;
    Res.Offset = 0;
    return Res;
  }

  static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2) {
    AvailableValue Res;
    Res.Val = Sel;
    Res.Kind = ValType::SelectVal;
    Res.Offset = 0;
    Res.V1 = V1;
    Res.V2 = V2;
    return Res;
  }

  bool isSimpleValue() const { return Kind == ValType::SimpleVal; }
  bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; }
  bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; }
  bool isUndefValue() const { return Kind == ValType::UndefVal; }
  bool isSelectValue() const { return Kind == ValType::SelectVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val;
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val);
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val);
  }

  SelectInst *getSelectValue() const {
    assert(isSelectValue() && "Wrong accessor");
    return cast<SelectInst>(Val);
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
                                  GVNPass &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB = nullptr;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel,
                                         Value *V1, Value *V2) {
    return get(BB, AvailableValue::getSelect(Sel, V1, V2));
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
    return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

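/// Build the hashable Expression key for an instruction: every operand is
/// replaced by its value number, and commutative operands (and comparison
/// predicates) are put into a canonical order so that equivalent
/// instructions produce identical keys.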
GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
    // gc.relocate is a 'special' call: its second and third operands are
    // not real values, but indices into the statepoint's argument list.
    // Use the referred-to values for purposes of identity.
    e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
    e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
    e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
  } else {
    for (Use &Op : I->operands())
      e.varargs.push_back(lookupOrAdd(Op));
  }
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since commutative operands are the 1st two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (auto *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
    e.varargs.append(E->idx_begin(), E->idx_end());
  } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
    ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
    e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
  } else if (auto *CB = dyn_cast<CallBase>(I)) {
    e.attrs = CB->getAttributes();
  }

  return e;
}

GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
    unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVNPass::Expression
GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

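  // When EI extracts the result slot of a with.overflow intrinsic, e.g.
  //   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  //   %v = extractvalue { i32, i1 } %s, 0
  // it is numbered below with the opcode of the underlying binary operation,
  // so %v can share a value number with a plain "add i32 %a, %b".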
  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI is an extract from one of our with.overflow intrinsics. Synthesize
    // a semantically equivalent expression instead of an extract value
    // expression.
    e.opcode = WO->getBinaryOp();
    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
    return e;
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Use &Op : EI->operands())
    e.varargs.push_back(lookupOrAdd(Op));

  append_range(e.varargs, EI->indices());

  return e;
}

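/// GEPs are numbered by their base pointer plus accumulated offsets rather
/// than by their syntactic operands, so equivalent address computations that
/// are spelled with different element types (e.g. "getelementptr i32, ptr %p,
/// i64 1" vs. "getelementptr i8, ptr %p, i64 4") receive the same value
/// number.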
GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
  Expression E;
  Type *PtrTy = GEP->getType()->getScalarType();
  const DataLayout &DL = GEP->getDataLayout();
  unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
  SmallMapVector<Value *, APInt, 4> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
    // Convert into offset representation, to recognize equivalent address
    // calculations that use different type encoding.
    LLVMContext &Context = GEP->getContext();
    E.opcode = GEP->getOpcode();
    E.type = nullptr;
    E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
    for (const auto &Pair : VariableOffsets) {
      E.varargs.push_back(lookupOrAdd(Pair.first));
      E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
    }
    if (!ConstantOffset.isZero())
      E.varargs.push_back(
          lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
  } else {
    // If converting to offset representation fails (for scalable vectors),
    // fall back to type-based implementation:
    E.opcode = GEP->getOpcode();
    E.type = GEP->getSourceElementType();
    for (Use &Op : GEP->operands())
      E.varargs.push_back(lookupOrAdd(Op));
  }
  return E;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
GVNPass::ValueTable &
GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;

/// add - Insert a value into the table with a specified value number.
void GVNPass::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

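/// Value-number a call. Calls that do not access memory are numbered like
/// ordinary expressions; read-only calls are numbered by finding an identical
/// dominating call via memory dependence analysis; anything else (including
/// convergent calls and calls inside pre-split coroutines) always receives a
/// fresh value number.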
uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
  // FIXME: Currently the calls which may access the thread id may
  // be considered as not accessing the memory. But this is
  // problematic for coroutines, since coroutines may resume in a
  // different thread. So we disable the optimization here for the
  // correctness. However, it may block many other correct
  // optimizations. Revert this one when we detect the memory
  // accessing kind more precisely.
  if (C->getFunction()->isPresplitCoroutine()) {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }

  // Do not combine convergent calls since they implicitly depend on the set of
  // threads that is currently executing, and they might be in different basic
  // blocks.
  if (C->isConvergent()) {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }

  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  }

  if (MD && AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      // For masked load/store intrinsics, the local_dep may actually be
      // a normal load or store instruction.
      CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());

      if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (const NonLocalDepEntry &I : deps) {
      if (I.getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I.getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->arg_size() != C->arg_size()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  }

  valueNumbering[C] = nextValueNumber;
  return nextValueNumber++;
}

/// Returns true if a value number exists for the specified value.
bool GVNPass::ValueTable::exists(Value *V) const {
  return valueNumbering.contains(V);
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  auto *I = dyn_cast<Instruction>(V);
  if (!I) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::AddrSpaceCast:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::Freeze:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
      exp = createExpr(I);
      break;
    case Instruction::GetElementPtr:
      exp = createGEPExpr(cast<GetElementPtrInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVNPass::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVNPass::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  assert(!valueNumbering.contains(V) &&
         "Inst still occurs in value numbering map!");
}

//===----------------------------------------------------------------------===//
//                     LeaderMap External Functions
//===----------------------------------------------------------------------===//

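// LeaderMap maps a value number to an intrusive singly-linked list of
// (Value, BasicBlock) entries. Nodes are carved out of a bump-pointer
// allocator and never individually freed; erase() merely unlinks or blanks
// them.
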
/// Push a new Value to the LeaderTable onto the list for its value number.
void GVNPass::LeaderMap::insert(uint32_t N, Value *V, const BasicBlock *BB) {
  LeaderListNode &Curr = NumToLeaders[N];
  if (!Curr.Entry.Val) {
    Curr.Entry.Val = V;
    Curr.Entry.BB = BB;
    return;
  }

  LeaderListNode *Node = TableAllocator.Allocate<LeaderListNode>();
  Node->Entry.Val = V;
  Node->Entry.BB = BB;
  Node->Next = Curr.Next;
  Curr.Next = Node;
}

/// Scan the list of values corresponding to a given
/// value number, and remove the given instruction if encountered.
void GVNPass::LeaderMap::erase(uint32_t N, Instruction *I,
                               const BasicBlock *BB) {
  LeaderListNode *Prev = nullptr;
  LeaderListNode *Curr = &NumToLeaders[N];

  while (Curr && (Curr->Entry.Val != I || Curr->Entry.BB != BB)) {
    Prev = Curr;
    Curr = Curr->Next;
  }

  if (!Curr)
    return;

  if (Prev) {
    Prev->Next = Curr->Next;
  } else {
    if (!Curr->Next) {
      Curr->Entry.Val = nullptr;
      Curr->Entry.BB = nullptr;
    } else {
      LeaderListNode *Next = Curr->Next;
      Curr->Entry.Val = Next->Entry.Val;
      Curr->Entry.BB = Next->Entry.BB;
      Curr->Next = Next->Next;
    }
  }
}

void GVNPass::LeaderMap::verifyRemoved(const Value *V) const {
  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (const auto &I : NumToLeaders) {
    (void)I;
    assert(I.second.Entry.Val != V && "Inst still in value numbering scope!");
    assert(
        std::none_of(leader_iterator(&I.second), leader_iterator(nullptr),
                     [=](const LeaderTableEntry &E) { return E.Val == V; }) &&
        "Inst still in value numbering scope!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

bool GVNPass::isPREEnabled() const {
  return Options.AllowPRE.value_or(GVNEnablePRE);
}

bool GVNPass::isLoadPREEnabled() const {
  return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE);
}

bool GVNPass::isLoadInLoopPREEnabled() const {
  return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE);
}

bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
  return Options.AllowLoadPRESplitBackedge.value_or(
      GVNEnableSplitBackedgeInLoadPRE);
}

bool GVNPass::isMemDepEnabled() const {
  return Options.AllowMemDep.value_or(GVNEnableMemDep);
}

PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *MemDep =
      isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                         MSSA ? &MSSA->getMSSA() : nullptr);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}

void GVNPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  if (Options.AllowPRE != std::nullopt)
    OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
  if (Options.AllowLoadPRE != std::nullopt)
    OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
  if (Options.AllowLoadPRESplitBackedge != std::nullopt)
    OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
       << "split-backedge-load-pre;";
  if (Options.AllowMemDep != std::nullopt)
    OS << (*Options.AllowMemDep ? "" : "no-") << "memdep";
  OS << '>';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (auto &I : d) {
    errs() << I.first << "\n";
    I.second->dump();
  }
  errs() << "}\n";
}
#endif

enum class AvailabilityState : char {
  /// We know the block *is not* fully available. This is a fixpoint.
  Unavailable = 0,
  /// We know the block *is* fully available. This is a fixpoint.
  Available = 1,
  /// We do not know whether the block is fully available or not,
  /// but we are currently speculating that it will be.
  /// If it would have turned out that the block was, in fact, not fully
  /// available, this would have been cleaned up into an Unavailable.
  SpeculativelyAvailable = 2,
};

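// The check below runs in two phases: a depth-first walk over the
// predecessors that optimistically marks unknown blocks as speculatively
// available, followed by a fixpoint pass that re-walks successors and
// downgrades (or confirms) every speculative mark to one of the two fixpoint
// states above.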
/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
/// 0) we know the block *is not* fully available.
/// 1) we know the block *is* fully available.
/// 2) we do not know whether the block is fully available or not, but we are
///    currently speculating that it will be.
static bool IsValueFullyAvailableInBlock(
    BasicBlock *BB,
    DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
  SmallVector<BasicBlock *, 32> Worklist;
  std::optional<BasicBlock *> UnavailableBB;

  // The number of times we didn't find an entry for a block in a map and
  // optimistically inserted an entry marking block as speculatively available.
  unsigned NumNewNewSpeculativelyAvailableBBs = 0;

#ifndef NDEBUG
  SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
  SmallVector<BasicBlock *, 32> AvailableBBs;
#endif

  Worklist.emplace_back(BB);
  while (!Worklist.empty()) {
    BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
    // Optimistically assume that the block is Speculatively Available and check
    // to see if we already know about this block in one lookup.
    std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
        FullyAvailableBlocks.try_emplace(
            CurrBB, AvailabilityState::SpeculativelyAvailable);
    AvailabilityState &State = IV.first->second;

    // Did the entry already exist for this block?
    if (!IV.second) {
      if (State == AvailabilityState::Unavailable) {
        UnavailableBB = CurrBB;
        break; // Backpropagate unavailability info.
      }

#ifndef NDEBUG
      AvailableBBs.emplace_back(CurrBB);
#endif
      continue; // Don't recurse further, but continue processing worklist.
    }

    // No entry found for block.
    ++NumNewNewSpeculativelyAvailableBBs;
    bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;

    // If we have exhausted our budget, mark this block as unavailable.
    // Also, if this block has no predecessors, the value isn't live-in here.
    if (OutOfBudget || pred_empty(CurrBB)) {
      MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
      State = AvailabilityState::Unavailable;
      UnavailableBB = CurrBB;
      break; // Backpropagate unavailability info.
    }

    // Tentatively consider this block as speculatively available.
#ifndef NDEBUG
    NewSpeculativelyAvailableBBs.insert(CurrBB);
#endif
    // And further recurse into block's predecessors, in depth-first order!
    Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
  }

#if LLVM_ENABLE_STATS
  IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
      NumNewNewSpeculativelyAvailableBBs);
#endif

  // If the block isn't marked as fixpoint yet
  // (the Unavailable and Available states are fixpoints)
  auto MarkAsFixpointAndEnqueueSuccessors =
      [&](BasicBlock *BB, AvailabilityState FixpointState) {
        auto It = FullyAvailableBlocks.find(BB);
        if (It == FullyAvailableBlocks.end())
          return; // Never queried this block, leave as-is.
        switch (AvailabilityState &State = It->second) {
        case AvailabilityState::Unavailable:
        case AvailabilityState::Available:
          return; // Don't backpropagate further, continue processing worklist.
        case AvailabilityState::SpeculativelyAvailable: // Fix it!
          State = FixpointState;
#ifndef NDEBUG
          assert(NewSpeculativelyAvailableBBs.erase(BB) &&
                 "Found a speculatively available successor leftover?");
#endif
          // Queue successors for further processing.
          Worklist.append(succ_begin(BB), succ_end(BB));
          return;
        }
      };

  if (UnavailableBB) {
    // Okay, we have encountered an unavailable block.
    // Mark speculatively available blocks reachable from UnavailableBB as
    // unavailable as well. Paths are terminated when they reach blocks not in
    // FullyAvailableBlocks or they are not marked as speculatively available.
    Worklist.clear();
    Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Unavailable);
  }

#ifndef NDEBUG
  Worklist.clear();
  for (BasicBlock *AvailableBB : AvailableBBs)
    Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Available);

  assert(NewSpeculativelyAvailableBBs.empty() &&
         "Must have fixed all the new speculatively available blocks.");
#endif

  return !UnavailableBB;
}

/// If the specified OldValue exists in ValuesPerBlock, replace its value with
/// NewValue.
static void replaceValuesPerBlockEntry(
    SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, Value *OldValue,
    Value *NewValue) {
  for (AvailableValueInBlock &V : ValuesPerBlock) {
    if (V.AV.Val == OldValue)
      V.AV.Val = NewValue;
    if (V.AV.isSelectValue()) {
      if (V.AV.V1 == OldValue)
        V.AV.V1 = NewValue;
      if (V.AV.V2 == OldValue)
        V.AV.V2 = NewValue;
    }
  }
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate Load. This returns the value
/// that should be used at Load's definition site.
static Value *
ConstructSSAForLoadSet(LoadInst *Load,
                       SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                       GVNPass &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               Load->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode *, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(Load->getType(), Load->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (AV.AV.isUndefValue())
      continue;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it as
    // SSAUpdater will resolve the value to the relevant phi which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == Load->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
                                                Instruction *InsertPt,
                                                GVNPass &gvn) const {
  Value *Res;
  Type *LoadTy = Load->getType();
  const DataLayout &DL = Load->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << " " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *CoercedLoad = getCoercedLoadValue();
    if (CoercedLoad->getType() == LoadTy && Offset == 0) {
      Res = CoercedLoad;
      combineMetadataForCSE(CoercedLoad, Load, false);
    } else {
      Res = getValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
      // We are adding a new user for this load, for which the original
      // metadata may not hold. Additionally, the new load may have a different
      // size and type, so their metadata cannot be combined in any
      // straightforward way.
      // Drop all metadata that is not known to cause immediate UB on violation,
      // unless the load has !noundef, in which case all metadata violations
      // will be promoted to UB.
      // TODO: We can combine noalias/alias.scope metadata here, because it is
      // independent of the load type.
      if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))
        CoercedLoad->dropUnknownNonDebugMetadata(
            {LLVMContext::MD_dereferenceable,
             LLVMContext::MD_dereferenceable_or_null,
             LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << " " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << " " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else if (isSelectValue()) {
    // Introduce a new value select for a load from an eligible pointer select.
    SelectInst *Sel = getSelectValue();
    assert(V1 && V2 && "both value operands of the select must be present");
    Res =
        SelectInst::Create(Sel->getCondition(), V1, V2, "", Sel->getIterator());
    // We use the DebugLoc from the original load here, as this instruction
    // materializes the value that would previously have been loaded.
    cast<SelectInst>(Res)->setDebugLoc(Load->getDebugLoc());
  } else {
    llvm_unreachable("Should not materialize value from dead block");
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Assuming To can be reached from both From and Between, does Between lie on
/// every path from From to To?
static bool liesBetween(const Instruction *From, Instruction *Between,
                        const Instruction *To, DominatorTree *DT) {
  if (From->getParent() == Between->getParent())
    return DT->dominates(From, Between);
  SmallSet<BasicBlock *, 1> Exclusion;
  Exclusion.insert(Between->getParent());
  return !isPotentiallyReachable(From, To, &Exclusion, DT);
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  Instruction *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
  R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : Load->getPointerOperand()->users()) {
    if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
      auto *I = cast<Instruction>(U);
      if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
        // Use the most immediately dominating value.
        if (OtherAccess) {
          if (DT->dominates(OtherAccess, I))
            OtherAccess = I;
          else
            assert(U == OtherAccess || DT->dominates(I, OtherAccess));
        } else
          OtherAccess = I;
      }
    }
  }

  if (!OtherAccess) {
    // There is no dominating use, check if we can find a closest non-dominating
    // use that lies between any other potentially available use and Load.
    for (auto *U : Load->getPointerOperand()->users()) {
      if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
        auto *I = cast<Instruction>(U);
        if (I->getFunction() == Load->getFunction() &&
            isPotentiallyReachable(I, Load, nullptr, DT)) {
          if (OtherAccess) {
            if (liesBetween(OtherAccess, I, Load, DT)) {
              OtherAccess = I;
            } else if (!liesBetween(I, OtherAccess, Load, DT)) {
              // These uses are both partially available at Load were it not for
              // the clobber, but neither lies strictly after the other.
              OtherAccess = nullptr;
              break;
            } // else: keep current OtherAccess since it lies between U and Load.
          } else {
            OtherAccess = I;
          }
        }
      }
    }
  }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

// Find a non-clobbered value for the Loc memory location in an extended basic
// block (a chain of basic blocks with single predecessors), scanning backwards
// starting from the From instruction.
static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,
                                  Instruction *From, AAResults *AA) {
  uint32_t NumVisitedInsts = 0;
  BasicBlock *FromBB = From->getParent();
  BatchAAResults BatchAA(*AA);
  for (BasicBlock *BB = FromBB; BB; BB = BB->getSinglePredecessor())
    for (auto *Inst = BB == FromBB ? From : BB->getTerminator();
         Inst != nullptr; Inst = Inst->getPrevNonDebugInstruction()) {
      // Stop the search if the limit is reached.
      if (++NumVisitedInsts > MaxNumVisitedInsts)
        return nullptr;
      if (isModSet(BatchAA.getModRefInfo(Inst, Loc)))
        return nullptr;
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
          return LI;
    }
  return nullptr;
}

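/// Given a local Def or Clobber dependency of Load, try to describe a value
/// that can be reused in place of the load: a store's operand, an earlier
/// (possibly wider) load, a memory intrinsic, or a select of two available
/// values. Returns std::nullopt if nothing can be forwarded.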
std::optional<AvailableValue>
GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
                                 Value *Address) {
  assert(Load->isUnordered() && "rules below are incorrect for ordered access");
  assert(DepInfo.isLocal() && "expected a local dependence");

  Instruction *DepInst = DepInfo.getInst();

  const DataLayout &DL = Load->getDataLayout();
  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
        if (Offset != -1)
          return AvailableValue::get(DepSI->getValueOperand(), Offset);
      }
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the later with an extraction from the former.
    if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLoad != Load && Address &&
          Load->isAtomic() <= DepLoad->isAtomic()) {
        Type *LoadType = Load->getType();
        int Offset = -1;

        // If MD reported clobber, check it was nested.
        if (DepInfo.isClobber() &&
            canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
          const auto ClobberOff = MD->getClobberOffset(DepLoad);
          // GVN cannot handle a negative offset.
          Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
                       ? -1
                       : *ClobberOff;
        }
        if (Offset == -1)
          Offset =
              analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
        if (Offset != -1)
          return AvailableValue::getLoad(DepLoad, Offset);
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !Load->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1)
          return AvailableValue::getMI(DepMI, Offset);
      }
    }

    // Nothing known about this clobber, have to be conservative.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
        dbgs() << " is clobbered by " << *DepInst << '\n';);
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(Load, DepInfo, DT, ORE);

    return std::nullopt;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading the alloca -> undef.
  // Loading immediately after lifetime begin -> undef.
  if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst))
    return AvailableValue::get(UndefValue::get(Load->getType()));

  if (Constant *InitVal =
          getInitialValueOfAllocation(DepInst, TLI, Load->getType()))
    return AvailableValue::get(InitVal);

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is convertible to
    // the loaded value, we can reuse it.
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
                                         DL))
      return std::nullopt;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::get(S->getValueOperand());
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
      return std::nullopt;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::getLoad(LD);
  }

  // Check if a load whose address depends on a select can be converted into a
  // select between load values. There must be no instructions between the
  // found loads and DepInst that may clobber the loads.
  if (auto *Sel = dyn_cast<SelectInst>(DepInst)) {
    assert(Sel->getType() == Load->getPointerOperandType());
    auto Loc = MemoryLocation::get(Load);
    Value *V1 =
        findDominatingValue(Loc.getWithNewPtr(Sel->getTrueValue()),
                            Load->getType(), DepInst, getAliasAnalysis());
    if (!V1)
      return std::nullopt;
    Value *V2 =
        findDominatingValue(Loc.getWithNewPtr(Sel->getFalseValue()),
                            Load->getType(), DepInst, getAliasAnalysis());
    if (!V2)
      return std::nullopt;
    return AvailableValue::getSelect(Sel, V1, V2);
  }

  // Unknown def - must be conservative.
  LLVM_DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
      dbgs() << " has unknown def " << *DepInst << '\n';);
  return std::nullopt;
}

void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  for (const auto &Dep : Deps) {
    BasicBlock *DepBB = Dep.getBB();
    MemDepResult DepInfo = Dep.getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op disguises as a load evaluating the same value
      // as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isLocal()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
    if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
      // Subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(
          AvailableValueInBlock::get(DepBB, std::move(*AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

/// Given the following code, v1 is partially available on some edges, but not
/// available on the edge from PredBB. This function tries to find if there is
/// another identical load in the other successor of PredBB.
///
///      v0 = load %addr
///      br %LoadBB
///
///   LoadBB:
///      v1 = load %addr
///      ...
///
///   PredBB:
///      ...
///      br %cond, label %LoadBB, label %SuccBB
///
///   SuccBB:
///      v2 = load %addr
///      ...
///
LoadInst *GVNPass::findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
                                           LoadInst *Load) {
  // For simplicity we only handle a Pred with exactly two successors.
  auto *Term = Pred->getTerminator();
  if (Term->getNumSuccessors() != 2 || Term->isSpecialTerminator())
    return nullptr;
  auto *SuccBB = Term->getSuccessor(0);
  if (SuccBB == LoadBB)
    SuccBB = Term->getSuccessor(1);
  if (!SuccBB->getSinglePredecessor())
    return nullptr;

  unsigned int NumInsts = MaxNumInsnsPerBlock;
  for (Instruction &Inst : *SuccBB) {
    if (Inst.isDebugOrPseudoInst())
      continue;
    if (--NumInsts == 0)
      return nullptr;

    if (!Inst.isIdenticalTo(Load))
      continue;

    MemDepResult Dep = MD->getDependency(&Inst);
    // If an identical load doesn't depend on any local instructions, it can
    // be safely moved to PredBB.
    // Also check for the implicit control flow instructions. See the comments
    // in PerformLoadPRE for details.
    if (Dep.isNonLocal() && !ICF->isDominatedByICFIFromSameBlock(&Inst))
      return cast<LoadInst>(&Inst);

    // Otherwise something in the same BB clobbers the memory, so we can't
    // move this and the later load to PredBB.
    return nullptr;
  }

  return nullptr;
}

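/// Insert a new load in each block of AvailableLoads (the predecessors where
/// the value was unavailable), stitch the old and new values together with
/// SSA construction, and replace all uses of the original load.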
void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
    MapVector<BasicBlock *, Value *> &AvailableLoads,
    MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad) {
  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

    auto *NewLoad = new LoadInst(
        Load->getType(), LoadPtr, Load->getName() + ".pre", Load->isVolatile(),
        Load->getAlign(), Load->getOrdering(), Load->getSyncScopeID(),
        UnavailableBlock->getTerminator()->getIterator());
    NewLoad->setDebugLoc(Load->getDebugLoc());
    if (MSSAU) {
      auto *NewAccess = MSSAU->createMemoryAccessInBB(
          NewLoad, nullptr, NewLoad->getParent(), MemorySSA::BeforeTerminator);
      if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
        MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      else
        MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
    }

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags = Load->getAAMetadata();
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
      if (LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
        NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(
        AvailableValueInBlock::get(UnavailableBlock, NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');

    // For a PredBB in CriticalEdgePredAndLoad we need to replace the uses of
    // the old load instruction with the newly created load instruction.
    if (CriticalEdgePredAndLoad) {
      auto I = CriticalEdgePredAndLoad->find(UnavailableBlock);
      if (I != CriticalEdgePredAndLoad->end()) {
        ++NumPRELoadMoved2CEPred;
        ICF->insertInstructionTo(NewLoad, UnavailableBlock);
        LoadInst *OldLoad = I->second;
        combineMetadataForCSE(NewLoad, OldLoad, false);
        OldLoad->replaceAllUsesWith(NewLoad);
        replaceValuesPerBlockEntry(ValuesPerBlock, OldLoad, NewLoad);
        if (uint32_t ValNo = VN.lookup(OldLoad, false))
          LeaderTable.erase(ValNo, OldLoad, OldLoad->getParent());
        VN.erase(OldLoad);
        removeInstruction(OldLoad);
      }
    }
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
  // ConstructSSAForLoadSet is responsible for combining metadata.
  ICF->removeUsersOf(Load);
  Load->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(Load);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(Load);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
           << "load eliminated by PRE";
  });
}

1568bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1569 UnavailBlkVect &UnavailableBlocks) {
1570 // Okay, we have *some* definitions of the value. This means that the value
1571 // is available in some of our (transitive) predecessors. Lets think about
1572 // doing PRE of this load. This will involve inserting a new load into the
1573 // predecessor when it's not available. We could do this in general, but
1574 // prefer to not increase code size. As such, we only do this when we know
1575 // that we only have to insert *one* load (which means we're basically moving
1576 // the load, not inserting a new one).
1577
1578 SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1579 UnavailableBlocks.end());
1580
1581 // Let's find the first basic block with more than one predecessor. Walk
1582 // backwards through predecessors if needed.
1583 BasicBlock *LoadBB = Load->getParent();
1584 BasicBlock *TmpBB = LoadBB;
1585
1586 // Check that there is no implicit control flow instructions above our load in
1587 // its block. If there is an instruction that doesn't always pass the
1588 // execution to the following instruction, then moving through it may become
1589 // invalid. For example:
1590 //
1591 // int arr[LEN];
1592 // int index = ???;
1593 // ...
1594 // guard(0 <= index && index < LEN);
1595 // use(arr[index]);
1596 //
1597 // It is illegal to move the array access to any point above the guard,
1598 // because if the index is out of bounds we should deoptimize rather than
1599 // access the array.
1600 // Check that there is no guard in this block above our instruction.
1601 bool MustEnsureSafetyOfSpeculativeExecution =
1603
1604 while (TmpBB->getSinglePredecessor()) {
1605 TmpBB = TmpBB->getSinglePredecessor();
1606 if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1607 return false;
1608 if (Blockers.count(TmpBB))
1609 return false;
1610
1611 // If any of these blocks has more than one successor (i.e. if the edge we
1612 // just traversed was critical), then there are other paths through this
1613 // block along which the load may not be anticipated. Hoisting the load
1614 // above this block would be adding the load to execution paths along
1615 // which it was not previously executed.
1616 if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1617 return false;
1618
1619 // Check that there is no implicit control flow in a block above.
1620 MustEnsureSafetyOfSpeculativeExecution =
1621 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1622 }
1623
1624 assert(TmpBB);
1625 LoadBB = TmpBB;
1626
1627 // Check to see how many predecessors have the loaded value fully
1628 // available.
1630 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1631 for (const AvailableValueInBlock &AV : ValuesPerBlock)
1632 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1633 for (BasicBlock *UnavailableBB : UnavailableBlocks)
1634 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1635
1636 // The edge from Pred to LoadBB is a critical edge will be splitted.
1637 SmallVector<BasicBlock *, 4> CriticalEdgePredSplit;
1638 // The edge from Pred to LoadBB is a critical edge, another successor of Pred
1639 // contains a load can be moved to Pred. This data structure maps the Pred to
1640 // the movable load.
1641 MapVector<BasicBlock *, LoadInst *> CriticalEdgePredAndLoad;
1642 for (BasicBlock *Pred : predecessors(LoadBB)) {
1643 // If any predecessor block is an EH pad that does not allow non-PHI
1644 // instructions before the terminator, we can't PRE the load.
1645 if (Pred->getTerminator()->isEHPad()) {
1646 LLVM_DEBUG(
1647 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1648 << Pred->getName() << "': " << *Load << '\n');
1649 return false;
1650 }
1651
1652 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1653 continue;
1654 }
1655
1656 if (Pred->getTerminator()->getNumSuccessors() != 1) {
1657 if (isa<IndirectBrInst>(Pred->getTerminator())) {
1658 LLVM_DEBUG(
1659 dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1660 << Pred->getName() << "': " << *Load << '\n');
1661 return false;
1662 }
1663
1664 if (LoadBB->isEHPad()) {
1665 LLVM_DEBUG(
1666 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1667 << Pred->getName() << "': " << *Load << '\n');
1668 return false;
1669 }
1670
1671 // Do not split backedge as it will break the canonical loop form.
1672 if (!isLoadPRESplitBackedgeEnabled())
1673 if (DT->dominates(LoadBB, Pred)) {
1674 LLVM_DEBUG(
1675 dbgs()
1676 << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1677 << Pred->getName() << "': " << *Load << '\n');
1678 return false;
1679 }
1680
1681 if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
1682 CriticalEdgePredAndLoad[Pred] = LI;
1683 else
1684 CriticalEdgePredSplit.push_back(Pred);
1685 } else {
1686 // Only add the predecessors that will not be split for now.
1687 PredLoads[Pred] = nullptr;
1688 }
1689 }
1690
1691 // Decide whether PRE is profitable for this load.
1692 unsigned NumInsertPreds = PredLoads.size() + CriticalEdgePredSplit.size();
1693 unsigned NumUnavailablePreds = NumInsertPreds +
1694 CriticalEdgePredAndLoad.size();
1695 assert(NumUnavailablePreds != 0 &&
1696 "Fully available value should already be eliminated!");
1697 (void)NumUnavailablePreds;
1698
1699 // If we would need to insert a new load into multiple predecessors, reject it.
1700 // FIXME: If we could restructure the CFG, we could make a common pred with
1701 // all the preds that don't have an available Load and insert a new load into
1702 // that one block.
1703 if (NumInsertPreds > 1)
1704 return false;
1705
1706 // Now we know where we will insert the load. We must ensure that it is safe
1707 // to speculatively execute the load at those points.
1708 if (MustEnsureSafetyOfSpeculativeExecution) {
1709 if (CriticalEdgePredSplit.size())
1710 if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), AC, DT))
1711 return false;
1712 for (auto &PL : PredLoads)
1713 if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), AC,
1714 DT))
1715 return false;
1716 for (auto &CEP : CriticalEdgePredAndLoad)
1717 if (!isSafeToSpeculativelyExecute(Load, CEP.first->getTerminator(), AC,
1718 DT))
1719 return false;
1720 }
1721
1722 // Split critical edges, and update the unavailable predecessors accordingly.
1723 for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
1724 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1725 assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1726 PredLoads[NewPred] = nullptr;
1727 LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1728 << LoadBB->getName() << '\n');
1729 }
1730
1731 for (auto &CEP : CriticalEdgePredAndLoad)
1732 PredLoads[CEP.first] = nullptr;
1733
1734 // Check if the load can safely be moved to all the unavailable predecessors.
1735 bool CanDoPRE = true;
1736 const DataLayout &DL = Load->getDataLayout();
1737 SmallVector<Instruction *, 8> NewInsts;
1738 for (auto &PredLoad : PredLoads) {
1739 BasicBlock *UnavailablePred = PredLoad.first;
1740
1741 // Do PHI translation to get its value in the predecessor if necessary. The
1742 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1743 // We do the translation for each edge we skipped by going from Load's block
1744 // to LoadBB, otherwise we might miss pieces needing translation.
1745
1746 // If all preds have a single successor, then we know it is safe to insert
1747 // the load on the pred (?!?), so we can insert code to materialize the
1748 // pointer if it is not available.
1749 Value *LoadPtr = Load->getPointerOperand();
1750 BasicBlock *Cur = Load->getParent();
1751 while (Cur != LoadBB) {
1752 PHITransAddr Address(LoadPtr, DL, AC);
1753 LoadPtr = Address.translateWithInsertion(Cur, Cur->getSinglePredecessor(),
1754 *DT, NewInsts);
1755 if (!LoadPtr) {
1756 CanDoPRE = false;
1757 break;
1758 }
1759 Cur = Cur->getSinglePredecessor();
1760 }
1761
1762 if (LoadPtr) {
1763 PHITransAddr Address(LoadPtr, DL, AC);
1764 LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1765 NewInsts);
1766 }
1767 // If we couldn't find or insert a computation of this phi translated value,
1768 // we fail PRE.
1769 if (!LoadPtr) {
1770 LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1771 << *Load->getPointerOperand() << "\n");
1772 CanDoPRE = false;
1773 break;
1774 }
1775
1776 PredLoad.second = LoadPtr;
1777 }
1778
1779 if (!CanDoPRE) {
1780 while (!NewInsts.empty()) {
1781 // Erase instructions generated by the failed PHI translation before
1782 // trying to number them. PHI translation might insert instructions
1783 // in basic blocks other than the current one, and we delete them
1784 // directly, as markInstructionForDeletion only allows removing from the
1785 // current basic block.
1786 NewInsts.pop_back_val()->eraseFromParent();
1787 }
1788 // HINT: Don't revert the edge-splitting, as a following transformation may
1789 // also need to split these critical edges.
1790 return !CriticalEdgePredSplit.empty();
1791 }
1792
1793 // Okay, we can eliminate this load by inserting a reload in the predecessor
1794 // and using PHI construction to get the value in the other predecessors.
1795 // Do it.
1796 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1797 LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1798 << " INSTS: " << *NewInsts.back()
1799 << '\n');
1800
1801 // Assign value numbers to the new instructions.
1802 for (Instruction *I : NewInsts) {
1803 // Instructions that have been inserted in predecessor(s) to materialize
1804 // the load address do not retain their original debug locations. Doing
1805 // so could lead to confusing (but correct) source attributions.
1806 I->updateLocationAfterHoist();
1807
1808 // FIXME: We really _ought_ to insert these value numbers into their
1809 // parent's availability map. However, in doing so, we risk getting into
1810 // ordering issues. If a block hasn't been processed yet, we would be
1811 // marking a value as AVAIL-IN, which isn't what we intend.
1812 VN.lookupOrAdd(I);
1813 }
1814
1815 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
1816 &CriticalEdgePredAndLoad);
1817 ++NumPRELoad;
1818 return true;
1819}
1820
1821bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1822 AvailValInBlkVect &ValuesPerBlock,
1823 UnavailBlkVect &UnavailableBlocks) {
1824 const Loop *L = LI->getLoopFor(Load->getParent());
1825 // TODO: Generalize to other loop blocks that dominate the latch.
1826 if (!L || L->getHeader() != Load->getParent())
1827 return false;
1828
1829 BasicBlock *Preheader = L->getLoopPreheader();
1830 BasicBlock *Latch = L->getLoopLatch();
1831 if (!Preheader || !Latch)
1832 return false;
1833
1834 Value *LoadPtr = Load->getPointerOperand();
1835 // Must be available in preheader.
1836 if (!L->isLoopInvariant(LoadPtr))
1837 return false;
1838
1839 // We plan to hoist the load to preheader without introducing a new fault.
1840 // In order to do it, we need to prove that we cannot side-exit the loop
1841 // once loop header is first entered before execution of the load.
1842 if (ICF->isDominatedByICFIFromSameBlock(Load))
1843 return false;
1844
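// Illustrative target shape (generic names): a load from a loop-invariant
// pointer in the header becomes one load in the preheader plus a reload in
// the single clobbering loop block, with the values merged by a phi in the
// header.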
1845 BasicBlock *LoopBlock = nullptr;
1846 for (auto *Blocker : UnavailableBlocks) {
1847 // Blockers from outside the loop are handled in preheader.
1848 if (!L->contains(Blocker))
1849 continue;
1850
1851 // Only allow one loop block. The loop header is executed no less frequently
1852 // than any loop block, and likely much more frequently. But in the case of
1853 // multiple loop blocks, we need extra information (such as block frequency
1854 // info) to understand whether it is profitable to PRE into multiple loop
1855 // blocks.
1856 if (LoopBlock)
1857 return false;
1858
1859 // Do not sink into inner loops. This may be unprofitable.
1860 if (L != LI->getLoopFor(Blocker))
1861 return false;
1862
1863 // Blocks that dominate the latch execute on every single iteration, except
1864 // maybe the last one. So PREing into these blocks doesn't make much sense
1865 // in most cases. But the blocks that do not necessarily execute on each
1866 // iteration are sometimes much colder than the header, and this is when
1867 // PRE is potentially profitable.
1868 if (DT->dominates(Blocker, Latch))
1869 return false;
1870
1871 // Make sure that the terminator itself doesn't clobber.
1872 if (Blocker->getTerminator()->mayWriteToMemory())
1873 return false;
1874
1875 LoopBlock = Blocker;
1876 }
1877
1878 if (!LoopBlock)
1879 return false;
1880
1881 // Make sure the memory at this pointer cannot be freed, so that we can
1882 // safely reload from it after a clobber.
1883 if (LoadPtr->canBeFreed())
1884 return false;
1885
1886 // TODO: Support critical edge splitting if blocker has more than 1 successor.
1887 MapVector<BasicBlock *, Value *> AvailableLoads;
1888 AvailableLoads[LoopBlock] = LoadPtr;
1889 AvailableLoads[Preheader] = LoadPtr;
1890
1891 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1892 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
1893 /*CriticalEdgePredAndLoad*/ nullptr);
1894 ++NumPRELoopLoad;
1895 return true;
1896}
1897
1898 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1899 OptimizationRemarkEmitter *ORE) {
1900 using namespace ore;
1901
1902 ORE->emit([&]() {
1903 return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1904 << "load of type " << NV("Type", Load->getType()) << " eliminated"
1905 << setExtraArgs() << " in favor of "
1906 << NV("InfavorOfValue", AvailableValue);
1907 });
1908}
1909
1910/// Attempt to eliminate a load whose dependencies are
1911/// non-local by performing PHI construction.
1912bool GVNPass::processNonLocalLoad(LoadInst *Load) {
1913 // Non-local speculations are not allowed under ASan or HWASan.
1914 if (Load->getParent()->getParent()->hasFnAttribute(
1915 Attribute::SanitizeAddress) ||
1916 Load->getParent()->getParent()->hasFnAttribute(
1917 Attribute::SanitizeHWAddress))
1918 return false;
1919
1920 // Step 1: Find the non-local dependencies of the load.
1921 LoadDepVect Deps;
1922 MD->getNonLocalPointerDependency(Load, Deps);
1923
1924 // If we had to process more than one hundred blocks to find the
1925 // dependencies, this load isn't worth worrying about. Optimizing
1926 // it will be too expensive.
1927 unsigned NumDeps = Deps.size();
1928 if (NumDeps > MaxNumDeps)
1929 return false;
1930
1931 // If we had a phi translation failure, we'll have a single entry which is a
1932 // clobber in the current block. Reject this early.
1933 if (NumDeps == 1 &&
1934 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1935 LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1936 dbgs() << " has unknown dependencies\n";);
1937 return false;
1938 }
1939
1940 bool Changed = false;
1941 // If this load follows a GEP, see if we can PRE the indices before analyzing.
1942 if (GetElementPtrInst *GEP =
1943 dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1944 for (Use &U : GEP->indices())
1945 if (Instruction *I = dyn_cast<Instruction>(U.get()))
1946 Changed |= performScalarPRE(I);
1947 }
1948
1949 // Step 2: Analyze the availability of the load
1950 AvailValInBlkVect ValuesPerBlock;
1951 UnavailBlkVect UnavailableBlocks;
1952 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1953
1954 // If we have no predecessors that produce a known value for this load, exit
1955 // early.
1956 if (ValuesPerBlock.empty())
1957 return Changed;
1958
1959 // Step 3: Eliminate full redundancy.
1960 //
1961 // If all of the instructions we depend on produce a known value for this
1962 // load, then it is fully redundant and we can use PHI insertion to compute
1963 // its value. Insert PHIs and remove the fully redundant value now.
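// For example (illustrative): if every predecessor either stores %x to the
// load's pointer or reloads from it, PHI construction merges the available
// values (e.g. "phi i32 [ %x1, %pred1 ], [ %x2, %pred2 ]") and the load
// itself is deleted.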
1964 if (UnavailableBlocks.empty()) {
1965 LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1966
1967 // Perform PHI construction.
1968 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1969 // ConstructSSAForLoadSet is responsible for combining metadata.
1970 ICF->removeUsersOf(Load);
1971 Load->replaceAllUsesWith(V);
1972
1973 if (isa<PHINode>(V))
1974 V->takeName(Load);
1975 if (Instruction *I = dyn_cast<Instruction>(V))
1976 // If instruction I has debug info, then we should not update it.
1977 // Also, if I has a null DebugLoc, then it is still potentially incorrect
1978 // to propagate Load's DebugLoc because Load may not post-dominate I.
1979 if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1980 I->setDebugLoc(Load->getDebugLoc());
1981 if (V->getType()->isPtrOrPtrVectorTy())
1983 MD->invalidateCachedPointerInfo(V);
1984 ++NumGVNLoad;
1985 reportLoadElim(Load, V, ORE);
1986 return true;
1987 }
1988
1989 // Step 4: Eliminate partial redundancy.
1990 if (!isPREEnabled() || !isLoadPREEnabled())
1991 return Changed;
1992 if (!isLoadInLoopPREEnabled() && LI->getLoopFor(Load->getParent()))
1993 return Changed;
1994
1995 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
1996 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
1997 return true;
1998
1999 return Changed;
2000}
2001
2002static bool hasUsersIn(Value *V, BasicBlock *BB) {
2003 return llvm::any_of(V->users(), [BB](User *U) {
2004 auto *I = dyn_cast<Instruction>(U);
2005 return I && I->getParent() == BB;
2006 });
2007}
2008
2009bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
2010 Value *V = IntrinsicI->getArgOperand(0);
2011
2012 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
2013 if (Cond->isZero()) {
2014 Type *Int8Ty = Type::getInt8Ty(V->getContext());
2015 Type *PtrTy = PointerType::get(V->getContext(), 0);
2016 // Insert a new store to null instruction before the assume to indicate that
2017 // this code is not reachable. FIXME: We could insert an unreachable
2018 // instruction directly because we can modify the CFG.
2019 auto *NewS =
2020 new StoreInst(PoisonValue::get(Int8Ty), Constant::getNullValue(PtrTy),
2021 IntrinsicI->getIterator());
2022 if (MSSAU) {
2023 const MemoryUseOrDef *FirstNonDom = nullptr;
2024 const auto *AL =
2025 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
2026
2027 // If there are accesses in the current basic block, find the first one
2028 // that does not come before NewS. The new memory access is inserted
2029 // after the found access or before the terminator if no such access is
2030 // found.
2031 if (AL) {
2032 for (const auto &Acc : *AL) {
2033 if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
2034 if (!Current->getMemoryInst()->comesBefore(NewS)) {
2035 FirstNonDom = Current;
2036 break;
2037 }
2038 }
2039 }
2040
2041 auto *NewDef =
2042 FirstNonDom ? MSSAU->createMemoryAccessBefore(
2043 NewS, nullptr,
2044 const_cast<MemoryUseOrDef *>(FirstNonDom))
2045 : MSSAU->createMemoryAccessInBB(
2046 NewS, nullptr,
2047 NewS->getParent(), MemorySSA::BeforeTerminator);
2048
2049 MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
2050 }
2051 }
2052 if (isAssumeWithEmptyBundle(*IntrinsicI)) {
2053 markInstructionForDeletion(IntrinsicI);
2054 return true;
2055 }
2056 return false;
2057 }
2058
2059 if (isa<Constant>(V)) {
2060 // If it's a constant and not false, it must evaluate to true. This means our
2061 // assume is assume(true) and is thus pointless, so we don't want to do
2062 // anything more here.
2063 return false;
2064 }
2065
2066 Constant *True = ConstantInt::getTrue(V->getContext());
2067 bool Changed = false;
2068
2069 for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
2070 BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
2071
2072 // This property is only true in dominated successors, propagateEquality
2073 // will check dominance for us.
2074 Changed |= propagateEquality(V, True, Edge, false);
2075 }
2076
2077 // We can replace assume value with true, which covers cases like this:
2078 // call void @llvm.assume(i1 %cmp)
2079 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
2080 ReplaceOperandsWithMap[V] = True;
2081
2082 // Similarly, after assume(!NotV) we know that NotV == false.
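// (For example, "assume(xor i1 %v, true)" implies that %v is false here.)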
2083 Value *NotV;
2084 if (match(V, m_Not(m_Value(NotV))))
2085 ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
2086
2087 // If we find an equality fact, canonicalize all dominated uses in this block
2088 // to one of the two values. We heuristically choose the "oldest" of the
2089 // two where age is determined by value number. (Note that propagateEquality
2090 // above handles the cross block case.)
2091 //
2092 // Key cases to cover are:
2093 // 1)
2094 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
2095 // call void @llvm.assume(i1 %cmp)
2096 // ret float %0 ; will change it to ret float 3.000000e+00
2097 // 2)
2098 // %load = load float, float* %addr
2099 // %cmp = fcmp oeq float %load, %0
2100 // call void @llvm.assume(i1 %cmp)
2101 // ret float %load ; will change it to ret float %0
2102 if (auto *CmpI = dyn_cast<CmpInst>(V)) {
2103 if (CmpI->isEquivalence()) {
2104 Value *CmpLHS = CmpI->getOperand(0);
2105 Value *CmpRHS = CmpI->getOperand(1);
2106 // Heuristically pick the better replacement -- the choice of heuristic
2107 // isn't terribly important here, but the fact we canonicalize on some
2108 // replacement is for exposing other simplifications.
2109 // TODO: pull this out as a helper function and reuse w/existing
2110 // (slightly different) logic.
2111 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
2112 std::swap(CmpLHS, CmpRHS);
2113 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
2114 std::swap(CmpLHS, CmpRHS);
2115 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
2116 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
2117 // Move the 'oldest' value to the right-hand side, using the value
2118 // number as a proxy for age.
2119 uint32_t LVN = VN.lookupOrAdd(CmpLHS);
2120 uint32_t RVN = VN.lookupOrAdd(CmpRHS);
2121 if (LVN < RVN)
2122 std::swap(CmpLHS, CmpRHS);
2123 }
2124
2125 // Handle the degenerate case where we either haven't pruned a dead path
2126 // or removed a trivial assume yet.
2127 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
2128 return Changed;
2129
2130 LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
2131 << *CmpLHS << " with "
2132 << *CmpRHS << " in block "
2133 << IntrinsicI->getParent()->getName() << "\n");
2134
2135
2136 // Set up the replacement map - this handles uses within the same block.
2137 if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
2138 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
2139
2140 // NOTE: The non-block local cases are handled by the call to
2141 // propagateEquality above; this block is just about handling the block
2142 // local cases. TODO: There's a bunch of logic in propagateEquality which
2143 // isn't duplicated for the block local case, can we share it somehow?
2144 }
2145 }
2146 return Changed;
2147}
2148
2149 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
2150 patchReplacementInstruction(I, Repl);
2151 I->replaceAllUsesWith(Repl);
2152}
2153
2154/// Attempt to eliminate a load, first by eliminating it
2155/// locally, and then attempting non-local elimination if that fails.
2156bool GVNPass::processLoad(LoadInst *L) {
2157 if (!MD)
2158 return false;
2159
2160 // This code hasn't been audited for ordered or volatile memory access.
2161 if (!L->isUnordered())
2162 return false;
2163
2164 if (L->use_empty()) {
2165 markInstructionForDeletion(L);
2166 return true;
2167 }
2168
2169 // ... to a pointer that has been loaded from before...
2170 MemDepResult Dep = MD->getDependency(L);
2171
2172 // If it is defined in another block, try harder.
2173 if (Dep.isNonLocal())
2174 return processNonLocalLoad(L);
2175
2176 // Only handle the local case below
2177 if (!Dep.isLocal()) {
2178 // This might be a NonFuncLocal or an Unknown
2179 LLVM_DEBUG(
2180 // fast print dep, using operator<< on instruction is too slow.
2181 dbgs() << "GVN: load "; L->printAsOperand(dbgs());
2182 dbgs() << " has unknown dependence\n";);
2183 return false;
2184 }
2185
2186 auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
2187 if (!AV)
2188 return false;
2189
2190 Value *AvailableValue = AV->MaterializeAdjustedValue(L, L, *this);
2191
2192 // MaterializeAdjustedValue is responsible for combining metadata.
2193 ICF->removeUsersOf(L);
2194 L->replaceAllUsesWith(AvailableValue);
2195 markInstructionForDeletion(L);
2196 if (MSSAU)
2197 MSSAU->removeMemoryAccess(L);
2198 ++NumGVNLoad;
2199 reportLoadElim(L, AvailableValue, ORE);
2200 // Tell MDA to reexamine the reused pointer since we might have more
2201 // information after forwarding it.
2202 if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
2203 MD->invalidateCachedPointerInfo(AvailableValue);
2204 return true;
2205}
2206
2207 /// Return a pair whose first field is the value number of \p Exp and whose
2208 /// second field indicates whether the value number was newly created.
2209std::pair<uint32_t, bool>
2210GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
2211 uint32_t &e = expressionNumbering[Exp];
2212 bool CreateNewValNum = !e;
2213 if (CreateNewValNum) {
2214 Expressions.push_back(Exp);
2215 if (ExprIdx.size() < nextValueNumber + 1)
2216 ExprIdx.resize(nextValueNumber * 2);
2217 e = nextValueNumber;
2218 ExprIdx[nextValueNumber++] = nextExprNumber++;
2219 }
2220 return {e, CreateNewValNum};
2221}
2222
2223 /// Return whether all the values related to the same \p Num are
2224 /// defined in \p BB.
2225bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
2226 GVNPass &Gvn) {
2227 return all_of(
2228 Gvn.LeaderTable.getLeaders(Num),
2229 [=](const LeaderMap::LeaderTableEntry &L) { return L.BB == BB; });
2230}
2231
2232/// Wrap phiTranslateImpl to provide caching functionality.
2233uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
2234 const BasicBlock *PhiBlock,
2235 uint32_t Num, GVNPass &Gvn) {
2236 auto FindRes = PhiTranslateTable.find({Num, Pred});
2237 if (FindRes != PhiTranslateTable.end())
2238 return FindRes->second;
2239 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
2240 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2241 return NewNum;
2242}
2243
2244 // Return true if the value numbers \p Num and \p NewNum represent equal
2245 // values. Return false if the result is unknown.
2246bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
2247 const BasicBlock *Pred,
2248 const BasicBlock *PhiBlock,
2249 GVNPass &Gvn) {
2250 CallInst *Call = nullptr;
2251 auto Leaders = Gvn.LeaderTable.getLeaders(Num);
2252 for (const auto &Entry : Leaders) {
2253 Call = dyn_cast<CallInst>(Entry.Val);
2254 if (Call && Call->getParent() == PhiBlock)
2255 break;
2256 }
2257
2258 if (AA->doesNotAccessMemory(Call))
2259 return true;
2260
2261 if (!MD || !AA->onlyReadsMemory(Call))
2262 return false;
2263
2264 MemDepResult local_dep = MD->getDependency(Call);
2265 if (!local_dep.isNonLocal())
2266 return false;
2267
2267
2268 const MemoryDependenceResults::NonLocalDepInfo &deps =
2269 MD->getNonLocalCallDependency(Call);
2270
2271 // Check to see if the Call has no function local clobber.
2272 for (const NonLocalDepEntry &D : deps) {
2273 if (D.getResult().isNonFuncLocal())
2274 return true;
2275 }
2276 return false;
2277}
2278
2279/// Translate value number \p Num using phis, so that it has the values of
2280/// the phis in BB.
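/// For example (illustrative), if \p Num is the value number of
/// "%p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]" in \p PhiBlock, then
/// translating \p Num through the edge pred1 -> PhiBlock yields the value
/// number of %a.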
2281uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2282 const BasicBlock *PhiBlock,
2283 uint32_t Num, GVNPass &Gvn) {
2284 if (PHINode *PN = NumberingPhi[Num]) {
2285 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2286 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2287 if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2288 return TransVal;
2289 }
2290 return Num;
2291 }
2292
2293 // If any value related to Num is defined in a BB other than PhiBlock,
2294 // it cannot depend on a phi in PhiBlock without going through a backedge.
2295 // We can do an early exit in that case to save compile time.
2296 if (!areAllValsInBB(Num, PhiBlock, Gvn))
2297 return Num;
2298
2299 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2300 return Num;
2301 Expression Exp = Expressions[ExprIdx[Num]];
2302
2303 for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2304 // For InsertValue and ExtractValue, some varargs are index numbers
2305 // instead of value numbers. Those index numbers should not be
2306 // translated.
2307 if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2308 (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2309 (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2310 continue;
2311 Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2312 }
2313
2314 if (Exp.commutative) {
2315 assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2316 if (Exp.varargs[0] > Exp.varargs[1]) {
2317 std::swap(Exp.varargs[0], Exp.varargs[1]);
2318 uint32_t Opcode = Exp.opcode >> 8;
2319 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2320 Exp.opcode = (Opcode << 8) |
2321 CmpInst::getSwappedPredicate(
2322 static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2323 }
2324 }
2325
2326 if (uint32_t NewNum = expressionNumbering[Exp]) {
2327 if (Exp.opcode == Instruction::Call && NewNum != Num)
2328 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2329 return NewNum;
2330 }
2331 return Num;
2332}
2333
2334/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2335/// again.
2336void GVNPass::ValueTable::eraseTranslateCacheEntry(
2337 uint32_t Num, const BasicBlock &CurrBlock) {
2338 for (const BasicBlock *Pred : predecessors(&CurrBlock))
2339 PhiTranslateTable.erase({Num, Pred});
2340}
2341
2342// In order to find a leader for a given value number at a
2343// specific basic block, we first obtain the list of all Values for that number,
2344// and then scan the list to find one whose block dominates the block in
2345// question. This is fast because dominator tree queries consist of only
2346// a few comparisons of DFS numbers.
2347Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
2348 auto Leaders = LeaderTable.getLeaders(num);
2349 if (Leaders.empty())
2350 return nullptr;
2351
2352 Value *Val = nullptr;
2353 for (const auto &Entry : Leaders) {
2354 if (DT->dominates(Entry.BB, BB)) {
2355 Val = Entry.Val;
2356 if (isa<Constant>(Val))
2357 return Val;
2358 }
2359 }
2360
2361 return Val;
2362}
2363
2364/// There is an edge from 'Src' to 'Dst'. Return
2365/// true if every path from the entry block to 'Dst' passes via this edge. In
2366/// particular 'Dst' must not be reachable via another edge from 'Src'.
2367 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2368 DominatorTree *DT) {
2369 // While in theory it is interesting to consider the case in which Dst has
2370 // more than one predecessor, because Dst might be part of a loop which is
2371 // only reachable from Src, in practice it is pointless since at the time
2372 // GVN runs all such loops have preheaders, which means that Dst will have
2373 // been changed to have only one predecessor, namely Src.
2374 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2375 assert((!Pred || Pred == E.getStart()) &&
2376 "No edge between these basic blocks!");
2377 return Pred != nullptr;
2378}
2379
2380void GVNPass::assignBlockRPONumber(Function &F) {
2381 BlockRPONumber.clear();
2382 uint32_t NextBlockNumber = 1;
2383 ReversePostOrderTraversal<Function *> RPOT(&F);
2384 for (BasicBlock *BB : RPOT)
2385 BlockRPONumber[BB] = NextBlockNumber++;
2386 InvalidBlockRPONumbers = false;
2387}
2388
2389bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2390 bool Changed = false;
2391 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2392 Value *Operand = Instr->getOperand(OpNum);
2393 auto it = ReplaceOperandsWithMap.find(Operand);
2394 if (it != ReplaceOperandsWithMap.end()) {
2395 LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2396 << *it->second << " in instruction " << *Instr << '\n');
2397 Instr->setOperand(OpNum, it->second);
2398 Changed = true;
2399 }
2400 }
2401 return Changed;
2402}
2403
2404/// The given values are known to be equal in every block
2405/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
2406/// 'RHS' everywhere in the scope. Returns whether a change was made.
2407/// If DominatesByEdge is false, then it means that we will propagate the RHS
2408/// value starting from the end of Root.Start.
2409bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
2410 const BasicBlockEdge &Root,
2411 bool DominatesByEdge) {
2412 SmallVector<std::pair<Value *, Value *>, 4> Worklist;
2413 Worklist.push_back(std::make_pair(LHS, RHS));
2414 bool Changed = false;
2415 // For speed, compute a conservative fast approximation to
2416 // DT->dominates(Root, Root.getEnd());
2417 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2418
2419 while (!Worklist.empty()) {
2420 std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2421 LHS = Item.first; RHS = Item.second;
2422
2423 if (LHS == RHS)
2424 continue;
2425 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2426
2427 // Don't try to propagate equalities between constants.
2428 if (isa<Constant>(LHS) && isa<Constant>(RHS))
2429 continue;
2430
2431 // Prefer a constant on the right-hand side, or an Argument if no constants.
2432 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2433 std::swap(LHS, RHS);
2434 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2435 const DataLayout &DL =
2436 isa<Argument>(LHS)
2437 ? cast<Argument>(LHS)->getParent()->getDataLayout()
2438 : cast<Instruction>(LHS)->getDataLayout();
2439
2440 // If there is no obvious reason to prefer the left-hand side over the
2441 // right-hand side, ensure the longest lived term is on the right-hand side,
2442 // so the shortest lived term will be replaced by the longest lived.
2443 // This tends to expose more simplifications.
2444 uint32_t LVN = VN.lookupOrAdd(LHS);
2445 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2446 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2447 // Move the 'oldest' value to the right-hand side, using the value number
2448 // as a proxy for age.
2449 uint32_t RVN = VN.lookupOrAdd(RHS);
2450 if (LVN < RVN) {
2451 std::swap(LHS, RHS);
2452 LVN = RVN;
2453 }
2454 }
2455
2456 // If value numbering later sees that an instruction in the scope is equal
2457 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
2458 // the invariant that instructions only occur in the leader table for their
2459 // own value number (this is used by removeFromLeaderTable), do not do this
2460 // if RHS is an instruction (if an instruction in the scope is morphed into
2461 // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2462 // using the leader table is about compiling faster, not optimizing better).
2463 // The leader table only tracks basic blocks, not edges. Only add to it if
2464 // we have the simple case where the edge dominates the end.
2465 if (RootDominatesEnd && !isa<Instruction>(RHS) &&
2466 canReplacePointersIfEqual(LHS, RHS, DL))
2467 LeaderTable.insert(LVN, RHS, Root.getEnd());
2468
2469 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
2470 // LHS always has at least one use that is not dominated by Root, this will
2471 // never do anything if LHS has only one use.
2472 if (!LHS->hasOneUse()) {
2473 // Create a callback that captures the DL.
2474 auto canReplacePointersCallBack = [&DL](const Use &U, const Value *To) {
2475 return canReplacePointersInUseIfEqual(U, To, DL);
2476 };
2477 unsigned NumReplacements =
2478 DominatesByEdge
2479 ? replaceDominatedUsesWithIf(LHS, RHS, *DT, Root,
2480 canReplacePointersCallBack)
2481 : replaceDominatedUsesWithIf(LHS, RHS, *DT, Root.getStart(),
2482 canReplacePointersCallBack);
2483
2484 if (NumReplacements > 0) {
2485 Changed = true;
2486 NumGVNEqProp += NumReplacements;
2487 // Cached information for anything that uses LHS will be invalid.
2488 if (MD)
2489 MD->invalidateCachedPointerInfo(LHS);
2490 }
2491 }
2492
2493 // Now try to deduce additional equalities from this one. For example, if
2494 // the known equality was "(A != B)" == "false" then it follows that A and B
2495 // are equal in the scope. Only boolean equalities with an explicit true or
2496 // false RHS are currently supported.
2497 if (!RHS->getType()->isIntegerTy(1))
2498 // Not a boolean equality - bail out.
2499 continue;
2500 ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2501 if (!CI)
2502 // RHS neither 'true' nor 'false' - bail out.
2503 continue;
2504 // Whether RHS equals 'true'. Otherwise it equals 'false'.
2505 bool isKnownTrue = CI->isMinusOne();
2506 bool isKnownFalse = !isKnownTrue;
2507
2508 // If "A && B" is known true then both A and B are known true. If "A || B"
2509 // is known false then both A and B are known false.
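// For example, from "(and i1 %a, %b) == true" we enqueue both "%a == true"
// and "%b == true" for further propagation.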
2510 Value *A, *B;
2511 if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2512 (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2513 Worklist.push_back(std::make_pair(A, RHS));
2514 Worklist.push_back(std::make_pair(B, RHS));
2515 continue;
2516 }
2517
2518 // If we are propagating an equality like "(A == B)" == "true" then also
2519 // propagate the equality A == B. When propagating a comparison such as
2520 // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2521 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2522 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2523
2524 // If "A == B" is known true, or "A != B" is known false, then replace
2525 // A with B everywhere in the scope. For floating point operations, we
2526 // have to be careful since equality does not always imply equivalence.
2527 if (Cmp->isEquivalence(isKnownFalse))
2528 Worklist.push_back(std::make_pair(Op0, Op1));
2529
2530 // If "A >= B" is known true, replace "A < B" with false everywhere.
2531 CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2532 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2533 // Since we don't have the instruction "A < B" immediately to hand, work
2534 // out the value number that it would have and use that to find an
2535 // appropriate instruction (if any).
2536 uint32_t NextNum = VN.getNextUnusedValueNumber();
2537 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2538 // If the number we were assigned was brand new then there is no point in
2539 // looking for an instruction realizing it: there cannot be one!
2540 if (Num < NextNum) {
2541 Value *NotCmp = findLeader(Root.getEnd(), Num);
2542 if (NotCmp && isa<Instruction>(NotCmp)) {
2543 unsigned NumReplacements =
2544 DominatesByEdge
2545 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2546 : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2547 Root.getStart());
2548 Changed |= NumReplacements > 0;
2549 NumGVNEqProp += NumReplacements;
2550 // Cached information for anything that uses NotCmp will be invalid.
2551 if (MD)
2552 MD->invalidateCachedPointerInfo(NotCmp);
2553 }
2554 }
2555 // Ensure that any instruction in scope that gets the "A < B" value number
2556 // is replaced with false.
2557 // The leader table only tracks basic blocks, not edges. Only add to it if
2558 // we have the simple case where the edge dominates the end.
2559 if (RootDominatesEnd)
2560 LeaderTable.insert(Num, NotVal, Root.getEnd());
2561
2562 continue;
2563 }
2564 }
2565
2566 return Changed;
2567}
2568
2569/// When calculating availability, handle an instruction
2570/// by inserting it into the appropriate sets
2571bool GVNPass::processInstruction(Instruction *I) {
2572 // Ignore dbg info intrinsics.
2573 if (isa<DbgInfoIntrinsic>(I))
2574 return false;
2575
2576 // If the instruction can be easily simplified then do so now in preference
2577 // to value numbering it. Value numbering often exposes redundancies, for
2578 // example if it determines that %y is equal to %x then the instruction
2579 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2580 const DataLayout &DL = I->getDataLayout();
2581 if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) {
2582 bool Changed = false;
2583 if (!I->use_empty()) {
2584 // Simplification can cause a special instruction to become not special.
2585 // For example, devirtualization to a willreturn function.
2586 ICF->removeUsersOf(I);
2587 I->replaceAllUsesWith(V);
2588 Changed = true;
2589 }
2590 if (isInstructionTriviallyDead(I, TLI)) {
2591 markInstructionForDeletion(I);
2592 Changed = true;
2593 }
2594 if (Changed) {
2595 if (MD && V->getType()->isPtrOrPtrVectorTy())
2596 MD->invalidateCachedPointerInfo(V);
2597 ++NumGVNSimpl;
2598 return true;
2599 }
2600 }
2601
2602 if (auto *Assume = dyn_cast<AssumeInst>(I))
2603 return processAssumeIntrinsic(Assume);
2604
2605 if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2606 if (processLoad(Load))
2607 return true;
2608
2609 unsigned Num = VN.lookupOrAdd(Load);
2610 LeaderTable.insert(Num, Load, Load->getParent());
2611 return false;
2612 }
2613
2614 // For conditional branches, we can perform simple conditional propagation on
2615 // the condition value itself.
2616 if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2617 if (!BI->isConditional())
2618 return false;
2619
2620 if (isa<Constant>(BI->getCondition()))
2621 return processFoldableCondBr(BI);
2622
2623 Value *BranchCond = BI->getCondition();
2624 BasicBlock *TrueSucc = BI->getSuccessor(0);
2625 BasicBlock *FalseSucc = BI->getSuccessor(1);
2626 // Avoid multiple edges early.
2627 if (TrueSucc == FalseSucc)
2628 return false;
2629
2630 BasicBlock *Parent = BI->getParent();
2631 bool Changed = false;
2632
2633 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2634 BasicBlockEdge TrueE(Parent, TrueSucc);
2635 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2636
2637 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2638 BasicBlockEdge FalseE(Parent, FalseSucc);
2639 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2640
2641 return Changed;
2642 }
2643
2644 // For switches, propagate the case values into the case destinations.
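// For example (illustrative), given "switch i32 %x ... [ i32 3, label %bb ]",
// if %bb is reached by exactly one switch edge, then %x == 3 holds in %bb and
// can be propagated there.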
2645 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2646 Value *SwitchCond = SI->getCondition();
2647 BasicBlock *Parent = SI->getParent();
2648 bool Changed = false;
2649
2650 // Remember how many outgoing edges there are to every successor.
2651 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2652 for (BasicBlock *Succ : successors(Parent))
2653 ++SwitchEdges[Succ];
2654
2655 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2656 i != e; ++i) {
2657 BasicBlock *Dst = i->getCaseSuccessor();
2658 // If there is only a single edge, propagate the case value into it.
2659 if (SwitchEdges.lookup(Dst) == 1) {
2660 BasicBlockEdge E(Parent, Dst);
2661 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2662 }
2663 }
2664 return Changed;
2665 }
2666
2667 // Instructions with void type don't return a value, so there's
2668 // no point in trying to find redundancies in them.
2669 if (I->getType()->isVoidTy())
2670 return false;
2671
2672 uint32_t NextNum = VN.getNextUnusedValueNumber();
2673 unsigned Num = VN.lookupOrAdd(I);
2674
2675 // Allocations are always uniquely numbered, so we can save time and memory
2676 // by fast failing them.
2677 if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2678 LeaderTable.insert(Num, I, I->getParent());
2679 return false;
2680 }
2681
2682 // If the number we were assigned was a brand new VN, then we don't
2683 // need to do a lookup to see if the number already exists
2684 // somewhere in the domtree: it can't!
2685 if (Num >= NextNum) {
2686 LeaderTable.insert(Num, I, I->getParent());
2687 return false;
2688 }
2689
2690 // Perform fast-path value-number based elimination of values inherited from
2691 // dominators.
2692 Value *Repl = findLeader(I->getParent(), Num);
2693 if (!Repl) {
2694 // Failure, just remember this instance for future use.
2695 LeaderTable.insert(Num, I, I->getParent());
2696 return false;
2697 }
2698
2699 if (Repl == I) {
2700 // If I was the result of a shortcut PRE, it might already be in the table
2701 // and the best replacement for itself. Nothing to do.
2702 return false;
2703 }
2704
2705 // Remove it!
2706 patchAndReplaceAllUsesWith(I, Repl);
2707 if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2708 MD->invalidateCachedPointerInfo(Repl);
2709 markInstructionForDeletion(I);
2710 return true;
2711}
2712
2713 /// runImpl - This is the main transformation entry point for a function.
2714bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2715 const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2716 MemoryDependenceResults *RunMD, LoopInfo &LI,
2717 OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2718 AC = &RunAC;
2719 DT = &RunDT;
2720 VN.setDomTree(DT);
2721 TLI = &RunTLI;
2722 VN.setAliasAnalysis(&RunAA);
2723 MD = RunMD;
2724 ImplicitControlFlowTracking ImplicitCFT;
2725 ICF = &ImplicitCFT;
2726 this->LI = &LI;
2727 VN.setMemDep(MD);
2728 ORE = RunORE;
2729 InvalidBlockRPONumbers = true;
2730 MemorySSAUpdater Updater(MSSA);
2731 MSSAU = MSSA ? &Updater : nullptr;
2732
2733 bool Changed = false;
2734 bool ShouldContinue = true;
2735
2736 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
2737 // Merge unconditional branches, allowing PRE to catch more
2738 // optimization opportunities.
2739 for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
2740 bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, &LI, MSSAU, MD);
2741 if (removedBlock)
2742 ++NumGVNBlocks;
2743
2744 Changed |= removedBlock;
2745 }
2746 DTU.flush();
2747
2748 unsigned Iteration = 0;
2749 while (ShouldContinue) {
2750 LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2751 (void) Iteration;
2752 ShouldContinue = iterateOnFunction(F);
2753 Changed |= ShouldContinue;
2754 ++Iteration;
2755 }
2756
2757 if (isPREEnabled()) {
2758 // Fabricate val-num for dead-code in order to suppress assertion in
2759 // performPRE().
2760 assignValNumForDeadCode();
2761 bool PREChanged = true;
2762 while (PREChanged) {
2763 PREChanged = performPRE(F);
2764 Changed |= PREChanged;
2765 }
2766 }
2767
2768 // FIXME: Should perform GVN again after PRE does something. PRE can move
2769 // computations into blocks where they become fully redundant. Note that
2770 // we can't do this until PRE's critical edge splitting updates memdep.
2771 // Actually, when this happens, we should just fully integrate PRE into GVN.
2772
2773 cleanupGlobalSets();
2774 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2775 // iteration.
2776 DeadBlocks.clear();
2777
2778 if (MSSA && VerifyMemorySSA)
2779 MSSA->verifyMemorySSA();
2780
2781 return Changed;
2782}
2783
2784bool GVNPass::processBlock(BasicBlock *BB) {
2785 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2786 // (and incrementing BI before processing an instruction).
2787 assert(InstrsToErase.empty() &&
2788 "We expect InstrsToErase to be empty across iterations");
2789 if (DeadBlocks.count(BB))
2790 return false;
2791
2792 // Clear the map before every BB because it can be used only for a single BB.
2793 ReplaceOperandsWithMap.clear();
2794 bool ChangedFunction = false;
2795
2796 // Since we may not have visited the input blocks of the phis, we can't
2797 // use our normal hash approach for phis. Instead, simply look for
2798 // obvious duplicates. The first pass of GVN will tend to create
2799 // identical phis, and the second or later passes can eliminate them.
2800 SmallPtrSet<PHINode *, 8> PHINodesToRemove;
2801 ChangedFunction |= EliminateDuplicatePHINodes(BB, PHINodesToRemove);
2802 for (PHINode *PN : PHINodesToRemove) {
2803 VN.erase(PN);
2804 removeInstruction(PN);
2805 }
2806
2807 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2808 BI != BE;) {
2809 if (!ReplaceOperandsWithMap.empty())
2810 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2811 ChangedFunction |= processInstruction(&*BI);
2812
2813 if (InstrsToErase.empty()) {
2814 ++BI;
2815 continue;
2816 }
2817
2818 // If we need some instructions deleted, do it now.
2819 NumGVNInstr += InstrsToErase.size();
2820
2821 // Avoid iterator invalidation.
2822 bool AtStart = BI == BB->begin();
2823 if (!AtStart)
2824 --BI;
2825
2826 for (auto *I : InstrsToErase) {
2827 assert(I->getParent() == BB && "Removing instruction from wrong block?");
2828 LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2829 salvageKnowledge(I, AC);
2830 salvageDebugInfo(*I);
2831 removeInstruction(I);
2832 }
2833 InstrsToErase.clear();
2834
2835 if (AtStart)
2836 BI = BB->begin();
2837 else
2838 ++BI;
2839 }
2840
2841 return ChangedFunction;
2842}
2843
2844// Instantiate an expression in a predecessor that lacked it.
2845bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2846 BasicBlock *Curr, unsigned int ValNo) {
2847 // Because we are going top-down through the block, all value numbers
2848 // will be available in the predecessor by the time we need them. Any
2849 // that weren't originally present will have been instantiated earlier
2850 // in this loop.
2851 bool success = true;
2852 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2853 Value *Op = Instr->getOperand(i);
2854 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2855 continue;
2856 // This could be a newly inserted instruction, in which case, we won't
2857 // find a value number, and should give up before we hurt ourselves.
2858 // FIXME: Rewrite the infrastructure to make it easier to value number
2859 // and process newly inserted instructions.
2860 if (!VN.exists(Op)) {
2861 success = false;
2862 break;
2863 }
2864 uint32_t TValNo =
2865 VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2866 if (Value *V = findLeader(Pred, TValNo)) {
2867 Instr->setOperand(i, V);
2868 } else {
2869 success = false;
2870 break;
2871 }
2872 }
2873
2874 // Fail out if we encounter an operand that is not available in
2875 // the PRE predecessor. This is typically because of loads which
2876 // are not value numbered precisely.
2877 if (!success)
2878 return false;
2879
2880 Instr->insertBefore(Pred->getTerminator());
2881 Instr->setName(Instr->getName() + ".pre");
2882 Instr->setDebugLoc(Instr->getDebugLoc());
2883
2884 ICF->insertInstructionTo(Instr, Pred);
2885
2886 unsigned Num = VN.lookupOrAdd(Instr);
2887 VN.add(Instr, Num);
2888
2889 // Update the availability map to include the new instruction.
2890 LeaderTable.insert(Num, Instr, Pred);
2891 return true;
2892}
2893
2894bool GVNPass::performScalarPRE(Instruction *CurInst) {
2895 if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2896 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2897 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2898 isa<DbgInfoIntrinsic>(CurInst))
2899 return false;
2900
2901 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2902 // sinking the compare again, and it would force the code generator to
2903 // move the i1 from processor flags or predicate registers into a general
2904 // purpose register.
2905 if (isa<CmpInst>(CurInst))
2906 return false;
2907
2908 // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2909 // sinking the addressing mode computation back to its uses. Extending the
2910 // GEP's live range increases the register pressure, and therefore it can
2911 // introduce unnecessary spills.
2912 //
2913 // This doesn't prevent Load PRE. PHI translation will make the GEP available
2914 // to the load by moving it to the predecessor block if necessary.
2915 if (isa<GetElementPtrInst>(CurInst))
2916 return false;
2917
2918 if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2919 // We don't currently value number ANY inline asm calls.
2920 if (CallB->isInlineAsm())
2921 return false;
2922 }
2923
2924 uint32_t ValNo = VN.lookup(CurInst);
2925
2926 // Look for the predecessors for PRE opportunities. We're
2927 // only trying to solve the basic diamond case, where
2928 // a value is computed in the successor and one predecessor,
2929 // but not the other. We also explicitly disallow cases
2930 // where the successor is its own predecessor, because they're
2931 // more complicated to get right.
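// Illustrative diamond (generic names): if "%v = add i32 %a, %b" is computed
// in CurrentBlock and is already available in pred1 but not in pred2, we
// clone the add into pred2 and merge the two values with a phi named
// "%v.pre-phi" in CurrentBlock.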
2932 unsigned NumWith = 0;
2933 unsigned NumWithout = 0;
2934 BasicBlock *PREPred = nullptr;
2935 BasicBlock *CurrentBlock = CurInst->getParent();
2936
2937 // Update the RPO numbers for this function.
2938 if (InvalidBlockRPONumbers)
2939 assignBlockRPONumber(*CurrentBlock->getParent());
2940
2941 SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2942 for (BasicBlock *P : predecessors(CurrentBlock)) {
2943 // We're not interested in PRE where a predecessor block is
2944 // not reachable.
2945 if (!DT->isReachableFromEntry(P)) {
2946 NumWithout = 2;
2947 break;
2948 }
2949 // It is not safe to do PRE when P->CurrentBlock is a loop backedge.
2950 assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2951 "Invalid BlockRPONumber map.");
2952 if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {
2953 NumWithout = 2;
2954 break;
2955 }
2956
2957 uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2958 Value *predV = findLeader(P, TValNo);
2959 if (!predV) {
2960 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2961 PREPred = P;
2962 ++NumWithout;
2963 } else if (predV == CurInst) {
2964 /* CurInst dominates this predecessor. */
2965 NumWithout = 2;
2966 break;
2967 } else {
2968 predMap.push_back(std::make_pair(predV, P));
2969 ++NumWith;
2970 }
2971 }
2972
2973 // Don't do PRE when it might increase code size, i.e. when
2974 // we would need to insert instructions in more than one pred.
2975 if (NumWithout > 1 || NumWith == 0)
2976 return false;
2977
2978 // We may have a case where all predecessors have the instruction,
2979 // and we just need to insert a phi node. Otherwise, perform
2980 // insertion.
2981 Instruction *PREInstr = nullptr;
2982
2983 if (NumWithout != 0) {
2984 if (!isSafeToSpeculativelyExecute(CurInst)) {
2985 // It is only valid to insert a new instruction if the current instruction
2986 // is always executed. An instruction with implicit control flow could
2987 // prevent us from doing it. If we cannot speculate the execution, then
2988 // PRE should be prohibited.
2989 if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2990 return false;
2991 }
2992
2993 // Don't do PRE across indirect branch.
2994 if (isa<IndirectBrInst>(PREPred->getTerminator()))
2995 return false;
2996
2997 // We can't do PRE safely on a critical edge, so instead we schedule
2998 // the edge to be split and perform the PRE the next time we iterate
2999 // on the function.
3000 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
3001 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
3002 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
3003 return false;
3004 }
3005 // We need to insert somewhere, so let's give it a shot
3006 PREInstr = CurInst->clone();
3007 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
3008 // If we failed insertion, make sure we remove the instruction.
3009#ifndef NDEBUG
3010 verifyRemoved(PREInstr);
3011#endif
3012 PREInstr->deleteValue();
3013 return false;
3014 }
3015 }
3016
3017 // Either we should have filled in the PRE instruction, or we should
3018 // not have needed insertions.
3019 assert(PREInstr != nullptr || NumWithout == 0);
3020
3021 ++NumGVNPRE;
3022
3023 // Create a PHI to make the value available in this block.
3024 PHINode *Phi = PHINode::Create(CurInst->getType(), predMap.size(),
3025 CurInst->getName() + ".pre-phi");
3026 Phi->insertBefore(CurrentBlock->begin());
3027 for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
3028 if (Value *V = predMap[i].first) {
3029 // If we use an existing value in this phi, we have to patch the original
3030 // value because the phi will be used to replace a later value.
3031 patchReplacementInstruction(CurInst, V);
3032 Phi->addIncoming(V, predMap[i].second);
3033 } else
3034 Phi->addIncoming(PREInstr, PREPred);
3035 }
3036
3037 VN.add(Phi, ValNo);
3038 // After creating a new PHI for ValNo, the phi translate result for ValNo will
3039 // be changed, so erase the related stale entries in phi translate cache.
3040 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
3041 LeaderTable.insert(ValNo, Phi, CurrentBlock);
3042 Phi->setDebugLoc(CurInst->getDebugLoc());
3043 CurInst->replaceAllUsesWith(Phi);
3044 if (MD && Phi->getType()->isPtrOrPtrVectorTy())
3045 MD->invalidateCachedPointerInfo(Phi);
3046 VN.erase(CurInst);
3047 LeaderTable.erase(ValNo, CurInst, CurrentBlock);
3048
3049 LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
3050 removeInstruction(CurInst);
3051 ++NumGVNInstr;
3052
3053 return true;
3054}
3055
3056/// Perform a purely local form of PRE that looks for diamond
3057/// control flow patterns and attempts to perform simple PRE at the join point.
3058bool GVNPass::performPRE(Function &F) {
3059 bool Changed = false;
3060 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
3061 // Nothing to PRE in the entry block.
3062 if (CurrentBlock == &F.getEntryBlock())
3063 continue;
3064
3065 // Don't perform PRE on an EH pad.
3066 if (CurrentBlock->isEHPad())
3067 continue;
3068
3069 for (BasicBlock::iterator BI = CurrentBlock->begin(),
3070 BE = CurrentBlock->end();
3071 BI != BE;) {
3072 Instruction *CurInst = &*BI++;
3073 Changed |= performScalarPRE(CurInst);
3074 }
3075 }
3076
3077 if (splitCriticalEdges())
3078 Changed = true;
3079
3080 return Changed;
3081}
3082
3083 /// Split the critical edge connecting the given two blocks, and return
3084 /// the block inserted on the critical edge.
3085BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
3086 // GVN does not require loop-simplify; do not try to preserve it if it is not
3087 // possible.
3088 BasicBlock *BB = SplitCriticalEdge(
3089 Pred, Succ,
3090 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
3091 if (BB) {
3092 if (MD)
3093 MD->invalidateCachedPredecessors();
3094 InvalidBlockRPONumbers = true;
3095 }
3096 return BB;
3097}
3098
3099/// Split critical edges found during the previous
3100/// iteration that may enable further optimization.
3101bool GVNPass::splitCriticalEdges() {
3102 if (toSplit.empty())
3103 return false;
3104
3105 bool Changed = false;
3106 do {
3107 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
3108 Changed |= SplitCriticalEdge(Edge.first, Edge.second,
3109 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
3110 nullptr;
3111 } while (!toSplit.empty());
3112 if (Changed) {
3113 if (MD)
3114 MD->invalidateCachedPredecessors();
3115 InvalidBlockRPONumbers = true;
3116 }
3117 return Changed;
3118}
3119
3120/// Executes one iteration of GVN
3121bool GVNPass::iterateOnFunction(Function &F) {
3122 cleanupGlobalSets();
3123
3124 // Top-down walk of the dominator tree
3125 bool Changed = false;
3126 // Needed for value numbering with phi construction to work.
3127 // RPOT walks the graph in its constructor and will not be invalidated during
3128 // processBlock.
3129 ReversePostOrderTraversal<Function *> RPOT(&F);
3130
3131 for (BasicBlock *BB : RPOT)
3132 Changed |= processBlock(BB);
3133
3134 return Changed;
3135}
3136
3137void GVNPass::cleanupGlobalSets() {
3138 VN.clear();
3139 LeaderTable.clear();
3140 BlockRPONumber.clear();
3141 ICF->clear();
3142 InvalidBlockRPONumbers = true;
3143}
3144
3145void GVNPass::removeInstruction(Instruction *I) {
3146 if (MD) MD->removeInstruction(I);
3147 if (MSSAU)
3148 MSSAU->removeMemoryAccess(I);
3149#ifndef NDEBUG
3150 verifyRemoved(I);
3151#endif
3152 ICF->removeInstruction(I);
3153 I->eraseFromParent();
3154}
3155
3156/// Verify that the specified instruction does not occur in our
3157/// internal data structures.
3158void GVNPass::verifyRemoved(const Instruction *Inst) const {
3159 VN.verifyRemoved(Inst);
3160 LeaderTable.verifyRemoved(Inst);
3161}
3162
3163 /// BB is declared dead, which implies other blocks become dead as well. This
3164 /// function adds all such blocks to "DeadBlocks". For the dead blocks'
3165 /// live successors, update their phi nodes by replacing the operands
3166 /// corresponding to dead blocks with poison.
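/// For example (illustrative), after folding "br i1 false, label %live,
/// label %dead", every block dominated by %dead becomes dead, and phis in
/// the dominance frontier of %dead receive poison on edges from dead blocks.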
3167void GVNPass::addDeadBlock(BasicBlock *BB) {
3168 SmallVector<BasicBlock *, 4> NewDead;
3169 SmallSetVector<BasicBlock *, 4> DF;
3170
3171 NewDead.push_back(BB);
3172 while (!NewDead.empty()) {
3173 BasicBlock *D = NewDead.pop_back_val();
3174 if (DeadBlocks.count(D))
3175 continue;
3176
3177 // All blocks dominated by D are dead.
3178 SmallVector<BasicBlock *, 8> Dom;
3179 DT->getDescendants(D, Dom);
3180 DeadBlocks.insert(Dom.begin(), Dom.end());
3181
3182 // Figure out the dominance-frontier(D).
3183 for (BasicBlock *B : Dom) {
3184 for (BasicBlock *S : successors(B)) {
3185 if (DeadBlocks.count(S))
3186 continue;
3187
3188 bool AllPredDead = true;
3189 for (BasicBlock *P : predecessors(S))
3190 if (!DeadBlocks.count(P)) {
3191 AllPredDead = false;
3192 break;
3193 }
3194
3195 if (!AllPredDead) {
3196 // S could be proved dead later on. That is why we don't update phi
3197 // operands at this moment.
3198 DF.insert(S);
3199 } else {
3200 // While S is not dominated by D, it is dead by now. This could take
3201 // place if S already had a dead predecessor before D was declared
3202 // dead.
3203 NewDead.push_back(S);
3204 }
3205 }
3206 }
3207 }
3208
3209 // For the dead blocks' live successors, update their phi nodes by replacing
3210 // the operands corresponding to dead blocks with poison.
3211 for (BasicBlock *B : DF) {
3212 if (DeadBlocks.count(B))
3213 continue;
3214
3215 // First, split the critical edges. This might also create additional blocks
3216 // to preserve LoopSimplify form and adjust edges accordingly.
3217 SmallVector<BasicBlock *, 4> Preds(predecessors(B));
3218 for (BasicBlock *P : Preds) {
3219 if (!DeadBlocks.count(P))
3220 continue;
3221
3222 if (llvm::is_contained(successors(P), B) &&
3223 isCriticalEdge(P->getTerminator(), B)) {
3224 if (BasicBlock *S = splitCriticalEdges(P, B))
3225 DeadBlocks.insert(P = S);
3226 }
3227 }
3228
3229 // Now poison the incoming values from the dead predecessors.
3230 for (BasicBlock *P : predecessors(B)) {
3231 if (!DeadBlocks.count(P))
3232 continue;
3233 for (PHINode &Phi : B->phis()) {
3234 Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
3235 if (MD)
3236 MD->invalidateCachedPointerInfo(&Phi);
3237 }
3238 }
3239 }
3240}
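
Only the final loop above rewrites phi operands; everything before it computes which blocks are dead and which live blocks lie on the frontier. The sketch below distills that phi update into a hypothetical helper (poisonIncomingFrom is an illustrative name, not an API in this file).

// Replace every value flowing into LiveSucc from DeadPred with poison: the
// edge can never be taken at run time, so any value is acceptable, and
// poison gives later passes the most freedom to simplify. Assumes DeadPred
// really is a predecessor of LiveSucc (setIncomingValueForBlock asserts it).
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static void poisonIncomingFrom(BasicBlock *DeadPred, BasicBlock *LiveSucc) {
  for (PHINode &Phi : LiveSucc->phis())
    Phi.setIncomingValueForBlock(DeadPred, PoisonValue::get(Phi.getType()));
}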
3241
3242// If the given branch is recognized as a foldable branch (i.e. a conditional
3243// branch with a constant condition), this performs the following analyses
3244// and transformations.
3245// 1) If the dead outgoing edge is a critical edge, split it. Let
3246//    R be the target of the dead outgoing edge.
3247// 2) Identify the set of dead blocks implied by the branch's dead outgoing
3248//    edge. The result of this step will be {X | X is dominated by R}.
3249// 3) Identify those blocks which have at least one dead predecessor. The
3250//    result of this step will be dominance-frontier(R).
3251// 4) Update the PHIs in DF(R) by replacing the operands corresponding to
3252//    dead blocks with poison, in the hope that these PHIs will be optimized away.
3253//
3254// Return true iff *NEW* dead code is found.
3255bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3256 if (!BI || BI->isUnconditional())
3257 return false;
3258
3259 // If a branch has two identical successors, we cannot declare either dead.
3260 if (BI->getSuccessor(0) == BI->getSuccessor(1))
3261 return false;
3262
3263 ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
3264 if (!Cond)
3265 return false;
3266
3267 BasicBlock *DeadRoot =
3268 Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
3269 if (DeadBlocks.count(DeadRoot))
3270 return false;
3271
3272 if (!DeadRoot->getSinglePredecessor())
3273 DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
3274
3275 addDeadBlock(DeadRoot);
3276 return true;
3277}
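
Concretely, for a branch like "br i1 true, label %live, label %dead", successor 1 (%dead) becomes the dead root. The hypothetical helper below isolates just that selection step; it mirrors the body above but is illustrative only.

// Return the successor made unreachable by a constant condition, or
// nullptr if the branch cannot be folded this way.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static BasicBlock *deadSuccessor(BranchInst *BI) {
  if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
    return nullptr;
  auto *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return nullptr;
  // A true condition kills the false target (successor 1) and vice versa.
  return Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
}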
3278
3279// performPRE() will trigger an assert if it comes across an instruction
3280// without an associated value number. As a function normally has far more
3281// live instructions than dead ones, it makes more sense to "fabricate" a
3282// value number for the dead code than to check whether each instruction is dead.
3283void GVNPass::assignValNumForDeadCode() {
3284 for (BasicBlock *BB : DeadBlocks) {
3285 for (Instruction &Inst : *BB) {
3286 unsigned ValNum = VN.lookupOrAdd(&Inst);
3287 LeaderTable.insert(ValNum, &Inst, BB);
3288 }
3289 }
3290}
3291
3291
3292class llvm::gvn::GVNLegacyPass : public FunctionPass {
3293 public:
3294 static char ID; // Pass identification, replacement for typeid
3295
3296 explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
3297 : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
3298 initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
3299 }
3300
3301 bool runOnFunction(Function &F) override {
3302 if (skipFunction(F))
3303 return false;
3304
3305 auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3306 return Impl.runImpl(
3307 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
3308 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3309 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
3310 getAnalysis<AAResultsWrapperPass>().getAAResults(),
3311 Impl.isMemDepEnabled()
3312 ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3313 : nullptr,
3314 getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
3315 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3316 MSSAWP ? &MSSAWP->getMSSA() : nullptr);
3317 }
3318
3319 void getAnalysisUsage(AnalysisUsage &AU) const override {
3320 AU.addRequired<AssumptionCacheTracker>();
3321 AU.addRequired<DominatorTreeWrapperPass>();
3322 AU.addRequired<TargetLibraryInfoWrapperPass>();
3323 AU.addRequired<LoopInfoWrapperPass>();
3324 if (Impl.isMemDepEnabled())
3325 AU.addRequired<MemoryDependenceWrapperPass>();
3326 AU.addRequired<AAResultsWrapperPass>();
3327 AU.addPreserved<DominatorTreeWrapperPass>();
3328 AU.addPreserved<GlobalsAAWrapperPass>();
3329 AU.addPreserved<TargetLibraryInfoWrapperPass>();
3330 AU.addPreserved<LoopInfoWrapperPass>();
3331 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
3332 AU.addPreserved<MemorySSAWrapperPass>();
3333 }
3334
3335private:
3336 GVNPass Impl;
3337};
3338
3339char GVNLegacyPass::ID = 0;
3340
3341INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3342INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
3343INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
3344INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3345INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3346INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
3347INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
3348INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
3349INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3350
3351// The public interface to this file...
3352FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
3353 return new GVNLegacyPass(NoMemDepAnalysis);
3354}
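
createGVNPass() exists only for the legacy pass manager; with the new pass manager the pass object is constructed directly. A minimal sketch, assuming a FunctionAnalysisManager with the standard analyses already registered (runGVNOnFunction is an illustrative name); the same thing is spelled "opt -passes=gvn" on the command line.

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/GVN.h"
using namespace llvm;

static PreservedAnalyses runGVNOnFunction(Function &F,
                                          FunctionAnalysisManager &FAM) {
  FunctionPassManager FPM;
  FPM.addPass(GVNPass()); // Default GVNOptions.
  return FPM.run(F, FAM);
}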