using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<uint32_t>
MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000),
                cl::ZeroOrMore,
                cl::desc("Max recurse depth (default = 1000)"));
Expression(uint32_t o = ~2U) : opcode(o) {}

bool operator==(const Expression &other) const {
  if (opcode != other.opcode)
    return false;
  if (opcode == ~0U || opcode == ~1U)
    return true;
  if (type != other.type)
    return false;
  if (varargs != other.varargs)
    return false;
  return true;
}

friend hash_code hash_value(const Expression &Value) {
  return hash_combine(Value.opcode, Value.type,
                      hash_combine_range(Value.varargs.begin(),
                                         Value.varargs.end()));
}
// ...
DenseMap<Value*, uint32_t> valueNumbering;
DenseMap<Expression, uint32_t> expressionNumbering;
// ...
uint32_t nextValueNumber;
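// Illustrative walkthrough (editorial, not in the original source): value
// numbers are handed out densely starting from 1.  Numbering
// "%x = add i32 %a, %b" first numbers %a (say 1) and %b (say 2), builds
// Expression{Add, i32, {1, 2}}, and maps both the expression and %x to 3.
// A later "%y = add i32 %a, %b" hits the same Expression in
// expressionNumbering and reuses 3 instead of allocating a new number.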
Expression create_cmp_expression(unsigned Opcode,
                                 CmpInst::Predicate Predicate,
                                 Value *LHS, Value *RHS);
uint32_t lookup_or_add_call(CallInst *C);

public:
ValueTable() : nextValueNumber(1) {}
uint32_t lookup_or_add(Value *V);
uint32_t lookup(Value *V) const;
uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
                           Value *LHS, Value *RHS);
void add(Value *V, uint32_t num);
void clear();
void erase(Value *v);
// ...
uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
void verifyRemoved(const Value *) const;
static bool isEqual(const Expression &LHS, const Expression &RHS) {
  return LHS == RHS;
}
Expression ValueTable::create_expression(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));
  if (I->isCommutative()) {
    // Sort the operand value numbers so that commutative instructions that
    // differ only by a permutation of their operands get the same value
    // number.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
  } else if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = IVI->idx_begin(),
                                       IE = IVI->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}
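// Illustrative example (editorial, not in the original source): because
// create_expression sorts the operand value numbers of commutative
// instructions and swaps compare predicates to match, these pairs receive
// identical Expressions and hence the same value number:
//
//   %x = add i32 %a, %b          %y = add i32 %b, %a
//   %p = icmp slt i32 %a, %b     %q = icmp sgt i32 %b, %a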
Expression ValueTable::create_cmp_expression(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookup_or_add(LHS));
  e.varargs.push_back(lookup_or_add(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  return e;
}
Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  // ...
  // Recognize extraction of the result value of a with-overflow intrinsic and
  // number it like the corresponding plain arithmetic instruction.
  switch (I->getIntrinsicID()) {
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
    e.opcode = Instruction::Add;
    break;
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    e.opcode = Instruction::Sub;
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    e.opcode = Instruction::Mul;
    break;
  default:
    break;
  }
  // ...
  assert(I->getNumArgOperands() == 2 &&
         "Expect two args for recognised intrinsics.");
  // ...
  // Not a recognised intrinsic: value-number the operands and the indices.
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));
  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  return e;
}
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}
uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst *local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case: look for a single identical dominating call.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(CallSite(C));
    CallInst *cdep = nullptr;

    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;
      // ...
      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(),
                                                   C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
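// Illustrative summary (editorial): a call only ever reuses a previously
// issued value number when it is provably pure enough.  A readnone call is
// numbered purely structurally; a readonly call additionally needs memdep to
// prove that an identical dominating call is not separated from it by a
// clobbering write, e.g. (assuming @strlen is marked readonly):
//
//   %l1 = call i64 @strlen(i8* %p)
//   store i8 0, i8* %p                 ; clobbers the memory strlen reads
//   %l2 = call i64 @strlen(i8* %p)     ; gets a fresh value number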
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction *I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = create_expression(I);
      break;
    case Instruction::ExtractValue:
      exp = create_extractvalue_expression(cast<ExtractValueInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t &e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}
uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode,
                                       CmpInst::Predicate Predicate,
                                       Value *LHS, Value *RHS) {
  Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS);
  uint32_t &e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  return e;
}
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}
struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    LoadVal,    // A value produced by a load.
    MemIntrin,  // A memory intrinsic which is loaded from.
    UndefVal    // A value from a dead block (not yet removed from the CFG).
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI,
                                       unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const;
};
struct LeaderTableEntry {
  Value *Val;
  const BasicBlock *BB;
  LeaderTableEntry *Next;
};
explicit GVN(bool noloads = false)
    : FunctionPass(ID), NoLoads(noloads), MD(nullptr) {
  initializeGVNPass(*PassRegistry::getPassRegistry());
}

bool runOnFunction(Function &F) override;

/// markInstructionForDeletion - This removes the specified instruction from
/// our various maps and marks it for deletion.
void markInstructionForDeletion(Instruction *I) {
  VN.erase(I);
  InstrsToErase.push_back(I);
}

AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
// ...

/// addToLeaderTable - Push a new Value onto the list of leaders for its
/// value number.
void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
  LeaderTableEntry &Curr = LeaderTable[N];
  if (!Curr.Val) {
    Curr.Val = V;
    Curr.BB = BB;
    return;
  }

  LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
  Node->Val = V;
  Node->BB = BB;
  Node->Next = Curr.Next;
  Curr.Next = Node;
}

/// removeFromLeaderTable - Scan the list of values corresponding to a given
/// value number, and remove the given instruction if encountered.
void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
  LeaderTableEntry *Prev = nullptr;
  LeaderTableEntry *Curr = &LeaderTable[N];

  while (Curr && (Curr->Val != I || Curr->BB != BB)) {
    Prev = Curr;
    Curr = Curr->Next;
  }

  if (!Curr)
    return;

  if (Prev) {
    Prev->Next = Curr->Next;
  } else {
    if (!Curr->Next) {
      Curr->Val = nullptr;
      Curr->BB = nullptr;
    } else {
      LeaderTableEntry *Next = Curr->Next;
      Curr->Val = Next->Val;
      Curr->BB = Next->BB;
      Curr->Next = Next->Next;
    }
  }
}

// Helper functions of redundant load elimination.
bool processNonLocalLoad(LoadInst *L);
void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                             AvailValInBlkVect &ValuesPerBlock,
                             UnavailBlkVect &UnavailableBlocks);
bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                    UnavailBlkVect &UnavailableBlocks);
// ...
void cleanupGlobalSets();
bool splitCriticalEdges();
// ...
void assignValNumForDeadCode();
// ...
}; // end class GVN

// ...

FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}
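// Usage sketch (editorial, assuming the legacy pass manager of this LLVM
// version; not part of the original file):
//
//   llvm::legacy::PassManager PM;
//   PM.add(createGVNPass(/*NoLoads=*/false)); // full GVN, loads included
//   PM.run(M);                                // M is the Module to optimize
//
// Passing NoLoads=true restricts the pass to scalar redundancy elimination,
// skipping the memory-dependence-driven load transformations below.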
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void GVN::dump(DenseMap<uint32_t, Value*> &d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
                            uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume the block is fully available; insert and check in
  // one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
      FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE) // No predecessors: the value isn't live-in here.
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,
                                      RecurseDepth + 1))
      goto SpeculationFailure;

  return true;

// We speculated on this block being available and may have used that to
// speculate on other blocks; undo the damage.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];
  if (BBVal == 2) { // Never relied upon: just mark unavailable.
    BBVal = 0;
    return false;
  }

  // Walk the (transitive) successors and revoke any speculative availability.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);
  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.
    EntryVal = 0;
    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}
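// Editorial note on the char lattice used above: 0 = known unavailable,
// 1 = known fully available, 2 = optimistically assumed available, and
// 3 = assumed available *and* already used to speculate about other blocks.
// State 3 is what forces SpeculationFailure to walk the successors and
// revoke availability transitively.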
static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
                                             IRBuilder<> &IRB,
                                             const DataLayout &DL) {
  // ...
  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    // Convert source pointers to integers, which can be bitcast.
    // ...
    Type *TypeToCastTo = LoadedTy;
    // ...
    if (StoredValTy != TypeToCastTo)
      StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo);
    return StoredVal;
  }

  // If the loaded value is smaller than the available value, we can extract
  // the relevant piece; if it is larger, we cannot do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");
  // ...
  // On big-endian targets the useful bits sit in the high part of the value,
  // so shift them down first.
  if (DL.isBigEndian())
    StoredVal = IRB.CreateLShr(StoredVal, StoreSize - LoadSize, "tmp");

  // Truncate the integer to the right size now.
  Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc");

  if (LoadedTy == NewIntTy)
    return StoredVal;
  // ...
}
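// Worked example (editorial): an i64 store reloaded as i32.  StoreSize = 64
// and LoadSize = 32, so the stored value is truncated; on a big-endian target
// the wanted bits occupy the high half, hence the preceding lshr by 32:
//
//   store i64 %v, i64* %p
//   %x = load i32* %q   ; %q must-aliases %p
//   ; little-endian forwarding: %x := trunc i64 %v to i32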
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const DataLayout &DL) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  // ...
  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase =
      GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset + int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset + int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
#endif
    return -1;
  }

  // If the load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.
  if (StoreOffset > LoadOffset ||
      StoreOffset + StoreSize < LoadOffset + LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // stored value at which the load starts reading.
  return LoadOffset - StoreOffset;
}
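// Worked example (editorial): an i64 store at byte offset 0 clobbers an i32
// load at byte offset 4 of the same base.  StoreSize = 8, LoadSize = 4,
// StoreOffset = 0, LoadOffset = 4: neither AA-failure test fires, the load is
// fully covered (0 <= 4 and 0 + 8 >= 4 + 4), and the function returns
// LoadOffset - StoreOffset = 4, i.e. the load reads bytes [4, 8) of the
// stored value.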
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI) {
  // ...
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, DL);
}

static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
                                         LoadInst *DepLI,
                                         const DataLayout &DL) {
  // ...
  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
  if (R != -1)
    return R;

  // If we have a load/load clobber, see if the dependent load can be widened
  // to cover this one.
  int64_t LoadOffs = 0;
  const Value *LoadBase =
      GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
  // ...
  unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
      LoadBase, LoadOffs, LoadSize, DepLI);
  if (Size == 0)
    return -1;
  // ...
}

static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const DataLayout &DL) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (!SizeCst)
    return -1;
  // ...
  // For memcpy/memmove we can only handle a copy from constant memory.
  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (!Src)
    return -1;
  // ...
  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits,
                                              DL);
  // ...
  unsigned AS = Src->getType()->getPointerAddressSpace();
  // ...
}
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   Type *LoadTy, Instruction *InsertPt,
                                   const DataLayout &DL) {
  // ...
  // Shift the wanted bits to the least significant end, depending on
  // endianness.
  unsigned ShiftAmt;
  if (DL.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
  // ...
  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL);
}

static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                  Type *LoadTy, Instruction *InsertPt,
                                  GVN &gvn) {
  const DataLayout &DL = SrcVal->getModule()->getDataLayout();
  // If Offset+LoadTy exceeds the size of SrcVal, we must widen SrcVal to a
  // large enough power-of-two load first.
  // ...
  if (Offset+LoadSize > SrcValSize) {
    assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
    // ...
    unsigned NewLoadSize = Offset+LoadSize;
    NewLoadSize = NextPowerOf2(NewLoadSize);
    // ...
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlignment());

    DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load.  On a big-endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (DL.isBigEndian())
      RV = Builder.CreateLShr(RV,
                NewLoadSize*8 - SrcVal->getType()->getPrimitiveSizeInBits());
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    // The old load is memoized into the leader table, so we cannot simply
    // delete it; just remove its memdep info and leave it dead.
    gvn.getMemDep().removeInstruction(SrcVal);
    SrcVal = NewLoad;
  }

  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
}
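// Worked example (editorial): extracting byte 1 of a 4-byte stored value.
// Little-endian: ShiftAmt = 1*8 = 8.  Big-endian: ShiftAmt =
// (StoreSize - LoadSize - Offset)*8 = (4 - 1 - 1)*8 = 16.  Either way the
// requested byte lands in the least significant position before the trunc
// to i8.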
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     Type *LoadTy, Instruction *InsertPt,
                                     const DataLayout &DL) {
  // ...
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independent of the offset.
    Value *Val = MSI->getValue();
    // ...
    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL);
  }
  // ...
}
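// Worked example (editorial): forwarding a 4-byte load from memset(p, %c, n).
// Starting from the zero-extended byte, the doubling loop computes
//   v1 = c  | (c  << 8)    ; 2 bytes set
//   v2 = v1 | (v1 << 16)   ; 4 bytes set
// so the byte is splatted across the i32 in log2(LoadSize) steps.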
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     GVN &gvn) {
  // Check for the fully redundant, dominating load case.  In this case, we
  // can just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form with the SSAUpdater.
  // ...
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    // ...
  }
  // ...
  // If new PHI nodes were created, notify alias analysis.
  for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) {
    // ...
  }
  // ...
}
Value *AvailableValueInBlock::MaterializeAdjustedValue(LoadInst *LI,
                                                       GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      // ...
      DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                   << *getSimpleValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      // ...
      DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << "  "
                   << *getCoercedLoadValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    // ...
    DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                 << "  " << *getMemIntrinValue() << '\n'
                 << *Res << '\n' << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  return Res;
}
static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}
void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available, and of blocks where the load is
  // (potentially) clobbered.
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op evaluates to undef.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    if (DepInfo.isClobber()) {
      // A clobbering store, load, or mem-intrinsic can still provide the
      // value if it writes a superset of the bits the load reads.
      // ...
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                  DepSI->getValueOperand(),
                                                          Offset));
      // ...
      if (DepLI != LI && Address) {
        // ...
        ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, DepLI,
                                                                Offset));
      }
      // ...
      ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                            Offset));
      // ...
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // DepInfo.isDef() from here on.
    Instruction *DepInst = DepInfo.getInst();

    // Loading a fresh allocation, or right after lifetime.start -> undef.
    // ...
    ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                           UndefValue::get(LI->getType())));
    // Loading from calloc (which zero-initializes) -> zero.
    // ...
    ValuesPerBlock.push_back(AvailableValueInBlock::get(
        DepBB, Constant::getNullValue(LI->getType())));
    // ...
    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject store/load pairs of different types that we cannot coerce.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // ...
        UnavailableBlocks.push_back(DepBB);
        continue;
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                     S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse.
      // ...
      UnavailableBlocks.push_back(DepBB);
      // ...
      ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
  }
}
bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // The value is available in some of our (transitive) predecessors.  PRE
  // inserts a load into a predecessor where it is missing, but only when a
  // single insertion is needed (so we move the load rather than growing the
  // code).
  SmallPtrSet<BasicBlock*, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Find the first block with more than one predecessor, walking backwards
  // through single-predecessor blocks.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;
  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    // ...
  }
  assert(TmpBB);
  LoadBB = TmpBB;

  // Check how many predecessors have the loaded value fully available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0))
      continue;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                     << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isLandingPad()) {
        DEBUG(dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF LANDING PAD CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it:
  // inserting a load in more than one predecessor would grow the code.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors
  // accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                 << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get the address in the predecessor if necessary.
    // ...
    Value *LoadPtr = nullptr;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi-translated
    // value in the predecessor, we're done.
    if (!LoadPtr) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                   << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      if (MD) MD->removeInstruction(I);
      I->eraseFromParent();
    }
    // Don't revert the edge splitting: a later transformation may need it.
    return !CriticalEdgePred.empty();
  }

  // Okay: insert the reload in the predecessor and use PHI construction to
  // get the value in the other predecessors.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // ...
    VN.lookup_or_add(NewInsts[i]);
  }

  for (const auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;
    Value *LoadPtr = PredLoad.second;
    // ...
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->getScalarType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ++NumPRELoad;
  return true;
}
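// Illustrative CFG (editorial) for the one-unavailable-predecessor case
// handled above:
//
//   pred1: store i32 0, i32* %p      pred2: (value unavailable)
//             \                         /
//        loadbb: %x = load i32* %p
//
// PRE inserts "%x.pre = load i32* %p" at the end of pred2 and rewrites %x to
// phi i32 [ 0, %pred1 ], [ %x.pre, %pred2 ].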
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(LI, Deps);

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing it will
  // be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      LI->printAsOperand(dbgs());
      dbgs() << " has unknown dependencies\n";
    );
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before
  // analyzing.
  if (GetElementPtrInst *GEP =
          dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate fully redundancy.  If every dependence produces a known
  // value, the load is fully redundant and PHI construction computes it.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}
static void patchReplacementInstruction(Instruction *I, Value *Repl) {
  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
    // ...
    // Metadata kinds that can validly be combined across the pair.
    static const unsigned KnownIDs[] = {
        LLVMContext::MD_tbaa,    LLVMContext::MD_alias_scope,
        LLVMContext::MD_noalias, LLVMContext::MD_range,
        LLVMContext::MD_fpmath,  LLVMContext::MD_invariant_load};
    combineMetadata(ReplInst, I, KnownIDs);
  }
}
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  if (!L->isSimple())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // Attempt to eliminate the load locally first, then non-locally.
  MemDepResult Dep = MD->getDependency(L);
  const DataLayout &DL = L->getModule()->getDataLayout();

  // If we have a clobber, see if it is one we can fix up through code
  // synthesis: a store or memset to a common base plus constant offset that
  // completely covers this load.
  if (Dep.isClobber()) {
    Value *AvailVal = nullptr;
    // ...
    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->getScalarType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      markInstructionForDeletion(L);
      ++NumGVNLoad;
      return true;
    }

    // If the value isn't available, don't do anything!
    DEBUG(
      // Fast-print the dep; operator<< on an instruction is too slow.
      dbgs() << "GVN: load ";
      L->printAsOperand(dbgs());
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  if (!Dep.isDef()) {
    DEBUG(
      dbgs() << "GVN: load ";
      L->printAsOperand(dbgs());
      dbgs() << " has unknown dependence\n";
    );
    return false;
  }

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type; see if we can coerce the stored value.
    if (StoredVal->getType() != L->getType()) {
      // ...
      DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                   << '\n' << *L << "\n\n\n");
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer but may have different types;
    // see if we can reuse the previously loaded value.
    if (DepLI->getType() != L->getType()) {
      // ...
      DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                   << "\n" << *L << "\n\n\n");
    }

    // Remove it!
    patchAndReplaceAllUsesWith(L, AvailableVal);
    if (DepLI->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  // Loading from a fresh allocation with no intervening stores yields undef.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  // A load right after lifetime.start is undefined too.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      markInstructionForDeletion(L);
      ++NumGVNLoad;
      return true;
    }
  }

  // A load following calloc (which zero-initializes) yields zero.
  if (isCallocLikeFn(DepInst, TLI)) {
    L->replaceAllUsesWith(Constant::getNullValue(L->getType()));
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    return true;
  }

  return false;
}
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val) return nullptr;

  Value *Val = nullptr;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val)) return Val;
  }

  LeaderTableEntry *Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val)) return Next->Val;
      if (!Val) Val = Next->Val;
    }
    Next = Next->Next;
  }

  return Val;
}
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // At the time GVN runs all loops have preheaders, so a destination
  // reachable only along this edge has exactly one predecessor: the source
  // of the edge.
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  const BasicBlock *Src = E.getStart();
  assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
  (void)Src;
  return Pred != nullptr;
}
bool GVN::propagateEquality(Value *LHS, Value *RHS,
                            const BasicBlockEdge &Root) {
  SmallVector<std::pair<Value*, Value*>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  // For speed, compute a conservative fast approximation to
  // DT->dominates(Root, Root.getEnd());
  bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);

  while (!Worklist.empty()) {
    std::pair<Value*, Value*> Item = Worklist.pop_back_val();
    LHS = Item.first; RHS = Item.second;

    if (LHS == RHS) continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS)) continue;

    // Prefer a constant on the right-hand side, or an Argument if no
    // constants.
    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
           "Unexpected value!");

    // If there is no obvious reason to prefer one side over the other, ensure
    // the longest-lived term (lowest value number) is on the right-hand side,
    // so the shortest-lived term is replaced by the longest-lived.
    uint32_t LVN = VN.lookup_or_add(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      uint32_t RVN = VN.lookup_or_add(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to LHS, ensure it is turned into RHS.  To preserve the invariant that
    // instructions only occur in the leader table for their own value number,
    // only do this if the edge dominates the end of the scope.
    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

    // Replace all occurrences of LHS with RHS everywhere in the scope.
    unsigned NumReplacements = replaceDominatedUsesWith(LHS, RHS, *DT, Root);
    Changed |= NumReplacements > 0;
    NumGVNEqProp += NumReplacements;

    // Now try to deduce additional equalities from this one.  Only boolean
    // equalities with a constant are interesting from here on.
    // ...
    bool isKnownFalse = !isKnownTrue;

    // If "A && B" is known true then both A and B are known true; if "A || B"
    // is known false then both A and B are known false.
    Value *A, *B;
    if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
        (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
      Worklist.push_back(std::make_pair(A, RHS));
      Worklist.push_back(std::make_pair(B, RHS));
      continue;
    }

    // If we are propagating an equality like "(A == B)" == "true" then also
    // propagate the equality A == B.  When propagating a comparison such as
    // "(A >= B)" == "true", replace all instances of "A < B" with "false".
    if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      // If "A == B" is known true, or "A != B" is known false, then replace
      // A with B everywhere in the scope.
      if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
          (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE))
        Worklist.push_back(std::make_pair(Op0, Op1));

      // Handle the floating point versions of equality comparisons too.
      if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) ||
          (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) {
        // Floating point -0.0 and 0.0 compare equal, so we can only propagate
        // values if we know the constant is non-zero.
        if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero())
          Worklist.push_back(std::make_pair(Op0, Op1));
        // ...
      }

      // If "A >= B" is known true, replace "A < B" with false everywhere.
      // Since we don't have the instruction "A < B" to hand, work out the
      // value number it would have and look for an existing instruction with
      // that number.
      CmpInst::Predicate NotPred = Cmp->getInversePredicate();
      Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1);

      // If the number we were assigned was brand new then there is no point
      // in looking for an instruction realizing it: there cannot be one!
      if (Num < NextNum) {
        Value *NotCmp = findLeader(Root.getEnd(), Num);
        if (NotCmp && isa<Instruction>(NotCmp)) {
          unsigned NumReplacements =
              replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root);
          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;
        }
      }
      // Ensure that any instruction in scope that gets the "A < B" value
      // number is replaced with false.
      if (RootDominatesEnd)
        addToLeaderTable(Num, NotVal, Root.getEnd());

      continue;
    }
  }

  return Changed;
}
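// Illustrative example (editorial): given
//
//   %c = icmp eq i32 %x, 42
//   br i1 %c, label %then, label %else
//
// processInstruction calls propagateEquality(%c, true, edge(%bb, %then)),
// which replaces uses of %x dominated by that edge with 42 and records that
// the inverse compare "icmp ne i32 %x, 42" is false there.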
bool GVN::processInstruction(Instruction *I) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it.  Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) {
    I->replaceAllUsesWith(V);
    if (MD && V->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(I);
    ++NumGVNSimpl;
    return true;
  }

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (processLoad(LI))
      return true;

    unsigned Num = VN.lookup_or_add(LI);
    addToLeaderTable(Num, LI, LI->getParent());
    return false;
  }

  // For conditional branches, we can perform simple conditional propagation
  // on the condition value itself.
  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!BI->isConditional())
      return false;

    if (isa<Constant>(BI->getCondition()))
      return processFoldableCondBr(BI);

    Value *BranchCond = BI->getCondition();
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);
    // Avoid multiple edges early.
    if (TrueSucc == FalseSucc)
      return false;

    BasicBlock *Parent = BI->getParent();
    bool Changed = false;

    Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
    BasicBlockEdge TrueE(Parent, TrueSucc);
    Changed |= propagateEquality(BranchCond, TrueVal, TrueE);

    Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
    BasicBlockEdge FalseE(Parent, FalseSucc);
    Changed |= propagateEquality(BranchCond, FalseVal, FalseE);

    return Changed;
  }

  // For switches, propagate the case value into the case destination.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    Value *SwitchCond = SI->getCondition();
    BasicBlock *Parent = SI->getParent();
    bool Changed = false;

    // Remember how many outgoing edges there are to every successor.
    SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
    for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
      ++SwitchEdges[SI->getSuccessor(i)];

    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      BasicBlock *Dst = i.getCaseSuccessor();
      // If there is only a single edge, propagate the case value into it.
      if (SwitchEdges.lookup(Dst) == 1) {
        BasicBlockEdge E(Parent, Dst);
        Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E);
      }
    }
    return Changed;
  }

  // Instructions with void type don't return a value, so there's no point in
  // trying to find redundancies in them.
  if (I->getType()->isVoidTy())
    return false;

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast-failing them.
  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // If the number we were assigned was brand new, the value can't already
  // exist anywhere in the domtree.
  if (Num >= NextNum) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited
  // from dominators.
  Value *repl = findLeader(I->getParent(), Num);
  if (!repl) {
    // Failure, just remember this instance for future use.
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Remove it!
  patchAndReplaceAllUsesWith(I, repl);
  if (MD && repl->getType()->getScalarType()->isPointerTy())
    MD->invalidateCachedPointerInfo(repl);
  markInstructionForDeletion(I);
  return true;
}
bool GVN::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  // ...

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more optimization
  // opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI++;

    bool removedBlock = MergeBlockIntoPredecessor(
        BB, DT, /* LoopInfo */ nullptr, VN.getAliasAnalysis(), MD);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;
  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    // Fabricate value numbers for dead code in order to suppress an assertion
    // in performPRE().
    assignValNumForDeadCode();
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // ...

  cleanupGlobalSets();
  // ...
  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  assert(InstrsToErase.empty() &&
         "We expect InstrsToErase to be empty across iterations");
  if (DeadBlocks.count(BB))
    return false;

  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
    ChangedFunction |= processInstruction(BI);
    if (InstrsToErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += InstrsToErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction *>::iterator I = InstrsToErase.begin(),
         E = InstrsToErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      DEBUG(verifyRemoved(*I));
      (*I)->eraseFromParent();
    }
    InstrsToErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                    unsigned int ValNo) {
  // Rewrite each operand to its leader in Pred; fail if one is unavailable.
  for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
    Value *Op = Instr->getOperand(i);
    if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
      continue;
    if (Value *V = findLeader(Pred, VN.lookup(Op))) {
      Instr->setOperand(i, V);
    } else {
      return false;
    }
  }
  // ...
  VN.add(Instr, ValNo);

  // Update the availability map to include the new instruction.
  addToLeaderTable(ValNo, Instr, Pred);
  return true;
}
bool GVN::performScalarPRE(Instruction *CurInst) {
  SmallVector<std::pair<Value*, BasicBlock*>, 8> predMap;

  if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
      isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
      CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
      isa<DbgInfoIntrinsic>(CurInst))
    return false;

  // Don't do PRE on compares: the resulting PHI would prevent CodeGenPrepare
  // from sinking the compare back next to its branch.
  if (isa<CmpInst>(CurInst))
    return false;

  // We don't currently value number ANY inline asm calls.
  if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
    if (CallI->isInlineAsm())
      return false;

  uint32_t ValNo = VN.lookup(CurInst);

  // Look at the predecessors for PRE opportunities.  We're only trying to
  // solve the basic diamond case, where a value is computed in the successor
  // and one predecessor but not the other.
  unsigned NumWith = 0;
  unsigned NumWithout = 0;
  BasicBlock *PREPred = nullptr;
  BasicBlock *CurrentBlock = CurInst->getParent();

  for (pred_iterator PI = pred_begin(CurrentBlock),
                     PE = pred_end(CurrentBlock); PI != PE; ++PI) {
    BasicBlock *P = *PI;
    // We're not interested in PRE where the block is its own predecessor,
    // or in blocks with unreachable predecessors.
    if (P == CurrentBlock) {
      NumWithout = 2;
      break;
    } else if (!DT->isReachableFromEntry(P)) {
      NumWithout = 2;
      break;
    }

    Value *predV = findLeader(P, ValNo);
    if (!predV) {
      predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
      PREPred = P;
      ++NumWithout;
    } else if (predV == CurInst) {
      // CurInst dominates this predecessor.
      NumWithout = 2;
      break;
    } else {
      predMap.push_back(std::make_pair(predV, P));
      ++NumWith;
    }
  }

  // Don't do PRE when it might increase code size, i.e. when we'd have to
  // insert instructions in more than one predecessor.
  if (NumWithout > 1 || NumWith == 0)
    return false;

  // We may have a case where all predecessors have the instruction and we
  // just need a phi; otherwise, perform the insertion.
  Instruction *PREInstr = nullptr;

  if (NumWithout != 0) {
    // ...
    // We can't do PRE safely on a critical edge, so instead we schedule the
    // edge to be split and perform the PRE the next time we iterate on the
    // function.
    unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
    if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
      toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
      return false;
    }
    // We need to insert somewhere, so let's give it a shot.
    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) {
      // If we failed the insertion, make sure we remove the instruction.
      DEBUG(verifyRemoved(PREInstr));
      delete PREInstr;
      return false;
    }
  }

  // Either we should have filled in the PRE instruction, or we should not
  // have needed insertions.
  assert(PREInstr != nullptr || NumWithout == 0);

  ++NumGVNPRE;

  // Create a PHI to make the value available in this block.
  PHINode *Phi =
      PHINode::Create(CurInst->getType(), predMap.size(),
                      CurInst->getName() + ".pre-phi", CurrentBlock->begin());
  for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
    if (Value *V = predMap[i].first)
      Phi->addIncoming(V, predMap[i].second);
    else
      Phi->addIncoming(PREInstr, predMap[i].second);
  }

  VN.add(Phi, ValNo);
  addToLeaderTable(ValNo, Phi, CurrentBlock);
  Phi->setDebugLoc(CurInst->getDebugLoc());
  CurInst->replaceAllUsesWith(Phi);
  if (Phi->getType()->getScalarType()->isPointerTy()) {
    // A PHI-use of the pointer value means it has "escaped" from alias
    // analysis' perspective; inform AA.
    for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; ++ii) {
      unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
      VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj));
    }

    if (MD)
      MD->invalidateCachedPointerInfo(Phi);
  }
  VN.erase(CurInst);
  removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

  DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
  if (MD)
    MD->removeInstruction(CurInst);
  DEBUG(verifyRemoved(CurInst));
  CurInst->eraseFromParent();
  ++NumGVNInstr;
  return true;
}
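// Illustrative diamond (editorial) for the scalar PRE case above: the value
// is available in one predecessor and missing in the other.
//
//   pred1: %v1 = add i32 %a, %b      pred2: (nothing)
//              \                       /
//        join:  %v2 = add i32 %a, %b   ; partially redundant
//
// PRE clones the add into pred2 (as %v2.pre) and replaces %v2 with
// phi i32 [ %v1, %pred1 ], [ %v2.pre, %pred2 ].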
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock())
      continue;

    // Don't perform PRE on a landing pad.
    if (CurrentBlock->isLandingPad())
      continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
                              BE = CurrentBlock->end();
         BI != BE;) {
      Instruction *CurInst = BI++;
      Changed = performScalarPRE(CurInst);
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// Split the critical edge connecting the given two blocks, and return the
/// block inserted to the critical edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
  BasicBlock *BB = SplitCriticalEdge(Pred, Succ,
                                     CriticalEdgeSplittingOptions(DT));
  if (MD)
    MD->invalidateCachedPredecessors();
  return BB;
}
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second,
                      CriticalEdgeSplittingOptions(DT));
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
  // Save the blocks this function has before transformation begins.  GVN may
  // split critical edges, which would invalidate the RPO iterator.
  std::vector<BasicBlock *> BBVect;
  BBVect.reserve(256);
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (ReversePostOrderTraversal<Function *>::rpo_iterator RI = RPOT.begin(),
                                                           RE = RPOT.end();
       RI != RE; ++RI)
    BBVect.push_back(*RI);

  for (std::vector<BasicBlock *>::iterator I = BBVect.begin(), E = BBVect.end();
       I != E; I++)
    Changed |= processBlock(*I);

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  TableAllocator.Reset();
}
/// verifyRemoved - Verify that the specified instruction does not occur in
/// our internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
       I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
    const LeaderTableEntry *Node = &I->second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}
void GVN::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert(Dom.begin(), Dom.end());

    // Figure out the dominance-frontier(D).
    for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(),
           E = Dom.end(); I != E; I++) {
      for (succ_iterator SI = succ_begin(*I), SE = succ_end(*I);
           SI != SE; SI++) {
        BasicBlock *S = *SI;
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (pred_iterator PI = pred_begin(S), PE = pred_end(S);
             PI != PE; PI++)
          if (!DeadBlocks.count(*PI)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could still be proved dead later on; remember it in the
          // frontier.
          DF.insert(S);
        } else {
          // All of S's predecessors are dead, so S is dead as well.
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, replace phi operands coming from
  // dead predecessors with undef.
  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
       I != E; I++) {
    BasicBlock *B = *I;
    if (DeadBlocks.count(B))
      continue;

    SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
    for (SmallVectorImpl<BasicBlock *>::iterator PI = Preds.begin(),
           PE = Preds.end(); PI != PE; PI++) {
      BasicBlock *P = *PI;
      if (!DeadBlocks.count(P))
        continue;

      // A dead predecessor on a critical edge: split the edge first.
      if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }

      for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
        PHINode &Phi = cast<PHINode>(*II);
        Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
                             UndefValue::get(Phi.getType()));
      }
    }
  }
}
bool GVN::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;
  // ...
  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot = Cond->getZExtValue() ?
                           BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}
void GVN::assignValNumForDeadCode() {
  for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(),
         E = DeadBlocks.end(); I != E; I++) {
    BasicBlock *BB = *I;
    for (BasicBlock::iterator II = BB->begin(), EE = BB->end();
         II != EE; II++) {
      Instruction *Inst = &*II;
      unsigned ValNum = VN.lookup_or_add(Inst);
      addToLeaderTable(ValNum, Inst, BB);
    }
  }
}