56 using namespace llvm::gvn;
57 using namespace PatternMatch;
59 #define DEBUG_TYPE "gvn"
61 STATISTIC(NumGVNInstr,
"Number of instructions deleted");
62 STATISTIC(NumGVNLoad,
"Number of loads deleted");
63 STATISTIC(NumGVNPRE,
"Number of instructions PRE'd");
64 STATISTIC(NumGVNBlocks,
"Number of blocks merged");
65 STATISTIC(NumGVNSimpl,
"Number of instructions simplified");
66 STATISTIC(NumGVNEqProp,
"Number of equalities propagated");
67 STATISTIC(NumPRELoad,
"Number of loads PRE'd");
76 cl::desc(
"Max recurse depth (default = 1000)"));
86 if (opcode != other.
opcode)
88 if (opcode == ~0U || opcode == ~1U)
90 if (type != other.
type)
141 Res.
Val.setPointer(V);
142 Res.
Val.setInt(SimpleVal);
149 Res.
Val.setPointer(MI);
150 Res.
Val.setInt(MemIntrin);
157 Res.
Val.setPointer(LI);
158 Res.
Val.setInt(LoadVal);
165 Res.
Val.setPointer(
nullptr);
166 Res.
Val.setInt(UndefVal);
177 assert(isSimpleValue() &&
"Wrong accessor");
178 return Val.getPointer();
182 assert(isCoercedLoadValue() &&
"Wrong accessor");
183 return cast<LoadInst>(Val.getPointer());
187 assert(isMemIntrinValue() &&
"Wrong accessor");
188 return cast<MemIntrinsic>(Val.getPointer());
209 Res.
AV = std::move(AV);
224 return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
238 e.varargs.push_back(lookupOrAdd(*OI));
245 if (e.varargs[0] > e.varargs[1])
249 if (
CmpInst *
C = dyn_cast<CmpInst>(I)) {
252 if (e.varargs[0] > e.varargs[1]) {
256 e.opcode = (
C->getOpcode() << 8) | Predicate;
260 e.varargs.push_back(*II);
269 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
270 "Not a comparison!");
273 e.varargs.push_back(lookupOrAdd(LHS));
274 e.varargs.push_back(lookupOrAdd(RHS));
277 if (e.varargs[0] > e.varargs[1]) {
281 e.opcode = (Opcode << 8) | Predicate;
286 assert(EI &&
"Not an ExtractValueInst?");
297 case Intrinsic::sadd_with_overflow:
298 case Intrinsic::uadd_with_overflow:
301 case Intrinsic::ssub_with_overflow:
302 case Intrinsic::usub_with_overflow:
303 e.opcode = Instruction::Sub;
305 case Intrinsic::smul_with_overflow:
306 case Intrinsic::umul_with_overflow:
307 e.opcode = Instruction::Mul;
316 "Expect two args for recognised intrinsics.");
328 e.varargs.push_back(lookupOrAdd(*OI));
332 e.varargs.push_back(*II);
348 valueNumbering.insert(std::make_pair(V, num));
352 if (AA->doesNotAccessMemory(C)) {
354 uint32_t &e = expressionNumbering[exp];
355 if (!e) e = nextValueNumber++;
356 valueNumbering[
C] = e;
358 }
else if (AA->onlyReadsMemory(C)) {
359 Expression exp = createExpr(C);
360 uint32_t &e = expressionNumbering[exp];
362 e = nextValueNumber++;
363 valueNumbering[
C] = e;
367 e = nextValueNumber++;
368 valueNumbering[
C] = e;
375 valueNumbering[
C] = nextValueNumber;
376 return nextValueNumber++;
379 if (local_dep.
isDef()) {
383 valueNumbering[
C] = nextValueNumber;
384 return nextValueNumber++;
391 valueNumbering[
C] = nextValueNumber;
392 return nextValueNumber++;
396 uint32_t v = lookupOrAdd(local_cdep);
397 valueNumbering[
C] = v;
409 for (
unsigned i = 0, e = deps.size();
i != e; ++
i) {
424 cdep = NonLocalDepCall;
433 valueNumbering[
C] = nextValueNumber;
434 return nextValueNumber++;
438 valueNumbering[
C] = nextValueNumber;
439 return nextValueNumber++;
445 valueNumbering[
C] = nextValueNumber;
446 return nextValueNumber++;
451 valueNumbering[
C] = v;
455 valueNumbering[
C] = nextValueNumber;
456 return nextValueNumber++;
467 if (VI != valueNumbering.end())
470 if (!isa<Instruction>(V)) {
471 valueNumbering[V] = nextValueNumber;
472 return nextValueNumber++;
479 return lookupOrAddCall(cast<CallInst>(I));
481 case Instruction::FAdd:
482 case Instruction::Sub:
483 case Instruction::FSub:
484 case Instruction::Mul:
485 case Instruction::FMul:
486 case Instruction::UDiv:
487 case Instruction::SDiv:
488 case Instruction::FDiv:
489 case Instruction::URem:
490 case Instruction::SRem:
491 case Instruction::FRem:
492 case Instruction::Shl:
493 case Instruction::LShr:
494 case Instruction::AShr:
498 case Instruction::ICmp:
499 case Instruction::FCmp:
500 case Instruction::Trunc:
501 case Instruction::ZExt:
502 case Instruction::SExt:
503 case Instruction::FPToUI:
504 case Instruction::FPToSI:
505 case Instruction::UIToFP:
506 case Instruction::SIToFP:
507 case Instruction::FPTrunc:
508 case Instruction::FPExt:
509 case Instruction::PtrToInt:
510 case Instruction::IntToPtr:
511 case Instruction::BitCast:
513 case Instruction::ExtractElement:
514 case Instruction::InsertElement:
515 case Instruction::ShuffleVector:
516 case Instruction::InsertValue:
517 case Instruction::GetElementPtr:
520 case Instruction::ExtractValue:
521 exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
524 valueNumbering[V] = nextValueNumber;
525 return nextValueNumber++;
528 uint32_t& e = expressionNumbering[exp];
529 if (!e) e = nextValueNumber++;
530 valueNumbering[V] = e;
538 assert(VI != valueNumbering.end() &&
"Value not numbered?");
549 Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
550 uint32_t& e = expressionNumbering[exp];
551 if (!e) e = nextValueNumber++;
557 valueNumbering.clear();
558 expressionNumbering.clear();
564 valueNumbering.erase(V);
571 I = valueNumbering.begin(),
E = valueNumbering.end(); I !=
E; ++
I) {
572 assert(I->first != V &&
"Inst still occurs in value numbering map!");
592 bool Changed = runImpl(F, AC, DT, TLI, AA, &MemDep, LI, &ORE);
605 E = d.
end(); I !=
E; ++
I) {
606 errs() << I->first <<
"\n";
630 std::pair<DenseMap<BasicBlock*, char>::iterator,
char> IV =
631 FullyAvailableBlocks.
insert(std::make_pair(BB, 2));
637 if (IV.first->second == 2)
638 IV.first->second = 3;
639 return IV.first->second != 0;
647 goto SpeculationFailure;
649 for (; PI != PE; ++PI)
654 goto SpeculationFailure;
662 char &BBVal = FullyAvailableBlocks[BB];
680 char &EntryVal = FullyAvailableBlocks[Entry];
681 if (EntryVal == 0)
continue;
687 }
while (!BBWorklist.
empty());
722 "precondition violation - materialization can't fail");
724 if (
auto *C = dyn_cast<Constant>(StoredVal))
726 StoredVal = FoldedStoredVal;
735 if (StoredValSize == LoadedValSize) {
747 Type *TypeToCastTo = LoadedTy;
751 if (StoredValTy != TypeToCastTo)
759 if (
auto *C = dyn_cast<ConstantExpr>(StoredVal))
761 StoredVal = FoldedStoredVal;
769 assert(StoredValSize >= LoadedValSize &&
770 "CanCoerceMustAliasedValueToLoad fail");
789 StoredVal = IRB.
CreateLShr(StoredVal, ShiftAmt,
"tmp");
794 StoredVal = IRB.
CreateTrunc(StoredVal, NewIntTy,
"trunc");
796 if (LoadedTy != NewIntTy) {
802 StoredVal = IRB.
CreateBitCast(StoredVal, LoadedTy,
"bitcast");
805 if (
auto *C = dyn_cast<Constant>(StoredVal))
807 StoredVal = FoldedStoredVal;
822 uint64_t WriteSizeInBits,
829 int64_t StoreOffset = 0, LoadOffset = 0;
833 if (StoreBase != LoadBase)
846 if ((WriteSizeInBits & 7) | (LoadSize & 7))
848 uint64_t StoreSize = WriteSizeInBits / 8;
852 bool isAAFailure =
false;
853 if (StoreOffset < LoadOffset)
854 isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
856 isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
865 if (StoreOffset > LoadOffset ||
866 StoreOffset+StoreSize < LoadOffset+LoadSize)
871 return LoadOffset-StoreOffset;
887 StorePtr, StoreSize, DL);
902 if (R != -1)
return R;
906 int64_t LoadOffs = 0;
907 const Value *LoadBase =
912 LoadBase, LoadOffs, LoadSize, DepLI);
913 if (Size == 0)
return -1;
917 assert(DepLI->
isSimple() &&
"Cannot widen volatile/atomic load!");
930 if (!SizeCst)
return -1;
952 MI->
getDest(), MemSizeInBits, DL);
956 unsigned AS = Src->getType()->getPointerAddressSpace();
1000 ShiftAmt = (StoreSize-LoadSize-
Offset)*8;
1003 SrcVal = Builder.
CreateLShr(SrcVal, ShiftAmt);
1005 if (LoadSize != StoreSize)
1024 if (Offset+LoadSize > SrcValStoreSize) {
1025 assert(SrcVal->
isSimple() &&
"Cannot widen volatile/atomic load!");
1029 unsigned NewLoadSize = Offset+LoadSize;
1043 Builder.SetCurrentDebugLocation(SrcVal->
getDebugLoc());
1044 PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
1045 LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
1049 DEBUG(
dbgs() <<
"GVN WIDENED LOAD: " << *SrcVal <<
"\n");
1050 DEBUG(
dbgs() <<
"TO: " << *NewLoad <<
"\n");
1054 Value *RV = NewLoad;
1056 RV = Builder.CreateLShr(RV, (NewLoadSize - SrcValStoreSize) * 8);
1057 RV = Builder.CreateTrunc(RV, SrcVal->
getType());
1085 if (
MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
1088 Value *Val = MSI->getValue();
1092 Value *OneElt = Val;
1095 for (
unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
1097 if (NumBytesSet*2 <= LoadSize) {
1099 Val = Builder.
CreateOr(Val, ShVal);
1106 Val = Builder.
CreateOr(OneElt, ShVal);
1139 if (ValuesPerBlock.
size() == 1 &&
1142 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
1143 "Dead BB dominate this block");
1144 return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
1171 if (isSimpleValue()) {
1172 Res = getSimpleValue();
1173 if (Res->
getType() != LoadTy) {
1176 DEBUG(
dbgs() <<
"GVN COERCED NONLOCAL VAL:\nOffset: " <<
Offset <<
" "
1177 << *getSimpleValue() <<
'\n'
1178 << *Res <<
'\n' <<
"\n\n\n");
1180 }
else if (isCoercedLoadValue()) {
1187 DEBUG(
dbgs() <<
"GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset <<
" "
1188 << *getCoercedLoadValue() <<
'\n'
1189 << *Res <<
'\n' <<
"\n\n\n");
1191 }
else if (isMemIntrinValue()) {
1194 DEBUG(
dbgs() <<
"GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " <<
Offset
1195 <<
" " << *getMemIntrinValue() <<
'\n'
1196 << *Res <<
'\n' <<
"\n\n\n");
1198 assert(isUndefValue() &&
"Should be UndefVal");
1199 DEBUG(
dbgs() <<
"GVN COERCED NONLOCAL Undef:\n";);
1202 assert(Res &&
"failed to materialize?");
1207 if (
const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
1208 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1217 using namespace ore;
1218 User *OtherAccess =
nullptr;
1221 R <<
"load of type " <<
NV(
"Type", LI->
getType()) <<
" not eliminated"
1225 if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
1226 DT->
dominates(cast<Instruction>(U), LI)) {
1231 OtherAccess =
nullptr;
1237 R <<
" in favor of " <<
NV(
"OtherAccess", OtherAccess);
1239 R <<
" because it is clobbered by " <<
NV(
"ClobberedBy", DepInfo.
getInst());
1248 "expected a local dependence");
1259 if (Address && LI->
isAtomic() <= DepSI->isAtomic()) {
1263 Res = AvailableValue::get(DepSI->getValueOperand(),
Offset);
1277 if (DepLI != LI && Address && LI->
isAtomic() <= DepLI->isAtomic()) {
1282 Res = AvailableValue::getLoad(DepLI, Offset);
1295 Res = AvailableValue::getMI(DepMI, Offset);
1303 dbgs() <<
"GVN: load ";
1306 dbgs() <<
" is clobbered by " << *I <<
'\n';
1332 if (
StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1336 if (S->getValueOperand()->getType() != LI->
getType() &&
1342 if (S->isAtomic() < LI->
isAtomic())
1345 Res = AvailableValue::get(S->getValueOperand());
1349 if (
LoadInst *
LD = dyn_cast<LoadInst>(DepInst)) {
1361 Res = AvailableValue::getLoad(
LD);
1368 dbgs() <<
"GVN: load ";
1370 dbgs() <<
" has unknown def " << *DepInst <<
'\n';
1375 void GVN::AnalyzeLoadAvailability(
LoadInst *LI, LoadDepVect &Deps,
1376 AvailValInBlkVect &ValuesPerBlock,
1377 UnavailBlkVect &UnavailableBlocks) {
1383 unsigned NumDeps = Deps.size();
1384 for (
unsigned i = 0, e = NumDeps;
i != e; ++
i) {
1388 if (DeadBlocks.count(DepBB)) {
1391 ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1396 UnavailableBlocks.push_back(DepBB);
1403 Value *Address = Deps[
i].getAddress();
1406 if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
1410 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1413 UnavailableBlocks.push_back(DepBB);
1417 assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1418 "post condition violation");
1421 bool GVN::PerformLoadPRE(
LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
1422 UnavailBlkVect &UnavailableBlocks) {
1432 UnavailableBlocks.end());
1441 if (TmpBB == LoadBB)
1443 if (Blockers.count(TmpBB))
1463 FullyAvailableBlocks[AV.BB] =
true;
1464 for (
BasicBlock *UnavailableBB : UnavailableBlocks)
1465 FullyAvailableBlocks[UnavailableBB] =
false;
1471 if (Pred->getTerminator()->isEHPad()) {
1473 <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1474 << Pred->getName() <<
"': " << *LI <<
'\n');
1482 if (Pred->getTerminator()->getNumSuccessors() != 1) {
1483 if (isa<IndirectBrInst>(Pred->getTerminator())) {
1484 DEBUG(
dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1485 << Pred->getName() <<
"': " << *LI <<
'\n');
1489 if (LoadBB->isEHPad()) {
1491 <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1492 << Pred->getName() <<
"': " << *LI <<
'\n');
1499 PredLoads[Pred] =
nullptr;
1504 unsigned NumUnavailablePreds = PredLoads.
size() + CriticalEdgePred.
size();
1505 assert(NumUnavailablePreds != 0 &&
1506 "Fully available value should already be eliminated!");
1512 if (NumUnavailablePreds != 1)
1516 for (
BasicBlock *OrigPred : CriticalEdgePred) {
1517 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1518 assert(!PredLoads.
count(OrigPred) &&
"Split edges shouldn't be in map!");
1519 PredLoads[NewPred] =
nullptr;
1520 DEBUG(
dbgs() <<
"Split critical edge " << OrigPred->getName() <<
"->"
1521 << LoadBB->getName() <<
'\n');
1525 bool CanDoPRE =
true;
1528 for (
auto &PredLoad : PredLoads) {
1529 BasicBlock *UnavailablePred = PredLoad.first;
1538 Value *LoadPtr =
nullptr;
1539 LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
1545 DEBUG(
dbgs() <<
"COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1551 PredLoad.second = LoadPtr;
1555 while (!NewInsts.
empty()) {
1562 return !CriticalEdgePred.empty();
1568 DEBUG(
dbgs() <<
"GVN REMOVING PRE LOAD: " << *LI <<
'\n');
1570 dbgs() <<
"INSERTED " << NewInsts.
size() <<
" INSTS: "
1571 << *NewInsts.
back() <<
'\n');
1589 for (
const auto &PredLoad : PredLoads) {
1590 BasicBlock *UnavailablePred = PredLoad.first;
1591 Value *LoadPtr = PredLoad.second;
1602 NewLoad->setAAMetadata(Tags);
1618 ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
1621 DEBUG(
dbgs() <<
"GVN INSERTED " << *NewLoad <<
'\n');
1627 if (isa<PHINode>(V))
1635 <<
"load eliminated by PRE");
1642 using namespace ore;
1644 <<
"load of type " <<
NV(
"Type", LI->
getType()) <<
" eliminated"
1646 <<
NV(
"InfavorOfValue", AvailableValue));
1651 bool GVN::processNonLocalLoad(
LoadInst *LI) {
1663 unsigned NumDeps = Deps.size();
1670 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1672 dbgs() <<
"GVN: non-local load ";
1674 dbgs() <<
" has unknown dependencies\n";
1682 OE =
GEP->idx_end();
1684 if (
Instruction *I = dyn_cast<Instruction>(OI->get()))
1685 performScalarPRE(I);
1689 AvailValInBlkVect ValuesPerBlock;
1690 UnavailBlkVect UnavailableBlocks;
1691 AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);
1695 if (ValuesPerBlock.empty())
1703 if (UnavailableBlocks.empty()) {
1704 DEBUG(
dbgs() <<
"GVN REMOVING NONLOCAL LOAD: " << *LI <<
'\n');
1710 if (isa<PHINode>(V))
1716 if (LI->
getDebugLoc() && ValuesPerBlock.size() != 1)
1730 return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
1733 bool GVN::processAssumeIntrinsic(
IntrinsicInst *IntrinsicI) {
1735 "This function can only be called with llvm.assume intrinsic");
1738 if (
ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1739 if (Cond->isZero()) {
1753 bool Changed =
false;
1760 Changed |= propagateEquality(V, True, Edge,
false);
1766 ReplaceWithConstMap[V] = True;
1772 if (
auto *CmpI = dyn_cast<CmpInst>(V)) {
1773 if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ ||
1774 CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1775 (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1776 CmpI->getFastMathFlags().noNaNs())) {
1777 Value *CmpLHS = CmpI->getOperand(0);
1778 Value *CmpRHS = CmpI->getOperand(1);
1779 if (isa<Constant>(CmpLHS))
1784 if (RHSConst !=
nullptr && !isa<Constant>(CmpLHS))
1785 ReplaceWithConstMap[CmpLHS] = RHSConst;
1802 if (!isa<LoadInst>(I))
1814 static const unsigned KnownIDs[] = {
1847 return processNonLocalLoad(L);
1854 dbgs() <<
"GVN: load ";
1856 dbgs() <<
" has unknown dependence\n";
1886 LeaderTableEntry
Vals = LeaderTable[num];
1887 if (!Vals.Val)
return nullptr;
1889 Value *Val =
nullptr;
1892 if (isa<Constant>(Val))
return Val;
1895 LeaderTableEntry* Next = Vals.Next;
1898 if (isa<Constant>(Next->Val))
return Next->Val;
1899 if (!Val) Val = Next->Val;
1920 "No edge between these basic blocks!");
1921 return Pred !=
nullptr;
1926 bool GVN::replaceOperandsWithConsts(
Instruction *Instr)
const {
1927 bool Changed =
false;
1928 for (
unsigned OpNum = 0; OpNum < Instr->
getNumOperands(); ++OpNum) {
1930 auto it = ReplaceWithConstMap.find(Operand);
1931 if (it != ReplaceWithConstMap.end()) {
1932 assert(!isa<Constant>(Operand) &&
1933 "Replacing constants with constants is invalid");
1934 DEBUG(
dbgs() <<
"GVN replacing: " << *Operand <<
" with " << *it->second
1935 <<
" in instruction " << *Instr <<
'\n');
1949 bool DominatesByEdge) {
1951 Worklist.
push_back(std::make_pair(LHS, RHS));
1952 bool Changed =
false;
1957 while (!Worklist.
empty()) {
1958 std::pair<Value*, Value*> Item = Worklist.
pop_back_val();
1959 LHS = Item.first; RHS = Item.second;
1966 if (isa<Constant>(LHS) && isa<Constant>(RHS))
1970 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
1972 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
"Unexpected value!");
1979 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
1980 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
1999 if (RootDominatesEnd && !isa<Instruction>(RHS))
2000 addToLeaderTable(LVN, RHS, Root.
getEnd());
2006 unsigned NumReplacements =
2009 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2011 Changed |= NumReplacements > 0;
2012 NumGVNEqProp += NumReplacements;
2028 bool isKnownFalse = !isKnownTrue;
2035 Worklist.
push_back(std::make_pair(A, RHS));
2036 Worklist.
push_back(std::make_pair(B, RHS));
2043 if (
CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2044 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2050 Worklist.
push_back(std::make_pair(Op0, Op1));
2064 if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->
isZero())
2065 Worklist.
push_back(std::make_pair(Op0, Op1));
2078 if (Num < NextNum) {
2080 if (NotCmp && isa<Instruction>(NotCmp)) {
2081 unsigned NumReplacements =
2084 : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2086 Changed |= NumReplacements > 0;
2087 NumGVNEqProp += NumReplacements;
2094 if (RootDominatesEnd)
2095 addToLeaderTable(Num, NotVal, Root.
getEnd());
2108 if (isa<DbgInfoIntrinsic>(I))
2117 bool Changed =
false;
2127 if (MD && V->getType()->getScalarType()->isPointerTy())
2136 return processAssumeIntrinsic(IntrinsicI);
2138 if (
LoadInst *LI = dyn_cast<LoadInst>(I)) {
2139 if (processLoad(LI))
2143 addToLeaderTable(Num, LI, LI->
getParent());
2149 if (
BranchInst *BI = dyn_cast<BranchInst>(I)) {
2150 if (!BI->isConditional())
2153 if (isa<Constant>(BI->getCondition()))
2154 return processFoldableCondBr(BI);
2156 Value *BranchCond = BI->getCondition();
2160 if (TrueSucc == FalseSucc)
2164 bool Changed =
false;
2168 Changed |= propagateEquality(BranchCond, TrueVal, TrueE,
true);
2172 Changed |= propagateEquality(BranchCond, FalseVal, FalseE,
true);
2178 if (
SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2179 Value *SwitchCond =
SI->getCondition();
2181 bool Changed =
false;
2185 for (
unsigned i = 0, n =
SI->getNumSuccessors();
i != n; ++
i)
2186 ++SwitchEdges[
SI->getSuccessor(
i)];
2192 if (SwitchEdges.
lookup(Dst) == 1) {
2194 Changed |= propagateEquality(SwitchCond,
i.getCaseValue(),
E,
true);
2210 if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
2211 addToLeaderTable(Num, I, I->
getParent());
2218 if (Num >= NextNum) {
2219 addToLeaderTable(Num, I, I->
getParent());
2228 addToLeaderTable(Num, I, I->
getParent());
2230 }
else if (Repl == I) {
2258 bool Changed =
false;
2259 bool ShouldContinue =
true;
2270 Changed |= removedBlock;
2273 unsigned Iteration = 0;
2274 while (ShouldContinue) {
2275 DEBUG(
dbgs() <<
"GVN iteration: " << Iteration <<
"\n");
2276 ShouldContinue = iterateOnFunction(F);
2277 Changed |= ShouldContinue;
2284 assignValNumForDeadCode();
2285 bool PREChanged =
true;
2286 while (PREChanged) {
2287 PREChanged = performPRE(F);
2288 Changed |= PREChanged;
2297 cleanupGlobalSets();
2308 assert(InstrsToErase.empty() &&
2309 "We expect InstrsToErase to be empty across iterations");
2310 if (DeadBlocks.count(BB))
2314 ReplaceWithConstMap.clear();
2315 bool ChangedFunction =
false;
2319 if (!ReplaceWithConstMap.empty())
2320 ChangedFunction |= replaceOperandsWithConsts(&*BI);
2321 ChangedFunction |= processInstruction(&*BI);
2323 if (InstrsToErase.empty()) {
2329 NumGVNInstr += InstrsToErase.size();
2332 bool AtStart = BI == BB->
begin();
2337 E = InstrsToErase.end(); I !=
E; ++
I) {
2338 DEBUG(
dbgs() <<
"GVN removed: " << **I <<
'\n');
2340 DEBUG(verifyRemoved(*I));
2341 (*I)->eraseFromParent();
2343 InstrsToErase.clear();
2351 return ChangedFunction;
2356 unsigned int ValNo) {
2364 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2391 VN.
add(Instr, ValNo);
2394 addToLeaderTable(ValNo, Instr, Pred);
2398 bool GVN::performScalarPRE(
Instruction *CurInst) {
2399 if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
2402 isa<DbgInfoIntrinsic>(CurInst))
2409 if (isa<CmpInst>(CurInst))
2413 if (
CallInst *CallI = dyn_cast<CallInst>(CurInst))
2414 if (CallI->isInlineAsm())
2425 unsigned NumWith = 0;
2426 unsigned NumWithout = 0;
2435 if (
P == CurrentBlock) {
2443 Value *predV = findLeader(
P, ValNo);
2445 predMap.
push_back(std::make_pair(static_cast<Value *>(
nullptr),
P));
2448 }
else if (predV == CurInst) {
2460 if (NumWithout > 1 || NumWith == 0)
2468 if (NumWithout != 0) {
2478 toSplit.push_back(std::make_pair(PREPred->
getTerminator(), SuccNum));
2482 PREInstr = CurInst->
clone();
2483 if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) {
2485 DEBUG(verifyRemoved(PREInstr));
2493 assert (PREInstr !=
nullptr || NumWithout == 0);
2500 CurInst->
getName() +
".pre-phi", &CurrentBlock->
front());
2501 for (
unsigned i = 0, e = predMap.
size();
i != e; ++
i) {
2502 if (
Value *V = predMap[
i].first)
2509 addToLeaderTable(ValNo, Phi, CurrentBlock);
2515 removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2517 DEBUG(
dbgs() <<
"GVN PRE removed: " << *CurInst <<
'\n');
2520 DEBUG(verifyRemoved(CurInst));
2529 bool GVN::performPRE(
Function &F) {
2530 bool Changed =
false;
2541 BE = CurrentBlock->
end();
2544 Changed |= performScalarPRE(CurInst);
2548 if (splitCriticalEdges())
2566 bool GVN::splitCriticalEdges() {
2567 if (toSplit.empty())
2570 std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
2573 }
while (!toSplit.empty());
2579 bool GVN::iterateOnFunction(
Function &F) {
2580 cleanupGlobalSets();
2583 bool Changed =
false;
2587 std::vector<BasicBlock *> BBVect;
2588 BBVect.reserve(256);
2594 BBVect.push_back(*RI);
2596 for (std::vector<BasicBlock *>::iterator I = BBVect.begin(),
E = BBVect.end();
2598 Changed |= processBlock(*I);
2603 void GVN::cleanupGlobalSets() {
2605 LeaderTable.
clear();
2606 TableAllocator.
Reset();
2611 void GVN::verifyRemoved(
const Instruction *Inst)
const {
2617 I = LeaderTable.
begin(),
E = LeaderTable.
end(); I !=
E; ++
I) {
2618 const LeaderTableEntry *Node = &I->second;
2619 assert(Node->Val != Inst &&
"Inst still in value numbering scope!");
2621 while (Node->Next) {
2623 assert(Node->Val != Inst &&
"Inst still in value numbering scope!");
2637 while (!NewDead.
empty()) {
2639 if (DeadBlocks.count(D))
2645 DeadBlocks.insert(Dom.
begin(), Dom.
end());
2650 if (DeadBlocks.count(S))
2653 bool AllPredDead =
true;
2655 if (!DeadBlocks.count(
P)) {
2656 AllPredDead =
false;
2679 if (DeadBlocks.count(B))
2684 if (!DeadBlocks.count(
P))
2689 DeadBlocks.insert(
P = S);
2693 PHINode &Phi = cast<PHINode>(*II);
2714 bool GVN::processFoldableCondBr(
BranchInst *BI) {
2728 if (DeadBlocks.count(DeadRoot))
2732 DeadRoot = splitCriticalEdges(BI->
getParent(), DeadRoot);
2734 addDeadBlock(DeadRoot);
2742 void GVN::assignValNumForDeadCode() {
2746 addToLeaderTable(ValNum, &Inst, BB);
2760 if (skipFunction(F))
2763 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
2765 return Impl.runImpl(
2766 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
2767 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
2768 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
2769 getAnalysis<AAResultsWrapperPass>().getAAResults(),
2771 : &getAnalysis<MemoryDependenceWrapperPass>().
getMemDep(),
2772 LIWP ? &LIWP->getLoopInfo() :
nullptr,
2773 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE());
Legacy wrapper pass to provide the GlobalsAAResult object.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
void setDomTree(DominatorTree *D)
Value * getValueOperand()
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
FunctionPass * createGVNPass(bool NoLoads=false)
Create a legacy GVN pass.
static cl::opt< bool > EnableLoadPRE("enable-load-pre", cl::init(true))
void push_back(const T &Elt)
void invalidateCachedPointerInfo(Value *Ptr)
Invalidates cached information about the specified pointer, because it may be too conservative in mem...
A parsed version of the target data layout string in and methods for querying it. ...
static ConstantInt * getFalse(LLVMContext &Context)
void invalidateCachedPredecessors()
Clears the PredIteratorCache info.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
This class is the base class for the comparison instructions.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Helper class for SSA formation on a set of values defined in multiple blocks.
Provides a lazy, caching interface for making common memory aliasing information queries, backed by LLVM's alias analysis passes.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
DiagnosticInfoOptimizationBase::Argument NV
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *MI, const DataLayout &DL)
STATISTIC(NumFunctions,"Total number of functions")
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds...
Value * MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt, GVN &gvn) const
Emit code at the specified insertion point to adjust the value defined here to the specified type...
bool isUndefValue() const
This is the interface for a simple mod/ref and alias analysis over globals.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Returns the value number of the given comparison, assigning it a new number if it did not have one be...
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
void getDescendants(NodeT *R, SmallVectorImpl< NodeT * > &Result) const
Get all nodes dominated by R, including R itself.
const BasicBlock * getStart() const
unsigned getNumOperands() const
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value...
This class represents a function call, abstracting a target machine's calling convention.
An immutable pass that tracks lazily created AssumptionCache objects.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
LoadInst * getCoercedLoadValue() const
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
A cache of .assume calls within a function.
bool exists(Value *V) const
Returns true if a value number exists for the specified value.
void setAliasAnalysis(AliasAnalysis *A)
1 1 1 0 True if unordered or not equal
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, Value *WritePtr, uint64_t WriteSizeInBits, const DataLayout &DL)
This function is called when we have a memdep query of a load that ends up being a clobbering memory ...
This class wraps the llvm.memset intrinsic.
This class implements a map that also provides access to all stored values in a deterministic order...
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction & front() const
Analysis pass which computes a DominatorTree.
An instruction for reading from memory.
static IntegerType * getInt64Ty(LLVMContext &C)
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
iterator end()
Get an iterator to the end of the SetVector.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
StringRef getName() const
Return a constant reference to the value's name.
iterator begin()
Instruction iterator methods.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
bool match(Val *V, const Pattern &P)
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
bool isUnconditional() const
static cl::opt< uint32_t > MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore, cl::desc("Max recurse depth (default = 1000)"))
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
static cl::opt< bool > EnablePRE("enable-pre", cl::init(true), cl::Hidden)
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass...
Option class for critical edge splitting.
void clear()
Remove all entries from the ValueTable.
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it...
A Use represents the edge between a Value definition and its users.
static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize, const LoadInst *LI)
Looks at a memory location for a load (specified by MemLocBase, Offs, and Size) and compares it again...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
static GCRegistry::Add< StatepointGC > D("statepoint-example","an example strategy for statepoint")
unsigned getNumArgOperands() const
Return the number of call arguments.
void getNonLocalPointerDependency(Instruction *QueryInst, SmallVectorImpl< NonLocalDepResult > &Result)
Perform a full dependency query for an access to the QueryInst's specified memory location...
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function is called when we have a memdep query of a load that ends up being clobbered by another...
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
An analysis that produces MemoryDependenceResults for a function.
void setName(const Twine &Name)
Change the name of the value.
std::vector< NonLocalDepEntry > NonLocalDepInfo
Analysis pass that exposes the LoopInfo for a function.
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following: ...
SynchronizationScope getSynchScope() const
LLVM_NODISCARD bool empty() const
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Attempt to fold the constant using the specified DataLayout.
static void patchReplacementInstruction(Instruction *I, Value *Repl)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
bool insert(const value_type &X)
Insert a new element into the SetVector.
bool mayReadFromMemory() const
Return true if this instruction may read memory.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isSimpleValue() const
The core GVN pass object.
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset...
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Expression(uint32_t o=~2U)
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction...
Function Alias Analysis false
BasicBlock * getSuccessor(unsigned i) const
iterator begin()
Get an iterator to the beginning of the SetVector.
DiagnosticInfoOptimizationBase::setExtraArgs setExtraArgs
static AvailableValue getLoad(LoadInst *LI, unsigned Offset=0)
hash_code hash_value(const APFloat &Arg)
See friend declarations above.
static Value * GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
This function is called when we have a memdep query of a load that ends up being a clobbering mem int...
bool isLittleEndian() const
Layout endianness...
static GVN::Expression getEmptyKey()
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
An instruction for storing to memory.
void add(Value *V, uint32_t num)
add - Insert a value into the table with a specified value number.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
bool isArrayTy() const
True if this is an instance of ArrayType.
Value * getSimpleValue() const
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
void takeName(Value *V)
Transfer the name from V to this value.
static unsigned getHashValue(const GVN::Expression &e)
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Type * getScalarType() const LLVM_READONLY
If this is a vector type, return the element type, otherwise return 'this'.
static Value * GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, GVN &gvn)
This function is called when we have a memdep query of a load that ends up being a clobbering load...
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
Interval::succ_iterator succ_end(Interval *I)
void initializeGVNLegacyPassPass(PassRegistry &)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
BasicBlock * SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions())
If this edge is a critical edge, insert a new node to split the critical edge.
initializer< Ty > init(const Ty &Val)
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block...
SmallVector< uint32_t, 4 > varargs
bool operator==(const Expression &other) const
const NonLocalDepInfo & getNonLocalCallDependency(CallSite QueryCS)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
static Value * GetStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
This function is called when we have a memdep query of a load that ends up being a clobbering store...
A set of analyses that are preserved following a run of a transformation pass.
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
constexpr bool isPowerOf2_32(uint32_t Value)
isPowerOf2_32 - This function returns true if the argument is a power of two > 0. ...
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs...ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction...
LLVM Basic Block Representation.
PointerIntPair - This class implements a pair of a pointer and small integer.
PHITransAddr - An address value which tracks and handles phi translation.
The instances of the Type class are immutable: once they are created, they are never changed...
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
This is an important class for using LLVM in a threaded context.
uint64_t getTypeStoreSizeInBits(Type *Ty) const
Returns the maximum number of bits that may be overwritten by storing the specified type; always a mu...
This class holds the mapping between values and value numbers.
Conditional or Unconditional Branch instruction.
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
static GVN::Expression getTombstoneKey()
static Value * ConstructSSAForLoadSet(LoadInst *LI, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVN &gvn)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate LI...
This is an important base class in LLVM.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS)
A manager for alias analyses.
APInt Or(const APInt &LHS, const APInt &RHS)
Bitwise OR function for APInt.
APInt Xor(const APInt &LHS, const APInt &RHS)
Bitwise XOR function for APInt.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Represent the analysis usage information of a pass.
BasicBlock * getBB() const
std::vector< NodeRef >::reverse_iterator rpo_iterator
Analysis pass providing a never-invalidated alias analysis result.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE,"Assign register bank of generic virtual registers", false, false) RegBankSelect
PointerIntPair< Value *, 2, ValType > Val
V - The value that is live out of the block.
FunctionPass class - This class is used to implement most global optimizations.
Value * getOperand(unsigned i) const
Interval::pred_iterator pred_end(Interval *I)
Value * getPointerOperand()
bool isCommutative() const
Return true if the instruction is commutative:
const MemDepResult & getResult() const
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
GVNLegacyPass(bool NoLoads=false)
void setAlignment(unsigned Align)
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, const DataLayout &DL)
ConstantFoldLoadFromConstPtr - Return the value that a load from C would produce if it is constant an...
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
static void reportLoadElim(LoadInst *LI, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
bool isPointerTy() const
True if this is an instance of PointerType.
DominatorTree & getDominatorTree() const
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
uint64_t NextPowerOf2(uint64_t A)
NextPowerOf2 - Returns the next power of two (in 64-bits) that is strictly greater than A...
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDependenceResults instance...
Value * GetUnderlyingObject(Value *V, const DataLayout &DL, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value...
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
A memory dependence query can return one of three different answers.
const BasicBlock * getEnd() const
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo, DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instructions involved in a missed load-elimination case that is due to an inte...
void markInstructionForDeletion(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
A function analysis which provides an AssumptionCache.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A SetVector that performs no allocations if smaller than a certain size.
This is the common base class for memset/memcpy/memmove.
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
This is the shared class of boolean and integer constants.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL)
Return true if CoerceAvailableValueToLoadType will succeed.
Value * getDest() const
This is just like getRawDest, but it strips off any cast instructions that feed it, giving the original input.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
void verifyRemoved(const Value *) const
verifyRemoved - Verify that the value is removed from all internal data structures.
Type * getType() const
All values are typed, get the type of this value.
Provides information about what library functions are available for the current target.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Value * getLength() const
LLVM_NODISCARD T pop_back_val()
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block...
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
AtomicOrdering getOrdering() const
Returns the ordering effect of this fence.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
pred_range predecessors(BasicBlock *BB)
const BasicBlock & getEntryBlock() const
static ConstantInt * getTrue(LLVMContext &Context)
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
void setOperand(unsigned i, Value *Val)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
size_type count(const KeyT &Key) const
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock...
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool isIntegerTy() const
True if this is an instance of IntegerType.
iterator_range< user_iterator > users()
BasicBlock * getSinglePredecessor()
Return the predecessor of this block if it has a single predecessor block.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
An opaque object representing a hash code.
bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates uninitialized memory (such ...
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
bool isStructTy() const
True if this is an instance of StructType.
void erase(Value *v)
Remove a value from the value numbering.
bool isCoercedLoadValue() const
Value * getSource() const
This is just like getRawSource, but it strips off any cast instructions that feed it...
static bool isLifetimeStart(const Instruction *Inst)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
This class wraps the llvm.memcpy/memmove intrinsics.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Value * getCondition() const
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemoryDependenceResults *MemDep=nullptr)
Attempts to merge a block into its predecessor, if possible.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
unsigned getAlignment() const
Return the alignment of the access that is being performed.
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates zero-filled memory (such as...
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
bool hasOneUse() const
Return true if there is exactly one user of this value.
static AvailableValue get(Value *V, unsigned Offset=0)
void setMemDep(MemoryDependenceResults *M)
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
void preserve()
Mark an analysis as preserved.
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
friend hash_code hash_value(const Expression &Value)
uint32_t lookupOrAdd(Value *V)
lookup_or_add - Returns the value number for the specified value, assigning it a new number if it did...
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Analysis pass providing the TargetLibraryInfo.
bool isMemIntrinValue() const
iterator_range< df_iterator< T > > depth_first(const T &G)
uint32_t lookup(Value *V) const
Returns the value number of the specified value.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI)
This function is called when we have a memdep query of a load that ends up being a clobbering store...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Represents a particular available value that we know how to materialize.
LLVMContext & getContext() const
Get the context in which this basic block lives.
uint32_t getNextUnusedValueNumber()
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, char > &FullyAvailableBlocks, uint32_t RecurseDepth)
Return true if we can prove that the value we're analyzing is fully available in the specified block...
0 0 0 1 True if ordered and equal
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction has no side ef...
LLVM Value Representation.
succ_range successors(BasicBlock *BB)
static AvailableValueInBlock getUndef(BasicBlock *BB)
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that pre...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
static Value * CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy, IRBuilder<> &IRB, const DataLayout &DL)
If we saw a store of a value to memory, and then a load from a must-aliased pointer of a different ty...
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
This is an entry in the NonLocalDepInfo cache.
A container for analyses that lazily runs them and caches their results.
BasicBlock * BB
BB - The basic block in question.
Value * SimplifyInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
See if we can compute a simplified version of this instruction.
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
Legacy analysis pass which computes a DominatorTree.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
Value * MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const
Emit code at the end of this block to adjust the value defined here to the specified type...
void setIncomingValue(unsigned i, Value *V)
AvailableValue AV
AV - The actual available value.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static GCRegistry::Add< ErlangGC > A("erlang","erlang-compatible garbage collector")
Value * getPointerOperand()
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
static IntegerType * getInt8Ty(LLVMContext &C)
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs)
Combine the metadata of two instructions so that K can replace J.
const BasicBlock * getParent() const
InstListType::iterator iterator
Instruction iterators...
static AvailableValue getUndef()
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
MemIntrinsic * getMemIntrinValue() const
A wrapper class for inspecting calls to intrinsic functions.
bool isVoidTy() const
Return true if this is 'void'.
This instruction inserts a struct field of array element value into an aggregate value.
MemoryDependenceResults & getMemDep() const
bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.