#define DEBUG_TYPE "instcombine"

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");

DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
                                       cl::desc("Enable code sinking"),
                                       cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned> InfiniteLoopDetectionThreshold(
    "instcombine-infinite-loop-threshold",
    cl::desc("Number of instruction combining iterations considered an "
             "infinite loop"),
    cl::init(InstCombineDefaultInfiniteLoopThreshold), cl::Hidden);

static cl::opt<unsigned>
    MaxArraySize("instcombine-maxarray-size", cl::init(1024),
                 cl::desc("Maximum array size considered when doing a combine"));

static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));
std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // ... (forwards target-specific intrinsics to TTI)
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // ...
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2,
    APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // ...
  return TTI.simplifyDemandedVectorEltsIntrinsic(
      *this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  // ...
}

bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If this is a legal or desirable integer from type, and the result would be
  // an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}
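
// A standalone model of the width-change policy above, for intuition only.
// This is not LLVM API: isLegalInt() is a hypothetical stand-in for
// DataLayout::isLegalInteger(), and isDesirableInt() for isDesirableIntType().
namespace width_policy_sketch {
inline bool isLegalInt(unsigned W) {
  return W == 8 || W == 16 || W == 32 || W == 64;
}
inline bool isDesirableInt(unsigned W) { return isLegalInt(W); }

inline bool shouldChangeWidth(unsigned FromW, unsigned ToW) {
  bool FromLegal = FromW == 1 || isLegalInt(FromW);
  bool ToLegal = ToW == 1 || isLegalInt(ToW);
  if (ToW < FromW && isDesirableInt(ToW))
    return true;  // shrinking to a desirable width is always acceptable
  if ((FromLegal || isDesirableInt(FromW)) && !ToLegal)
    return false; // never trade a legal type for an illegal one
  if (!FromLegal && !ToLegal && ToW > FromW)
    return false; // between illegal types, never grow
  return true;    // e.g. shouldChangeWidth(160, 64) but not (64, 160)
}
} // namespace width_policy_sketch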
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  // We reason about Add/Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  bool Overflow = false;
  if (Opcode == Instruction::Add)
    (void)BVal->sadd_ov(*CVal, Overflow);
  else
    (void)BVal->ssub_ov(*CVal, Overflow);

  return !Overflow;
}
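
// Worked example of the probe above, with plain 64-bit integers standing in
// for APInt: for "(X + C1) + C2 -> X + (C1 + C2)", nsw may only be kept when
// C1 + C2 itself cannot overflow, which is exactly what sadd_ov/ssub_ov
// report. Standalone sketch (uses GCC/Clang __builtin_*_overflow), not LLVM
// API.
#include <cstdint>

namespace nsw_probe_sketch {
inline bool constantFoldKeepsNSW(bool IsAdd, int64_t C1, int64_t C2) {
  int64_t Folded;
  bool Overflow = IsAdd ? __builtin_add_overflow(C1, C2, &Folded)
                        : __builtin_sub_overflow(C1, C2, &Folded);
  return !Overflow; // mirrors "return !Overflow;" above
}
} // namespace nsw_probe_sketch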
static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable as they can be
/// preserved.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}
/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;
  // ...
}
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  bool Changed = false;
  // ...

  // Order operands such that they are listed from right (least complex) to
  // left (most complex): constants before unary operators before binary
  // operators.
  if (I.isCommutative() &&
      getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  // ...
  if (I.isAssociative()) {
    // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies,
    // and the symmetric variant. Conservatively clear all optional flags,
    // then re-derive nuw/nsw where the reassociation keeps them valid.
    // ...
    ClearSubclassDataAfterReassociation(I);
    if (IsNUW)
      I.setHasNoUnsignedWrap(true);
    if (IsNSW)
      I.setHasNoSignedWrap(true);
    // ...
  }

  // ...
  if (I.isAssociative() && I.isCommutative()) {
    // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)".
    // ...
    if (isa<FPMathOperator>(NewBO)) {
      // ... (propagate intersected fast-math flags onto the new inner op)
    }
    // ...
    if (IsNUW)
      I.setHasNoUnsignedWrap(true);
    // ...
  }

  // ...
  return Changed;
}
/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}
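
// The table above encodes algebraic identities; they can be spot-checked
// exhaustively on a small domain. Standalone sketch verifying that 'and'
// distributes over 'or' for all 8-bit values, not LLVM API.
namespace distributivity_sketch {
inline bool andDistributesOverOr() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned Y = 0; Y < 256; ++Y)
      for (unsigned Z = 0; Z < 256; ++Z)
        if ((X & (Y | Z)) != ((X & Y) | (X & Z)))
          return false;
  return true; // holds for every X, Y, Z
}
} // namespace distributivity_sketch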
/// This function returns identity value for given opcode, which can be used to
/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 4), X)', it tries to match expression 'add(mul(X, 16), X)' to
/// enable factorization.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_Constant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C);
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  return Op->getOpcode();
}
/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<OverflowingBinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }
    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      // iff C+1 isn't INT_MIN.
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}
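
// The factorization above is plain distributivity run backwards, e.g.
// (A*B) + (A*C) ==> A*(B+C). A quick self-check on machine integers
// (unsigned, so wraparound is well-defined). Standalone sketch, not LLVM API.
#include <cstdint>

namespace factorization_sketch {
inline bool factorsMatch(uint32_t A, uint32_t B, uint32_t C) {
  return A * B + A * C == A * (B + C);
}
} // namespace factorization_sketch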
Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  // ...
  // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
  // the common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;
  // ...
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination:
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly 1 arm of the select to have been
    // simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;
    // ...
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    // ...
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    // ...
  }
  // ...
}
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br:
      cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
      break;
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }
}
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}

/// A binop with a constant operand and a sign-extended boolean operand may be
/// converted into a select of constants.
Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
  Value *X;
  Constant *C;
  if (!match(BO.getOperand(0), m_SExt(m_Value(X))) ||
      !match(BO.getOperand(1), m_ImmConstant(C)) ||
      !X->getType()->isIntOrIntVectorTy(1))
    return nullptr;
  // ... (bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C))
}
static Constant *constantFoldOperationIntoSelectOperand(Instruction &I,
                                                        SelectInst *SI,
                                                        bool IsTrueArm) {
  SmallVector<Constant *> ConstOps;
  for (Value *Op : I.operands()) {
    CmpInst::Predicate Pred;
    Constant *C = nullptr;
    if (Op == SI) {
      C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
                                       : SI->getFalseValue());
    } else if (match(SI->getCondition(),
                     m_ICmp(Pred, m_Specific(Op), m_Constant(C))) &&
               Pred == (IsTrueArm ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
               isGuaranteedNotToBeUndefOrPoison(C)) {
      // Pass
    } else {
      C = dyn_cast<Constant>(Op);
    }
    if (C == nullptr)
      return nullptr;

    ConstOps.push_back(C);
  }

  return ConstantFoldInstOperands(&I, ConstOps, I.getModule()->getDataLayout());
}
Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                                bool FoldWithMultiUse) {
  // Don't modify shared select instructions unless set FoldWithMultiUse
  if (!SI->hasOneUse() && !FoldWithMultiUse)
    return nullptr;

  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();
  if (!(isa<Constant>(TV) || isa<Constant>(FV)))
    return nullptr;

  // Bool selects with constant operands can be folded to logical ops.
  if (SI->getType()->isIntOrIntVectorTy(1))
    return nullptr;

  // If it's a bitcast involving vectors, make sure it has the same number of
  // elements on both sides.
  if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
    VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
    VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

    // Verify that either both or neither are vectors.
    if ((SrcTy == nullptr) != (DestTy == nullptr))
      return nullptr;
    // ...
  }

  Constant *NewTV =
      constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
  Constant *NewFV =
      constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
  if (!NewTV && !NewFV)
    return nullptr;
  // ...
}
Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use. However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check that the instruction can be folded into each incoming value; track
  // the unique incoming value that does not simplify (NewPhiValues holds the
  // simplified ones, with nullptr marking the non-simplified slot).
  BasicBlock *NonSimplifiedBB = nullptr;
  Value *NonSimplifiedInVal = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    BasicBlock *InBB = PN->getIncomingBlock(i);
    // ... (substitute InVal into every matching operand of I and try to
    //      simplify the result)
    for (Value *Op : I.operands()) {
      // ...
    }

    if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
    if (NonSimplifiedBB) return nullptr;      // More than one non-simplified value.

    NonSimplifiedBB = InBB;
    NonSimplifiedInVal = InVal;

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (isa<InvokeInst>(InVal))
      if (cast<Instruction>(InVal)->getParent() == NonSimplifiedBB)
        return nullptr;
    // ...
  }

  if (NonSimplifiedBB != nullptr) {
    // ... (make sure it is safe and profitable to clone I into
    //      NonSimplifiedBB)
  }

  // ... (create the replacement PHI, then clone I into the non-simplified
  //      predecessor, PHI-translating its operands)
  if (NonSimplifiedBB) {
    Clone = I.clone();
    for (Use &U : Clone->operands()) {
      if (U == PN)
        U = NonSimplifiedInVal;
      else
        U = U->DoPHITranslation(PN->getParent(), NonSimplifiedBB);
    }
    // ...
  }

  for (unsigned i = 0; i != NumPHIValues; ++i) {
    if (NewPhiValues[i])
      NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
    else
      NewPN->addIncoming(Clone, PN->getIncomingBlock(i));
  }

  for (User *U : make_early_inc_range(PN->users())) {
    Instruction *User = cast<Instruction>(U);
    if (User == &I)
      continue;
    replaceInstUsesWith(*User, NewPN);
    eraseInstFromFunction(*User);
  }
  replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
                        const_cast<PHINode &>(*NewPN), /* ... */);
  // ...
}
Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
  auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
  auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
  if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
      Phi0->getNumOperands() != Phi1->getNumOperands())
    return nullptr;

  // ... (both phis must be in the same block as the binop)

  // Fold if there is at least one specific constant value in phi0 or phi1's
  // incoming values that can be used as the identity of the binary operator,
  // e.g.:
  //   %phi0 = phi i32 [0, %bb0], [%i, %bb1]
  //   %phi1 = phi i32 [%j, %bb0], [0, %bb1]
  //   %add = add i32 %phi0, %phi1
  //   ==> %add = phi i32 [%j, %bb0], [%i, %bb1]
  Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
                                               /*AllowRHSConstant*/ false);
  if (C) {
    SmallVector<Value *, 4> NewIncomingValues;
    auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
      auto &Phi0Use = std::get<0>(T);
      auto &Phi1Use = std::get<1>(T);
      if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
        return false;
      Value *Phi0UseV = Phi0Use.get();
      Value *Phi1UseV = Phi1Use.get();
      if (Phi0UseV == C)
        NewIncomingValues.push_back(Phi1UseV);
      else if (Phi1UseV == C)
        NewIncomingValues.push_back(Phi0UseV);
      else
        return false;
      return true;
    };

    if (all_of(zip(Phi0->operands(), Phi1->operands()),
               CanFoldIncomingValuePair)) {
      PHINode *NewPhi =
          PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
      assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
             "The number of collected incoming values should equal the number "
             "of the original PHINode operands!");
      for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
        NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
      return NewPhi;
    }
  }

  if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
    return nullptr;

  // Match a pair of incoming constants for one of the predecessor blocks.
  BasicBlock *ConstBB, *OtherBB;
  Constant *C0, *C1;
  if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
    ConstBB = Phi0->getIncomingBlock(0);
    OtherBB = Phi0->getIncomingBlock(1);
  } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
    ConstBB = Phi0->getIncomingBlock(1);
    OtherBB = Phi0->getIncomingBlock(0);
  } else {
    return nullptr;
  }
  if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
    return nullptr;

  // The block that we are hoisting to must reach here unconditionally.
  // Otherwise, we could be speculatively executing an expensive or
  // non-speculatable instruction.
  auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
  if (!PredBlockBranch || PredBlockBranch->isConditional() ||
      !DT.isReachableFromEntry(OtherBB))
    return nullptr;

  // TODO: This check could be tightened to only apply to binops (div/rem) that
  //       are not safe to speculatively execute. But that could allow hoisting
  //       potentially expensive instructions (fdiv for example).
  for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
      return nullptr;

  // ... (fold the constant pair, and create the new binop in OtherBB)
  Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
                                     Phi0->getIncomingValueForBlock(OtherBB),
                                     Phi1->getIncomingValueForBlock(OtherBB));
  if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
    NotFoldedNewBO->copyIRFlags(&BO);
  // ...
}
Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
  if (!isa<Constant>(I.getOperand(1)))
    return nullptr;

  if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
    if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
      return NewSel;
  } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
    if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
      return NewPhi;
  }
  return nullptr;
}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}
Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
  // If not a vector type, it's not subject to these folds.
  if (!isa<VectorType>(Inst.getType()))
    return nullptr;

  BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
  Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
  assert(cast<VectorType>(LHS->getType())->getElementCount() ==
         cast<VectorType>(Inst.getType())->getElementCount());
  assert(cast<VectorType>(RHS->getType())->getElementCount() ==
         cast<VectorType>(Inst.getType())->getElementCount());

  // If both operands of the binop are vector concatenations, then perform the
  // narrow binop on each pair of the source operands followed by concatenation
  // of the results.
  Value *L0, *L1, *R0, *R1;
  ArrayRef<int> Mask;
  if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
      match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
      LHS->hasOneUse() && RHS->hasOneUse() &&
      cast<ShuffleVectorInst>(LHS)->isConcat() &&
      cast<ShuffleVectorInst>(RHS)->isConcat()) {
    // This transform does not have the speculative execution constraint as
    // below because the shuffle is a concatenation.
    Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
    if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
      BO->copyIRFlags(&Inst);
    Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
    if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
      BO->copyIRFlags(&Inst);
    return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
  }

  auto createBinOpReverse = [&](Value *X, Value *Y) {
    Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
    if (auto *BO = dyn_cast<BinaryOperator>(V))
      BO->copyIRFlags(&Inst);
    Module *M = Inst.getModule();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, V->getType());
    return CallInst::Create(F, V);
  };

  // NOTE: Reverse shuffles don't require the speculative execution protection
  // below because they don't affect which lanes take part in the computation.
  Value *V1, *V2;
  if (match(LHS, m_VecReverse(m_Value(V1)))) {
    // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
    if (match(RHS, m_VecReverse(m_Value(V2))) &&
        (LHS->hasOneUse() || RHS->hasOneUse() ||
         (LHS == RHS && LHS->hasNUses(2))))
      return createBinOpReverse(V1, V2);

    // Op(rev(V1), RHSSplat) -> rev(Op(V1, RHSSplat))
    if (LHS->hasOneUse() && isSplatValue(RHS))
      return createBinOpReverse(V1, RHS);
  } else if (isSplatValue(LHS) &&
             match(RHS, m_OneUse(m_VecReverse(m_Value(V2))))) {
    // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
    return createBinOpReverse(LHS, V2);
  }

  // It may not be safe to reorder shuffles and things like div, urem, etc.
  // because we may trap when executing those ops on unknown vector elements.
  // See PR20059.
  if (!isSafeToSpeculativelyExecute(&Inst))
    return nullptr;

  auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
    Value *XY = Builder.CreateBinOp(Opcode, X, Y);
    if (auto *BO = dyn_cast<BinaryOperator>(XY))
      BO->copyIRFlags(&Inst);
    return new ShuffleVectorInst(XY, M);
  };

  // If both arguments of the binary operation are shuffles that use the same
  // mask and shuffle within a single vector, move the shuffle after the binop.
  if (match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))) &&
      match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) &&
      V1->getType() == V2->getType() &&
      (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
    // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
    return createBinOpShuffle(V1, V2, Mask);
  }

  // If both arguments of a commutative binop are select-shuffles that use the
  // same mask with commuted operands, the shuffles are unnecessary.
  if (Inst.isCommutative() &&
      match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
      match(RHS,
            m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
    auto *LShuf = cast<ShuffleVectorInst>(LHS);
    auto *RShuf = cast<ShuffleVectorInst>(RHS);
    // TODO: Allow shuffles that contain undefs in the mask?
    if (LShuf->isSelect() && RShuf->isSelect() /* ... */) {
      // Example:
      //   LHS = shuffle V1, V2, <0, 5, 6, 3>
      //   RHS = shuffle V2, V1, <0, 5, 6, 3>
      //   LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
      Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
      NewBO->copyIRFlags(&Inst);
      return NewBO;
    }
  }

  // If one argument is a shuffle within one vector and the other is a
  // constant, try moving the shuffle after the binary operation. This
  // canonicalization intends to move shuffles closer to other shuffles and
  // binops closer to other binops, so they can be folded.
  auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
  Constant *C;
  if (InstVTy &&
      match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Undef(),
                                                m_Mask(Mask))),
                             m_ImmConstant(C))) &&
      cast<FixedVectorType>(V1->getType())->getNumElements() <=
          InstVTy->getNumElements()) {
    assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
           "Shuffle should not change scalar type");

    // Find constant NewC that has property: shuffle(NewC, ShMask) = C.
    bool ConstOp1 = isa<Constant>(RHS);
    ArrayRef<int> ShMask = Mask;
    unsigned SrcVecNumElts =
        cast<FixedVectorType>(V1->getType())->getNumElements();
    // ... (NewVecC starts as a vector of SrcVecNumElts undef scalars)
    bool MayChange = true;
    unsigned NumElts = InstVTy->getNumElements();
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant *CElt = C->getAggregateElement(I);
      if (ShMask[I] >= 0) {
        assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
        Constant *NewCElt = NewVecC[ShMask[I]];
        // Bail out if:
        // 1. The constant vector contains a constant expression.
        // 2. The shuffle needs an element of the constant vector that can't
        //    be mapped to a new constant vector.
        // 3. This is a widening shuffle that copies elements of V1 into the
        //    extended elements (extending with undef is allowed).
        if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) ||
            I >= SrcVecNumElts) {
          MayChange = false;
          break;
        }
        NewVecC[ShMask[I]] = CElt;
      }
      // If this is a widening shuffle, we must be able to extend with undef
      // elements.
      if (I >= SrcVecNumElts || ShMask[I] < 0) {
        // ... (the binop applied to an undef lane must fold to poison,
        //      otherwise bail)
      }
    }
    if (MayChange) {
      // ... (materialize NewC, freezing undef lanes where needed)
      Value *NewLHS = ConstOp1 ? V1 : NewC;
      Value *NewRHS = ConstOp1 ? NewC : V1;
      return createBinOpShuffle(NewLHS, NewRHS, Mask);
    }
  }

  // Try to reassociate to sink a splat shuffle after a binary operation:
  //   bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
  // ... (fragments: the other operand must itself be a shuffle
  //      `if (isa<ShuffleVectorInst>(RHS)) ...`; the inner binop R keeps
  //      fast-math flags via `R->copyFastMathFlags(&Inst)`, and the outer
  //      binop copies IR flags via
  //      `if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
  //         NewInstBO->copyIRFlags(R);`)

  return nullptr;
}
Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
  // We need at least one extended operand.
  Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
  // ...
  Value *X;
  bool IsSext = match(Op0, m_SExt(m_Value(X)));
  if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
    return nullptr;

  // If both operands are the same extension from the same source type, we can
  // narrow; a constant operand that fits the narrow type also works.
  CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
  Value *Y;
  if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
        cast<Operator>(Op1)->getOpcode() == CastOpc &&
        (Op0->hasOneUse() || Op1->hasOneUse()))) {
    // ...
  }

  if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
    return nullptr;

  // ...
  Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
  if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
    if (IsSext)
      NewBinOp->setHasNoSignedWrap();
    else
      NewBinOp->setHasNoUnsignedWrap();
  }
  return CastInst::Create(CastOpc, NarrowBO, BO.getType());
}
/// Thread a GEP operation with constant indices through the constant
/// true/false arms of a select.
static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
                                  InstCombiner::BuilderTy &Builder) {
  if (!GEP.hasAllConstantIndices())
    return nullptr;

  Instruction *Sel;
  Value *Cond;
  Constant *TrueC, *FalseC;
  if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
      !match(Sel,
             m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
    return nullptr;

  // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
  // Propagate 'inbounds' and metadata from existing instructions.
  // Note: using IRBuilder to create the constants for efficiency.
  SmallVector<Value *, 4> IndexC(GEP.indices());
  bool IsInBounds = GEP.isInBounds();
  Type *Ty = GEP.getSourceElementType();
  Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", IsInBounds);
  Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", IsInBounds);
  return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
}
Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
                                             GEPOperator *Src) {
  // Combine indices of the two getelementptr instructions into a single
  // instruction.
  if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
    return nullptr;

  // For constant GEPs, use a more general offset-based folding approach.
  Type *PtrTy = Src->getType()->getScalarType();
  if (GEP.hasAllConstantIndices() &&
      (Src->hasOneUse() || Src->hasAllConstantIndices())) {
    // Split Src into a variable part and a constant suffix.
    gep_type_iterator GTI = gep_type_begin(*Src);
    Type *BaseType = GTI.getIndexedType();
    bool IsFirstType = true;
    unsigned NumVarIndices = 0;
    for (auto Pair : enumerate(Src->indices())) {
      if (!isa<ConstantInt>(Pair.value())) {
        BaseType = GTI.getIndexedType();
        IsFirstType = false;
        NumVarIndices = Pair.index() + 1;
      }
      ++GTI;
    }

    // Determine the offset for the constant suffix of Src.
    APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
    if (NumVarIndices != Src->getNumIndices()) {
      // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
      if (isa<ScalableVectorType>(BaseType))
        return nullptr;
      // ... (accumulate the offset of the constant suffix into Offset)
    }

    // Add the offset for GEP (which is fully constant).
    if (!GEP.accumulateConstantOffset(DL, Offset))
      return nullptr;

    // Convert the total offset back into indices.
    SmallVector<APInt> ConstIndices =
        DL.getGEPIndicesForOffset(BaseType, Offset);
    if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
      // If both GEPs are constant-indexed, and cannot be merged in either
      // way, convert them to a GEP of i8 at the combined byte offset.
      if (Src->hasAllConstantIndices())
        // ...
      return nullptr;
    }

    bool IsInBounds = isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP));
    SmallVector<Value *> Indices;
    append_range(Indices, drop_end(Src->indices(),
                                   Src->getNumIndices() - NumVarIndices));
    for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
      Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
      // Even if the total offset is inbounds, we may end up representing it
      // by first performing a larger negative offset, and then a smaller
      // positive one. The large negative offset might go out of bounds. Only
      // preserve inbounds if all signs are the same.
      IsInBounds &= Idx.isNonNegative() == ConstIndices[0].isNonNegative();
    }

    return replaceInstUsesWith(
        GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
                               Indices, "", IsInBounds));
  }

  if (Src->getResultElementType() != GEP.getSourceElementType())
    return nullptr;

  SmallVector<Value *, 8> Indices;

  // Find out whether the last index in the source GEP is a sequential idx.
  bool EndsWithSequential = false;
  for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
       I != E; ++I)
    EndsWithSequential = I.isSequential();

  // Can we combine the two pointer arithmetics offsets?
  if (EndsWithSequential) {
    // Replace: gep (gep %P, long B), long A, ...
    // With:    T = long A+B; gep %P, T, ...
    Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
    Value *GO1 = GEP.getOperand(1);

    // If they aren't the same type, then the input hasn't been processed
    // by the loop above yet (which canonicalizes sequential index types to
    // intptr_t). Just avoid transforming this until the input has been
    // normalized.
    if (SO1->getType() != GO1->getType())
      return nullptr;

    Value *Sum =
        simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
    // Only do the combine when we are sure the cost after the merge is never
    // more than that before the merge.
    if (Sum == nullptr)
      return nullptr;

    // Update the GEP in place if possible.
    if (Src->getNumOperands() == 2) {
      GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
      replaceOperand(GEP, 0, Src->getOperand(0));
      replaceOperand(GEP, 1, Sum);
      return &GEP;
    }
    Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
    Indices.push_back(Sum);
    Indices.append(GEP.op_begin() + 2, GEP.op_end());
  } else if (isa<Constant>(*GEP.idx_begin()) &&
             cast<Constant>(*GEP.idx_begin())->isNullValue() &&
             Src->getNumOperands() != 1) {
    // Otherwise we can do the fold if the first index of the GEP is a zero.
    Indices.append(Src->op_begin() + 1, Src->op_end());
    Indices.append(GEP.idx_begin() + 1, GEP.idx_end());
  }

  if (!Indices.empty())
    return replaceInstUsesWith(
        GEP, Builder.CreateGEP(
                 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
                 isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))));

  return nullptr;
}
Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  Value *PtrOp = GEP.getOperand(0);
  SmallVector<Value *, 8> Indices(GEP.indices());
  Type *GEPType = GEP.getType();
  Type *GEPEltType = GEP.getSourceElementType();
  bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
  // ...

  // For vector geps, use the generic demanded vector support.
  // Skip if GEP return type is scalable. The number of elements is unknown at
  // compile-time.
  if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
    auto VWidth = GEPFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    // ... (SimplifyDemandedVectorElts over an all-ones demanded mask)
  }

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  bool MadeChange = false;

  // Index width may not be the same width as pointer width.
  // Data layout chooses the right type based on supported integer types.
  Type *NewScalarIndexTy =
      DL.getIndexType(GEP.getPointerOperandType()->getScalarType());

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
       ++I, ++GTI) {
    // Skip indices into struct types.
    if (GTI.isStruct())
      continue;

    Type *IndexTy = (*I)->getType();
    Type *NewIndexType =
        IndexTy->isVectorTy()
            ? VectorType::get(NewScalarIndexTy,
                              cast<VectorType>(IndexTy)->getElementCount())
            : NewScalarIndexTy;

    // ... (indices displacing by multiples of a zero-size type become zero)

    if (IndexTy != NewIndexType) {
      // If we are using a wider index than needed for this platform, shrink
      // it to what we need. If narrower, sign-extend it to what we need.
      // This explicit cast can make subsequent optimizations more obvious.
      *I = Builder.CreateIntCast(*I, NewIndexType, true);
      MadeChange = true;
    }
  }
  if (MadeChange)
    return &GEP;

  // Check to see if the inputs to the PHI node are getelementptr instructions.
  if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
    auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
    if (!Op1)
      return nullptr;

    // Don't fold a GEP into itself through a PHI node. This can only happen
    // through the back-edge of a loop...
    // ...

    int DI = -1;

    for (auto I = PN->op_begin()+1, E = PN->op_end(); I != E; ++I) {
      auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
      if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
          Op1->getSourceElementType() != Op2->getSourceElementType())
        return nullptr;

      // Keep track of the type as we walk the GEP.
      Type *CurTy = nullptr;

      for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
        if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
          return nullptr;

        if (Op1->getOperand(J) != Op2->getOperand(J)) {
          if (DI == -1) {
            // We have not seen any differences yet in the GEPs feeding the
            // PHI yet, so we record this one if it is allowed to be a
            // variable.
            // ...
          } else {
            // The GEP is different by more than one input. While this could
            // be extended to support GEPs that vary by more than one
            // variable, it doesn't make sense since it greatly increases the
            // complexity and would result in an R+R+R addressing mode which
            // no backend directly supports.
            return nullptr;
          }
        }

        // Sink down a layer of the type for the next iteration.
        if (J > 0) {
          if (J == 1) {
            CurTy = Op1->getSourceElementType();
          } else {
            // ...
            assert(CurTy && "No current type?");
            // ...
          }
        }
      }
    }

    // If not all GEPs are identical we'll have to create a new PHI node.
    // Check that the old PHI node has only one use so that it will get
    // removed.
    if (DI != -1 && !PN->hasOneUse())
      return nullptr;

    auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
    if (DI == -1) {
      // All the GEPs feeding the PHI are identical. Clone one down into our
      // BB so that it can be merged with the current GEP.
    } else {
      // All the GEPs feeding the PHI differ in a single offset. Clone a GEP
      // into the current block so it can be merged, and create a new PHI to
      // set that index.
      PHINode *NewPN;
      {
        IRBuilderBase::InsertPointGuard Guard(Builder);
        Builder.SetInsertPoint(PN);
        NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
                                  PN->getNumOperands());
      }

      for (auto &I : PN->operands())
        NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
                           PN->getIncomingBlock(I));

      NewGEP->setOperand(DI, NewPN);
    }

    NewGEP->insertInto(GEP.getParent(),
                       GEP.getParent()->getFirstInsertionPt());
    return replaceOperand(GEP, 0, NewGEP);
  }

  if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
    if (Instruction *I = visitGEPOfGEP(GEP, Src))
      return I;

  // Skip if GEP source element type is scalable. The type alloc size is
  // unknown at compile-time.
  if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
    unsigned AS = GEP.getPointerAddressSpace();
    if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
        DL.getIndexSizeInBits(AS)) {
      uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();

      bool Matched = false;
      uint64_t C;
      Value *V = nullptr;
      if (TyAllocSize == 1) {
        V = GEP.getOperand(1);
        Matched = true;
      } else if (match(GEP.getOperand(1),
                       m_AShr(m_Value(V), m_ConstantInt(C)))) {
        if (TyAllocSize == 1ULL << C)
          Matched = true;
      } else if (match(GEP.getOperand(1),
                       m_SDiv(m_Value(V), m_ConstantInt(C)))) {
        if (TyAllocSize == C)
          Matched = true;
      }
      // ... (if Matched, fold ptrtoint/sub based GEPs)
    }
  }
  // ...

  if (!GEP.isInBounds()) {
    unsigned IdxWidth =
        DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
    APInt BasePtrOffset(IdxWidth, 0);
    Value *UnderlyingPtrOp =
        PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
    if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
      if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
          BasePtrOffset.isNonNegative()) {
        APInt AllocSize(
            IdxWidth,
            DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinValue());
        if (BasePtrOffset.ule(AllocSize)) {
          return GetElementPtrInst::CreateInBounds(
              GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
        }
      }
    }
  }

  // ...
  return nullptr;
}
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
                                         Instruction *AI) {
  if (isa<ConstantPointerNull>(V))
    return true;
  if (auto *LI = dyn_cast<LoadInst>(V))
    return isa<GlobalVariable>(LI->getPointerOperand());
  // Two distinct allocations will never be equal.
  return isAllocLikeFn(V, &TLI) && V != AI;
}

/// Given a call CB which uses an address UsedV, return true if we can prove
/// the call's only possible effect is storing to V.
static bool isRemovableWrite(CallBase &CB, Value *UsedV,
                             const TargetLibraryInfo &TLI) {
  // ...
  // If the only possible side effect of the call is writing to the alloca,
  // and the result isn't used, we can safely remove any reads implied by the
  // call including those which might read the alloca itself.
  std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
  return Dest && Dest->Ptr == UsedV;
}
static bool isAllocSiteRemovable(Instruction *AI,
                                 SmallVectorImpl<WeakTrackingVH> &Users,
                                 const TargetLibraryInfo &TLI) {
  SmallVector<Instruction *, 4> Worklist;
  Worklist.push_back(AI);

  do {
    Instruction *PI = Worklist.pop_back_val();
    for (User *U : PI->users()) {
      Instruction *I = cast<Instruction>(U);
      switch (I->getOpcode()) {
      default:
        // Give up the moment we see something we can't handle.
        return false;

      case Instruction::AddrSpaceCast:
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
        Users.emplace_back(I);
        Worklist.push_back(I);
        continue;

      case Instruction::ICmp: {
        ICmpInst *ICI = cast<ICmpInst>(I);
        // We can fold eq/ne comparisons with null to false/true,
        // respectively. We also fold comparisons in some conditions provided
        // the alloc has not escaped (see isNeverEqualToUnescapedAlloc).
        if (!ICI->isEquality())
          return false;
        unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
          return false;
        Users.emplace_back(I);
        continue;
      }

      case Instruction::Call:
        // Ignore no-op and store intrinsics.
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
          switch (II->getIntrinsicID()) {
          default:
            return false;

          case Intrinsic::memmove:
          case Intrinsic::memcpy:
          case Intrinsic::memset: {
            MemIntrinsic *MI = cast<MemIntrinsic>(II);
            if (MI->isVolatile() || MI->getRawDest() != PI)
              return false;
            [[fallthrough]];
          }
          case Intrinsic::assume:
          case Intrinsic::invariant_start:
          case Intrinsic::invariant_end:
          case Intrinsic::lifetime_start:
          case Intrinsic::lifetime_end:
          case Intrinsic::objectsize:
            Users.emplace_back(I);
            continue;
          case Intrinsic::launder_invariant_group:
          case Intrinsic::strip_invariant_group:
            Users.emplace_back(I);
            Worklist.push_back(I);
            continue;
          }
        }
        // ... (also accept removable writes and matching free/realloc calls)
        return false;

      case Instruction::Store: {
        StoreInst *SI = cast<StoreInst>(I);
        if (SI->isVolatile() || SI->getPointerOperand() != PI)
          return false;
        Users.emplace_back(I);
        continue;
      }
      }
      llvm_unreachable("missing a return?");
    }
  } while (!Worklist.empty());
  return true;
}
Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
  SmallVector<WeakTrackingVH, 64> Users;

  // If we are removing an alloca with a dbg.declare, insert dbg.value calls
  // before each store.
  SmallVector<DbgVariableIntrinsic *, 8> DVIs;
  std::unique_ptr<DIBuilder> DIB;
  if (isa<AllocaInst>(MI)) {
    findDbgUsers(DVIs, &MI);
    DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
  }

  if (isAllocSiteRemovable(&MI, Users, TLI)) {
    // Lowering all @llvm.objectsize calls first because they may use a
    // bitcast/GEP of the alloca we are removing.
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      // ...
    }

    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      if (!Users[i])
        continue;

      Instruction *I = cast<Instruction>(&*Users[i]);
      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
        replaceInstUsesWith(*C,
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (auto *SI = dyn_cast<StoreInst>(I)) {
        for (auto *DVI : DVIs)
          if (DVI->isAddressOfVariable())
            ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
      } else {
        // Casts, GEP, or anything else: we're about to delete this
        // instruction, so it can't have any valid uses.
        replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
      }
      eraseInstFromFunction(*I);
    }
    // ...
    // Remove debug intrinsics which describe the value contained within the
    // alloca, and any that use the alloca address with a deref.
    for (auto *DVI : DVIs)
      if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
        DVI->eraseFromParent();

    return eraseInstFromFunction(MI);
  }
  // ...
  return nullptr;
}
/// Move the call to free before a NULL test.
static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
                                                const DataLayout &DL) {
  // ... (validate that the free lies in a block of the shape:
  //        %isnull = icmp eq %ptr, null
  //        br i1 %isnull, label %end, label %free-block
  //      where the free block falls through to %end)
  // The free block may contain only the free, no-op casts of its argument,
  // and its unconditional terminator.
  if (FreeInstrBB->size() != 2) {
    for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
      if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
        continue;
      auto *Cast = dyn_cast<CastInst>(&Inst);
      if (!Cast || !Cast->isNoopCast(DL))
        return nullptr;
    }
  }
  // ...
  assert(FreeInstrBB->getUniquePredecessor() == PredBB &&
         "Broken CFG: missing edge from predecessor to successor");

  // At this point, we know that everything in FreeInstrBB can be moved
  // before TI.
  for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
    if (&Instr == FreeInstrBBTerminator)
      break;
    Instr.moveBefore(TI);
  }
  assert(FreeInstrBB->size() == 1 &&
         "Only the branch instruction should remain");

  // Now that we've moved the call to free before the NULL check, we have to
  // remove any attributes on its parameter that imply it's non-null, because
  // those attributes might not be valid anymore.
  AttributeList Attrs = FI.getAttributes();
  Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
  Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
  if (Dereferenceable.isValid()) {
    uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
    Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
                                       Attribute::Dereferenceable);
    Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
  }
  FI.setAttributes(Attrs);

  return &FI;
}
Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Leave a marker since we can't modify the CFG here.
    CreateNonTerminatorUnreachable(&FI);
    return eraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction. This can happen in stl
  // code when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return eraseInstFromFunction(FI);

  // If we had free(realloc(...)) with no intervening uses, then eliminate the
  // realloc() entirely.
  CallInst *CI = dyn_cast<CallInst>(Op);
  // ...
}
Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
  // Try to remove the previous instruction if it must lead to unreachable.
  // This includes instructions like stores and "llvm.assume" that may not get
  // removed by simple dead code elimination.
  while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
    // ...
    if (Prev->isEHPad())
      return nullptr; // Can not drop any more instructions. We're done here.
    // ... (replace remaining uses with poison and erase Prev)
  }
  assert(I.getParent()->sizeWithoutDebug() == 1 && "The block is now empty.");
  // ...
  return nullptr;
}
Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
  assert(BI.isUnconditional() && "Only for unconditional branches.");

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends
  // with an unconditional branch, try to move the store to the successor
  // block.
  auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
    auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
      return BBI->isDebugOrPseudoInst() ||
             (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
    };

    BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
    do {
      if (BBI != FirstInstr)
        --BBI;
    } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));

    return dyn_cast<StoreInst>(BBI);
  };

  if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
    if (mergeStoreIntoSuccessor(*SI))
      return &BI;

  return nullptr;
}
/// If a block is dead due to a known branch condition, remove instructions
/// in it.
static bool handlePotentiallyDeadBlock(BasicBlock *BB, InstCombiner &IC) {
  bool Changed = false;
  for (Instruction &Inst : make_early_inc_range(*BB)) {
    if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
      IC.replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
      Changed = true;
    }
    if (Inst.isEHPad() || Inst.getType()->isTokenTy())
      continue;
    IC.eraseInstFromFunction(Inst);
    Changed = true;
  }
  return Changed;
}

static bool handlePotentiallyDeadSuccessors(BasicBlock *BB,
                                            BasicBlock *LiveSucc,
                                            InstCombiner &IC) {
  bool Changed = false;
  for (BasicBlock *Succ : successors(BB))
    if (Succ != LiveSucc /* ... (and BB is Succ's only predecessor) */)
      Changed |= handlePotentiallyDeadBlock(Succ, IC);
  return Changed;
}
Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
  if (BI.isUnconditional())
    return visitUnconditionalBranchInst(BI);

  // Change br (not X), label True, label False to: br X, label False, True
  Value *Cond = BI.getCondition();
  Value *X;
  if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
    // Swap destinations and condition.
    BI.swapSuccessors();
    return replaceOperand(BI, 0, X);
  }

  // Canonicalize logical-and-with-invert as logical-or-with-invert, by
  // inverting the condition of the "select" form of the logical op.
  Value *Y;
  if (isa<SelectInst>(Cond) &&
      match(Cond,
            m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
    // ...
  }

  // ...
  // Canonicalize, for example, fcmp_one -> fcmp_oeq.
  CmpInst::Predicate Pred;
  if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
      !isCanonicalPredicate(Pred)) {
    // Swap destinations and condition.
    auto *Cmp = cast<CmpInst>(Cond);
    Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
    BI.swapSuccessors();
    Worklist.push(Cmp);
    return &BI;
  }

  // A branch on a constant condition makes the untaken successor dead.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    if (handlePotentiallyDeadSuccessors(
            BI.getParent(), BI.getSuccessor(!CI->getZExtValue()), *this))
      return &BI;

  return nullptr;
}

Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  Value *Op0;
  ConstantInt *AddRHS;
  if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
    // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
    for (auto Case : SI.cases()) {
      Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
      assert(isa<ConstantInt>(NewCase) &&
             "Result of expression should be constant");
      Case.setValue(cast<ConstantInt>(NewCase));
    }
    return replaceOperand(SI, 0, Op0);
  }

  // A switch on undef, or on a constant, leaves most successors dead.
  if (isa<UndefValue>(Cond) &&
      handlePotentiallyDeadSuccessors(SI.getParent(),
                                      /*LiveSucc*/ nullptr, *this))
    return &SI;
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    if (handlePotentiallyDeadSuccessors(
            SI.getParent(), SI.findCaseValue(CI)->getCaseSuccessor(), *this))
      return &SI;
  // Compute the number of leading bits known to be identical across the
  // condition and all case values, and try to shrink the switch condition.
  KnownBits Known = computeKnownBits(Cond, 0, &SI);
  unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
  unsigned LeadingKnownOnes = Known.countMinLeadingOnes();

  // Compute the number of leading bits we can ignore.
  // TODO: A better way to determine this would use ComputeNumSignBits().
  for (const auto &C : SI.cases()) {
    LeadingKnownZeros =
        std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
    LeadingKnownOnes =
        std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
  }

  unsigned NewWidth =
      Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);

  // Shrink the condition operand if the new type is smaller than the old
  // type. But do not shrink to a non-standard type, because the backend can't
  // generate good code for that yet.
  if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
      shouldChangeType(Known.getBitWidth(), NewWidth)) {
    IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
    Builder.SetInsertPoint(&SI);
    Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");

    for (auto Case : SI.cases()) {
      APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
      Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
    }
    return replaceOperand(SI, 0, NewCond);
  }

  return nullptr;
}
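
// The narrowing computation above, modeled with std::countl_zero/countl_one
// (C++20) in place of APInt::countl_zero/countl_one: the switch condition
// only needs BitWidth - max(common leading zeros, common leading ones) bits
// to distinguish all cases. Standalone sketch for a fixed 64-bit width, not
// LLVM API.
#include <algorithm>
#include <bit>
#include <cstdint>
#include <vector>

namespace switch_narrow_sketch {
inline unsigned narrowedWidth(const std::vector<uint64_t> &CaseValues) {
  unsigned LeadingZeros = 64, LeadingOnes = 64;
  for (uint64_t C : CaseValues) {
    LeadingZeros = std::min(LeadingZeros, unsigned(std::countl_zero(C)));
    LeadingOnes = std::min(LeadingOnes, unsigned(std::countl_one(C)));
  }
  return 64 - std::max(LeadingZeros, LeadingOnes);
}
// e.g. cases {2, 5, 7} share 61 leading zeros => a 3-bit condition suffices.
} // namespace switch_narrow_sketch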
Instruction *
InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
  auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
  if (!WO)
    return nullptr;

  Intrinsic::ID OvID = WO->getIntrinsicID();
  const APInt *C = nullptr;
  if (match(WO->getRHS(), m_APIntAllowUndef(C))) {
    if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
                                 OvID == Intrinsic::umul_with_overflow)) {
      // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
      if (C->isAllOnes())
        return BinaryOperator::CreateNeg(WO->getLHS());
      // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
      if (C->isPowerOf2()) {
        return BinaryOperator::CreateShl(
            WO->getLHS(),
            ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
      }
    }
  }

  // We're extracting from an overflow intrinsic. See if we're the only user.
  // That allows us to simplify multiple result intrinsics to simpler things
  // that just get one value.
  if (!WO->hasOneUse())
    return nullptr;

  // Check if we're grabbing only the result of a 'with overflow' intrinsic
  // and replace it with a traditional binary instruction.
  if (*EV.idx_begin() == 0) {
    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
    // Replace the old instruction's uses with poison.
    replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
    eraseInstFromFunction(*WO);
    return BinaryOperator::Create(BinOp, LHS, RHS);
  }

  assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");

  // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
  if (OvID == Intrinsic::usub_with_overflow)
    return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());

  // smul with i1 types overflows when both sides are set:
  // -1 * -1 == +1, but that cannot be represented in i1.
  if (OvID == Intrinsic::smul_with_overflow &&
      WO->getLHS()->getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());

  // If only the overflow result is used, and the right hand side is a
  // constant (or constant splat), we can remove the intrinsic by directly
  // checking for overflow.
  if (C) {
    // Compute the no-wrap range for LHS given RHS=C, then construct an
    // equivalent icmp, potentially using an offset.
    ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
        WO->getBinaryOp(), *C, WO->getNoWrapKind());

    CmpInst::Predicate Pred;
    APInt NewRHSC, Offset;
    NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
    auto *OpTy = WO->getRHS()->getType();
    auto *NewLHS = WO->getLHS();
    if (Offset != 0)
      NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
    return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
                        ConstantInt::get(OpTy, NewRHSC));
  }

  return nullptr;
}
Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();
  // ...
  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices.
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different
        // elements. This means the extract is not influenced by the insert,
        // so we can replace the aggregate operand of the extract with the
        // aggregate operand of the insert.
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.getIndices());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: the index lists are identical, so the
      // extract produces exactly the inserted value.
      return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
    // ... (one index list is a prefix of the other: recurse with the
    //      trailing indices)
  }

  if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
    return R;

  if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
    // Bail out if the aggregate contains scalable vector type.
    if (auto *STy = dyn_cast<StructType>(Agg->getType());
        STy && STy->containsScalableVectorType())
      return nullptr;

    // If the (non-volatile) load only has one use, we can rewrite this to a
    // load from a GEP.
    if (L->isSimple() && L->hasOneUse()) {
      // extractvalue has integer indices, getelementptr has Value*s. Convert.
      SmallVector<Value *, 4> Indices;
      // Prefix an i32 0 since we need the first element.
      Indices.push_back(Builder.getInt32(0));
      for (unsigned Idx : EV.indices())
        Indices.push_back(Builder.getInt32(Idx));

      // We need to insert these at the location of the old load, not at that
      // of the extractvalue.
      Builder.SetInsertPoint(L);
      Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
                                             L->getPointerOperand(), Indices);
      Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
      // Whatever aliasing information we had for the original load must also
      // hold for the smaller load.
      NL->setAAMetadata(L->getAAMetadata());
      // Returning the load directly will cause the main loop to insert it in
      // the wrong spot, so use replaceInstUsesWith().
      return replaceInstUsesWith(EV, NL);
    }
  }

  if (auto *PN = dyn_cast<PHINode>(Agg))
    if (Instruction *Res = foldOpIntoPhi(EV, PN))
      return Res;
  // ...
  return nullptr;
}
/// Return 'true' if the given typeinfo will match anything.
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
  switch (Personality) {
  // ... (per-personality handling; a null typeinfo is a catch-all for the
  //      C++-style personalities, while cleanup-only personalities have no
  //      meaningful catch clauses)
  }
  llvm_unreachable("invalid enum");
}

static bool shorter_filter(const Value *LHS, const Value *RHS) {
  return
    cast<ArrayType>(LHS->getType())->getNumElements()
  <
    cast<ArrayType>(RHS->getType())->getNumElements();
}
Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
  // The logic here should be correct for any real-world personality function.
  // However if that turns out not to be true, the offending logic can always
  // be conditioned on the personality function, like the catch-all logic is.
  EHPersonality Personality =
      classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());

  // Simplify the list of clauses, eg by removing repeated catch clauses
  // (these are often created by inlining).
  bool MakeNewInstruction = false; // If true, recreate using the following:
  SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
  bool CleanupFlag = LI.isCleanup();      // - The new instruction is a cleanup.

  SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
  for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
    bool isLastClause = i + 1 == e;
    if (LI.isCatch(i)) {
      // A catch clause.
      Constant *CatchClause = LI.getClause(i);
      Constant *TypeInfo = CatchClause->stripPointerCasts();

      // If we already saw this clause, there is no point in having a second
      // copy of it.
      if (AlreadyCaught.insert(TypeInfo).second) {
        // This catch clause was not already seen.
        NewClauses.push_back(CatchClause);
      } else {
        // Repeated catch clause - drop the redundant copy.
        MakeNewInstruction = true;
      }

      // If this is a catch-all then there is no point in keeping any
      // following clauses or marking the landingpad as having a cleanup.
      if (isCatchAll(Personality, TypeInfo)) {
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }
    } else {
      // A filter clause.  If any of the filter elements were already caught
      // then they can be dropped from the filter.  It is tempting to try to
      // exploit the filter further by saying that any typeinfo that does not
      // occur in the filter can't be caught later (and thus can be dropped).
      // However this would be wrong, since typeinfos can match without being
      // equal (for example if one represents a C++ class, and the other some
      // class derived from it).
      assert(LI.isFilter(i) && "Unsupported landingpad clause!");
      Constant *FilterClause = LI.getClause(i);
      ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
      unsigned NumTypeInfos = FilterType->getNumElements();

      // An empty filter catches everything, so there is no point in keeping
      // any following clauses or marking the landingpad as having a cleanup.
      if (!NumTypeInfos) {
        NewClauses.push_back(FilterClause);
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }

      bool MakeNewFilter = false;                 // If true, make a new filter.
      SmallVector<Constant *, 16> NewFilterElts;  // New elements.
      if (isa<ConstantAggregateZero>(FilterClause)) {
        // Not an empty filter - it contains at least one null typeinfo.
        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
        Constant *TypeInfo =
            Constant::getNullValue(FilterType->getElementType());
        // If this typeinfo is a catch-all then the filter can never match.
        if (isCatchAll(Personality, TypeInfo)) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // There is no point in having multiple copies of this typeinfo, so
        // discard all but the first copy if there is more than one.
        NewFilterElts.push_back(TypeInfo);
        if (NumTypeInfos > 1)
          MakeNewFilter = true;
      } else {
        ConstantArray *Filter = cast<ConstantArray>(FilterClause);
        SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
        NewFilterElts.reserve(NumTypeInfos);

        // Remove any filter elements that were already caught or that already
        // occurred in the filter.  While there, see if any of the elements are
        // catch-alls.  If so, the filter can be discarded.
        bool SawCatchAll = false;
        for (unsigned j = 0; j != NumTypeInfos; ++j) {
          Constant *Elt = Filter->getOperand(j);
          Constant *TypeInfo = Elt->stripPointerCasts();
          if (isCatchAll(Personality, TypeInfo)) {
            // This element is a catch-all.  Bail out, noting this fact.
            SawCatchAll = true;
            break;
          }
          // ... (drop elements that were already caught)
          // There is no point in having multiple copies of the same typeinfo
          // in a filter, so only add it if we didn't already.
          if (SeenInFilter.insert(TypeInfo).second)
            NewFilterElts.push_back(cast<Constant>(Elt));
        }
        // A filter containing a catch-all cannot match anything by definition.
        if (SawCatchAll) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // If we dropped something from the filter, make a new one.
        if (NewFilterElts.size() < NumTypeInfos)
          MakeNewFilter = true;
      }
      if (MakeNewFilter) {
        FilterType = ArrayType::get(FilterType->getElementType(),
                                    NewFilterElts.size());
        FilterClause = ConstantArray::get(FilterType, NewFilterElts);
        MakeNewInstruction = true;
      }

      NewClauses.push_back(FilterClause);

      // If the new filter is empty then it will catch everything, so there is
      // no point in keeping any following clauses or marking the landingpad
      // as having a cleanup.  The case of the original filter being empty was
      // already handled above.
      if (MakeNewFilter && !NewFilterElts.size()) {
        assert(MakeNewInstruction && "New filter but not a new instruction!");
        CleanupFlag = false;
        break;
      }
    }
  }

  // If several filters occur in a row then reorder them so that the shortest
  // filters come first (those with the smallest number of elements).  This is
  // advantageous because shorter filters are more likely to match, speeding
  // up unwinding, but mostly because it increases the effectiveness of the
  // other filter optimizations below.
  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
    unsigned j;
    // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
    for (j = i; j != e; ++j)
      if (!isa<ArrayType>(NewClauses[j]->getType()))
        break;

    // Check whether the filters are already sorted by length.  We need to know
    // if sorting them is actually going to do anything so that we only make a
    // new landingpad instruction if it does.
    for (unsigned k = i; k + 1 < j; ++k)
      if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
        // Not sorted, so sort the filters now.  Doing an unstable sort would
        // be correct too but reordering filters pointlessly might confuse
        // users.
        std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
                         shorter_filter);
        MakeNewInstruction = true;
        break;
      }

    // Look for the next batch of filters.
    i = j + 1;
  }

  // If a filter F is a subset of a later filter L, then L can be discarded
  // (an exception that passes F will also pass L). The loop below exploits
  // the special cases of this that are valid for any typeinfo matching
  // semantics.
  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
    // Examine each filter in turn.
    Value *Filter = NewClauses[i];
    ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
    if (!FTy)
      // Not a filter - skip it.
      continue;
    unsigned FElts = FTy->getNumElements();
    // Examine each filter following this one.  Doing this backwards means
    // that we don't have to worry about filters disappearing under us when
    // removed.
    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
      Value *LFilter = NewClauses[j];
      ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
      if (!LTy)
        // Not a filter - skip it.
        continue;
      // If Filter is a subset of LFilter, i.e. every element of Filter is
      // also an element of LFilter, then discard LFilter.
      SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
      // If Filter is empty then it is a subset of LFilter.
      if (!FElts) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
        continue;
      }
      unsigned LElts = LTy->getNumElements();
      // If Filter is longer than LFilter then it cannot be a subset of it.
      if (FElts > LElts)
        continue;
      // At this point we know that LFilter has at least one element.
      if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
        // Filter is a subset of LFilter iff Filter contains only zeros (as we
        // already know that Filter is not longer than LFilter).
        if (isa<ConstantAggregateZero>(Filter)) {
          assert(FElts <= LElts && "Should have handled this case earlier!");
          // Discard LFilter.
          NewClauses.erase(J);
          MakeNewInstruction = true;
        }
        continue;
      }
      ConstantArray *LArray = cast<ConstantArray>(LFilter);
      if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
        // Since Filter is non-empty and contains only zeros, it is a subset
        // of LFilter iff LFilter contains a zero.
        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
        for (unsigned l = 0; l != LElts; ++l)
          if (LArray->getOperand(l)->isNullValue()) {
            // LFilter contains a zero - discard it.
            NewClauses.erase(J);
            MakeNewInstruction = true;
            break;
          }
        continue;
      }
      // At this point we know that both filters are ConstantArrays.  Loop
      // over operands to see whether every element of Filter is also an
      // element of LFilter.  Since filters tend to be short this is probably
      // faster than using a method that scales nicely.
      ConstantArray *FArray = cast<ConstantArray>(Filter);
      bool AllFound = true;
      for (unsigned f = 0; f != FElts; ++f) {
        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
        AllFound = false;
        for (unsigned l = 0; l != LElts; ++l) {
          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
          if (LTypeInfo == FTypeInfo) {
            AllFound = true;
            break;
          }
        }
        if (!AllFound)
          break;
      }
      if (AllFound) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
      }
    }
  }

  // If we changed any of the clauses, replace the old landingpad instruction
  // with a new one.
  if (MakeNewInstruction) {
    LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
                                                 NewClauses.size());
    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
      NLI->addClause(NewClauses[i]);
    // A landing pad with no clauses must have the cleanup flag set.  It is
    // theoretically possible, though highly unlikely, that we eliminated all
    // clauses.  If so, force the cleanup flag to true.
    if (NewClauses.empty())
      CleanupFlag = true;
    NLI->setCleanup(CleanupFlag);
    return NLI;
  }

  // Even if none of the clauses changed, we may nonetheless have understood
  // that the cleanup flag is pointless.  Clear it if so.
  if (LI.isCleanup() != CleanupFlag) {
    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
    LI.setCleanup(CleanupFlag);
    return &LI;
  }

  return nullptr;
}
Instruction *
InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
  Value *OrigOp = OrigFI.getOperand(0);
  auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);

  // While we could change the other users of OrigOp to use freeze(OrigOp),
  // that potentially reduces their optimization potential, so let's only do
  // this if OrigOp is only used once.
  if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
    return nullptr;

  // We can't push the freeze through an instruction which can itself create
  // poison.  If the only source of new poison is flags, we can simply strip
  // them (since we know the only use is the freeze and nothing can benefit
  // from them.)
  if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
                             /*ConsiderFlagsAndMetadata*/ false))
    return nullptr;

  // If operand is guaranteed not to be poison, there is no need to add freeze
  // to the operand. So we first find the operand that is not guaranteed to be
  // poison.
  Use *MaybePoisonOperand = nullptr;
  for (Use &U : OrigOpInst->operands()) {
    if (isa<MetadataAsValue>(U.get()) ||
        isGuaranteedNotToBeUndefOrPoison(U.get()))
      continue;
    if (!MaybePoisonOperand)
      MaybePoisonOperand = &U;
    else
      return nullptr;
  }

  OrigOpInst->dropPoisonGeneratingFlagsAndMetadata();

  // If all operands are guaranteed to be non-poison, we can drop freeze.
  if (!MaybePoisonOperand)
    return OrigOp;

  Builder.SetInsertPoint(OrigOpInst);
  auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
      MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");

  replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
  return OrigOp;
}

Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
                                                        PHINode *PN) {
  // Detect whether this is a recurrence with a start value and some number of
  // backedge values. We'll check whether we can push the freeze through the
  // backedge values (possibly dropping poison flags along the way) until we
  // reach the phi again. In that case, we can move the freeze to the start
  // value.
  Use *StartU = nullptr;
  SmallVector<Value *> Worklist;
  for (Use &U : PN->incoming_values()) {
    if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
      // Add backedge value to worklist.
      Worklist.push_back(U.get());
      continue;
    }

    // Don't bother handling multiple start values.
    if (StartU)
      return nullptr;
    StartU = &U;
  }

  if (!StartU || Worklist.empty())
    return nullptr; // Not a recurrence.

  Value *StartV = StartU->get();
  BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
  bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
  // ...
  SmallPtrSet<Value *, 32> Visited;
  SmallVector<Instruction *> DropFlags;
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    if (Visited.size() > 32)
      return nullptr; // Limit the total number of values we inspect.

    // Assume that PN is non-poison, because it will be after the transform.
    if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
      continue;

    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || canCreateUndefOrPoison(cast<Operator>(I),
                                     /*ConsiderFlagsAndMetadata*/ false))
      return nullptr;

    DropFlags.push_back(I);
    append_range(Worklist, I->operands());
  }

  for (Instruction *I : DropFlags)
    I->dropPoisonGeneratingFlagsAndMetadata();

  if (StartNeedsFreeze) {
    Builder.SetInsertPoint(StartBB->getTerminator());
    Value *FrozenStartV = Builder.CreateFreeze(StartV,
                                               StartV->getName() + ".fr");
    replaceUse(*StartU, FrozenStartV);
  }
  return replaceInstUsesWith(FI, PN);
}
bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
  Value *Op = FI.getOperand(0);

  if (isa<Constant>(Op) || Op->hasOneUse())
    return false;

  // Move the freeze directly after the definition of its operand, so that it
  // dominates the maximum number of uses. Note that it may not dominate
  // *all* uses if the operand is an invoke/callbr and the use is in a phi on
  // the exceptional edge.
  Instruction *MoveBefore;
  if (isa<Argument>(Op)) {
    MoveBefore =
        &*FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
  } else {
    MoveBefore = cast<Instruction>(Op)->getInsertionPointAfterDef();
    if (!MoveBefore)
      return false;
  }

  bool Changed = false;
  if (&FI != MoveBefore) {
    FI.moveBefore(MoveBefore);
    Changed = true;
  }

  Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
    bool Dominates = DT.dominates(&FI, U);
    Changed |= Dominates;
    return Dominates;
  });

  return Changed;
}
static bool isUsedWithinShuffleVector(Value *V) {
  for (auto *U : V->users()) {
    if (isa<ShuffleVectorInst>(U))
      return true;
    // ... (also look through single-use bitcasts of V)
  }
  return false;
}
Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
  Value *Op0 = I.getOperand(0);
  // ...

  // freeze (phi const, x) --> phi const, (freeze x)
  if (auto *PN = dyn_cast<PHINode>(Op0)) {
    if (Instruction *NV = foldOpIntoPhi(I, PN))
      return NV;
    if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
      return NV;
  }

  if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
    return replaceInstUsesWith(I, NI);

  // ...
  // If I is freeze(undef), check its uses and fold it to the best constant:
  // all users must agree on one value, since freeze must produce a single
  // consistent value for every use - that is the reason "freeze" exists!
  auto getUndefReplacement = [&I](Type *Ty) {
    Constant *BestValue = nullptr;
    Constant *NullValue = Constant::getNullValue(Ty);
    for (const auto *U : I.users()) {
      Constant *C = NullValue;
      if (match(U, m_Or(m_Value(), m_Value())))
        C = Constant::getAllOnesValue(Ty);
      else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
        C = ConstantInt::getTrue(Ty);

      if (!BestValue)
        BestValue = C;
      else if (BestValue != C)
        BestValue = NullValue;
    }
    assert(BestValue && "Must have at least one use");
    return BestValue;
  };

  if (match(Op0, m_Undef())) {
    // Don't fold freeze(undef/poison) if it's used as a vector operand in a
    // shuffle. This may improve codegen for shuffles that allow unsimplified
    // operands.
    if (isUsedWithinShuffleVector(&I))
      return nullptr;
    return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
  }

  Constant *C;
  if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
    Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
    return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
  }

  // ...
  return nullptr;
}
/// Check for case where the call writes to an otherwise dead alloca.  This
/// shows up for unused out-params in idiomatic C/C++ code.
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
  auto *CB = dyn_cast<CallBase>(I);
  if (!CB)
    // TODO: handle e.g. store to alloca here - only worth doing if we extend
    // to handle multiple dead stores.
    return false;

  // ... (the call must be a removable write whose destination is a local
  //      alloca; then verify that the call is that alloca's only real user,
  //      looking through casts and GEPs)
  SmallVector<const User *> AllocaUsers;
  SmallPtrSet<const User *, 4> Visited;
  auto pushUsers = [&](const Instruction &I) {
    for (const User *U : I.users()) {
      if (Visited.insert(U).second)
        AllocaUsers.push_back(U);
    }
  };
  pushUsers(*AI);
  while (!AllocaUsers.empty()) {
    auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
    if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
        isa<AddrSpaceCastInst>(UserI)) {
      pushUsers(*UserI);
      continue;
    }
    if (UserI == CB)
      continue;
    // TODO: support lifetime intrinsics here
    return false;
  }
  return true;
}
/// Try to move the specified instruction from its current block into the
/// beginning of DestBlock, which can only happen if it's safe to move the
/// instruction past all of the instructions between it and the end of its
/// block.
bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
                                            BasicBlock *DestBlock) {
  BasicBlock *SrcBlock = I->getParent();

  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
      I->isTerminator())
    return false;

  // Do not sink static or dynamic alloca instructions. Static allocas must
  // remain in the entry block, and dynamic allocas must not be sunk in
  // between a stacksave / stackrestore pair, which would incorrectly shorten
  // its lifetime.
  if (isa<AllocaInst>(I))
    return false;

  // Do not sink into catchswitch blocks.
  if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
    return false;

  // Do not sink convergent call instructions.
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (CI->isConvergent())
      return false;
  }

  // Unless we can prove that the memory write isn't visibile except on the
  // path we're sinking to, we must bail.
  if (I->mayWriteToMemory()) {
    if (!SoleWriteToDeadLocal(I, TLI))
      return false;
  }

  // We can only sink load instructions if there is nothing between the load
  // and the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    // We don't want to do any sophisticated analysis (i.e finding the deepest
    // location to sink I to), so just check for all of the following
    // instructions potentially writing to memory.
    for (BasicBlock::iterator Scan = std::next(I->getIterator()),
                              E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  I->dropDroppableUses([&](const Use *U) {
    auto *I = dyn_cast<Instruction>(U->getUser());
    if (I && I->getParent() != DestBlock) {
      Worklist.add(I);
      return true;
    }
    return false;
  });
  /// FIXME: We could remove droppable uses that are not dominated by
  /// the new position.

  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
  I->moveBefore(&*InsertPos);
  ++NumSunkInst;

  // Also sink all related debug uses from the source basic block. Otherwise
  // we get debug use before the def. Attempt to salvage debug uses first, to
  // maximise the range variables have location for. If we cannot salvage,
  // then mark the location undef: we know it was supposed to receive a new
  // location here, but that computation has been sunk.
  SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
  findDbgUsers(DbgUsers, I);

  // For all debug values in the destination block, the sunk instruction
  // will still be available, so they do not need to be dropped.
  SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage;
  for (auto &DbgUser : DbgUsers)
    if (DbgUser->getParent() != DestBlock)
      DbgUsersToSalvage.push_back(DbgUser);

  // Process the sinking DbgUsersToSalvage in reverse order, as we only want
  // to clone the last appearing debug intrinsic for each given variable.
  SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
  for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
    if (DVI->getParent() == SrcBlock)
      DbgUsersToSink.push_back(DVI);
  llvm::sort(DbgUsersToSink,
             [](auto *A, auto *B) { return B->comesBefore(A); });

  SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
  SmallSet<DebugVariable, 4> SunkVariables;
  for (auto *User : DbgUsersToSink) {
    // A dbg.declare instruction should not be cloned, since there can only be
    // one per variable fragment. It should be left in the original place
    // because the sunk instruction is not an alloca (otherwise we could not
    // be here).
    if (isa<DbgDeclareInst>(User))
      continue;

    DebugVariable DbgUserVariable =
        DebugVariable(User->getVariable(), User->getExpression(),
                      User->getDebugLoc()->getInlinedAt());

    if (!SunkVariables.insert(DbgUserVariable).second)
      continue;

    // Leave dbg.assign intrinsics in their original positions and there
    // should be no need to insert a clone.
    if (isa<DbgAssignIntrinsic>(User))
      continue;

    DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
    if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
      DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
    LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
  }

  // Perform salvaging without the clones, then sink the clones.
  if (!DIIClones.empty()) {
    salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage);
    // The clones are in reverse order of original appearance, reverse again
    // to maintain the original order.
    for (auto &DIIClone : llvm::reverse(DIIClones)) {
      DIIClone->insertBefore(&*InsertPos);
      LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
    }
  }

  return true;
}
bool InstCombinerImpl::run() {
  while (!Worklist.isEmpty()) {
    // Walk deferred instructions in reverse order, and push them to the
    // worklist, which means they'll end up popped from the worklist in-order.
    while (Instruction *I = Worklist.popDeferred()) {
      // ...
      Worklist.push(I);
    }

    Instruction *I = Worklist.removeOne();
    if (I == nullptr)
      continue; // skip null elements

    // ... (DCE trivially dead instructions here)

    if (!DebugCounter::shouldExecute(VisitCounter))
      continue;

    // See if we can trivially sink this instruction to its user if we can
    // prove that the successor is not executed more frequently than our
    // block. Return the UserBlock if successful.
    auto getOptionalSinkBlockForInst =
        [this](Instruction *I) -> std::optional<BasicBlock *> {
      if (!EnableCodeSinking)
        return std::nullopt;

      BasicBlock *BB = I->getParent();
      BasicBlock *UserParent = nullptr;
      unsigned NumUsers = 0;

      for (auto *U : I->users()) {
        if (U->isDroppable())
          continue;
        if (NumUsers > MaxSinkNumUsers)
          return std::nullopt;

        Instruction *UserInst = cast<Instruction>(U);
        // Special handling for Phi nodes - get the block the use occurs in.
        if (PHINode *PN = dyn_cast<PHINode>(UserInst)) {
          for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
            if (PN->getIncomingValue(i) == I) {
              // Bail out if we have uses in different blocks. We don't do any
              // sophisticated analysis (i.e finding NearestCommonDominator of
              // these use blocks).
              if (UserParent && UserParent != PN->getIncomingBlock(i))
                return std::nullopt;
              UserParent = PN->getIncomingBlock(i);
            }
          }
          assert(UserParent && "expected to find user block!");
        } else {
          if (UserParent && UserParent != UserInst->getParent())
            return std::nullopt;
          UserParent = UserInst->getParent();
        }

        // Make sure these checks are done only once, naturally we do the
        // checks the first time we get the userparent, this will save compile
        // time.
        if (NumUsers == 0) {
          // Try sinking to another block. If that block is unreachable, then
          // do not bother. SimplifyCFG should handle it.
          if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
            return std::nullopt;

          // ... (only sink into a unique-predecessor successor, or into a
          //      block whose terminator passes control nowhere else)
        }

        NumUsers++;
      }

      // No user or only has droppable users.
      if (!UserParent)
        return std::nullopt;

      return UserParent;
    };

    auto OptBB = getOptionalSinkBlockForInst(I);
    if (OptBB) {
      auto *UserParent = *OptBB;
      // Okay, the CFG is simple enough, try to sink this instruction.
      if (tryToSinkInstruction(I, UserParent)) {
        LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
        MadeIRChange = true;
        // We'll add uses of the sunk instruction below, but since sinking can
        // expose opportunities for its *operands*, add them to the worklist.
        for (Use &U : I->operands())
          if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
            Worklist.push(OpI);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder.SetInsertPoint(I);
    Builder.CollectMetadataToCopy(
        I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});

    std::string OrigI;
    LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
                          << "    New = " << *Result << '\n');

        Result->copyMetadata(*I,
                             {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block.
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I->getIterator();

        // Are we replacing a PHI with something that isn't a PHI, or vice
        // versa?
        if (isa<PHINode>(Result) != isa<PHINode>(I)) {
          // We need to fix up the insertion point.
          if (isa<PHINode>(I)) // PHI -> Non-PHI
            InsertPos = InstParent->getFirstInsertionPt();
          else // Non-PHI -> PHI
            InsertPos = InstParent->getFirstNonPHI()->getIterator();
        }

        Result->insertInto(InstParent, InsertPos);

        // Push the new instruction and any users onto the worklist.
        Worklist.pushUsersToWorkList(*Result);
        Worklist.push(Result);

        eraseInstFromFunction(*I);
      } else {
        LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
                          << "    New = " << *I << '\n');
        // ... (the instruction was modified in place; DCE or requeue it)
      }
      MadeIRChange = true;
    }
  }

  Worklist.zap();
  return MadeIRChange;
}
class AliasScopeTracker {
  SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
  SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;

public:
  void analyse(Instruction *I) {
    // This seems to be faster than checking 'mayReadOrWriteMemory()'.
    if (!I->hasMetadataOtherThanDebugLoc())
      return;
    if (!I->mayReadOrWriteMemory())
      return;

    auto Track = [](Metadata *ScopeList, auto &Container) {
      const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
      if (!MDScopeList || !Container.insert(MDScopeList).second)
        return;
      for (const auto &MDOperand : MDScopeList->operands())
        if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
          Container.insert(MDScope);
    };

    Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
    Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
  }

  bool isNoAliasScopeDeclDead(Instruction *Inst) {
    NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
    if (!Decl)
      return false;

    assert(Decl->use_empty() &&
           "llvm.experimental.noalias.scope.decl in use ?");
    const MDNode *MDSL = Decl->getScopeList();
    assert(MDSL->getNumOperands() == 1 &&
           "llvm.experimental.noalias.scope should refer to a single scope");
    auto &MDOperand = MDSL->getOperand(0);
    if (auto *MD = dyn_cast<MDNode>(MDOperand))
      return !UsedAliasScopesAndLists.contains(MD) ||
             !UsedNoAliasScopesAndLists.contains(MD);

    // Not an MDNode? Throw it away.
    return true;
  }
};
/// Populate the IC worklist from a function, by walking it in depth-first
/// order and adding all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful. In
/// particular, we constant fold and DCE instructions as we go, to avoid
/// adding them to the worklist (this significantly speeds up instcombine on
/// code where many instructions are dead or constant). Additionally, if we
/// find a branch whose condition is a known constant, we only visit the
/// reachable successors.
static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
                                          const TargetLibraryInfo *TLI,
                                          InstructionWorklist &ICWorklist) {
  bool MadeIRChange = false;
  SmallPtrSet<BasicBlock *, 32> Visited;
  SmallVector<BasicBlock *, 256> Worklist;
  Worklist.push_back(&F.front());

  SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
  DenseMap<Constant *, Constant *> FoldedConstants;
  AliasScopeTracker SeenAliasScopes;

  do {
    BasicBlock *BB = Worklist.pop_back_val();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB).second)
      continue;

    for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
      // ConstantProp instruction if trivially constant.
      if (!Inst.use_empty() &&
          (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
        if (Constant *C = ConstantFoldInstruction(&Inst, DL, TLI)) {
          LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
                            << '\n');
          Inst.replaceAllUsesWith(C);
          ++NumConstProp;
          if (isInstructionTriviallyDead(&Inst, TLI))
            Inst.eraseFromParent();
          MadeIRChange = true;
          continue;
        }

      // See if we can constant fold its operands.
      for (Use &U : Inst.operands()) {
        if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
          continue;

        auto *C = cast<Constant>(U);
        Constant *&FoldRes = FoldedConstants[C];
        if (!FoldRes)
          FoldRes = ConstantFoldConstant(C, DL, TLI);

        if (FoldRes != C) {
          LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
                            << "\n    Old = " << *C
                            << "\n    New = " << *FoldRes << '\n');
          U = FoldRes;
          MadeIRChange = true;
        }
      }

      // Skip processing debug and pseudo intrinsics in InstCombine.
      // Processing these call instructions consumes non-trivial amount of
      // time and provides no value for the optimization.
      if (!Inst.isDebugOrPseudoInst()) {
        InstrsForInstructionWorklist.push_back(&Inst);
        SeenAliasScopes.analyse(&Inst);
      }
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    Instruction *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
      if (isa<UndefValue>(BI->getCondition()))
        // Branch on undef is UB.
        continue;
      if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool CondVal = Cond->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (isa<UndefValue>(SI->getCondition()))
        // Switch on undef is UB.
        continue;
      if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
        continue;
      }
    }

    append_range(Worklist, successors(TI));
  } while (!Worklist.empty());

  // Remove instructions inside unreachable blocks. This prevents the
  // instcombine code from having to deal with some bad special cases, and
  // reduces use counts of instructions.
  for (BasicBlock &BB : F) {
    if (Visited.count(&BB))
      continue;

    unsigned NumDeadInstInBB;
    unsigned NumDeadDbgInstInBB;
    std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
        removeAllNonTerminatorAndEHPadInstructions(&BB);

    MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
    NumDeadInst += NumDeadInstInBB;
  }

  // Once we've found all of the instructions to add to instcombine's
  // worklist, add them in reverse order.  This way instcombine will visit
  // from the top of the function down, which jives well with the way it adds
  // all uses of instructions to the worklist after doing a transformation,
  // avoiding some N^2 behavior in pathological cases.
  ICWorklist.reserve(InstrsForInstructionWorklist.size());
  for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
    // DCE instruction if trivially dead. As we iterate in reverse program
    // order here, we will clean up whole chains of dead instructions.
    if (isInstructionTriviallyDead(Inst, TLI) ||
        SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
      ++NumDeadInst;
      LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
      salvageDebugInfo(*Inst);
      Inst->eraseFromParent();
      MadeIRChange = true;
      continue;
    }

    ICWorklist.push(Inst);
  }

  return MadeIRChange;
}
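
// The reachability walk in prepareICWorklistFromFunction, reduced to its
// graph skeleton: an explicit worklist plus a visited set, seeded with the
// entry block; unreachable blocks are simply never visited. Standalone
// sketch (ints stand in for BasicBlocks), not LLVM API.
#include <set>
#include <vector>

namespace reachability_sketch {
inline std::set<int> reachable(int Entry,
                               const std::vector<std::vector<int>> &Succs) {
  std::set<int> Visited;
  std::vector<int> Worklist{Entry};
  do {
    int BB = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(BB).second)
      continue; // already processed this block
    for (int S : Succs[BB])
      Worklist.push_back(S);
  } while (!Worklist.empty());
  return Visited;
}
} // namespace reachability_sketch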
static bool combineInstructionsOverFunction(
    Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
    AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
    DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, unsigned MaxIterations, LoopInfo *LI) {
  auto &DL = F.getParent()->getDataLayout();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
      F.getContext(), TargetFolder(DL),
      IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
        Worklist.add(I);
        if (auto *Assume = dyn_cast<AssumeInst>(I))
          AC.registerAssumption(Assume);
      }));

  // Lower dbg.declare intrinsics otherwise their value may be clobbered by
  // instcombiner.
  bool MadeIRChange = false;
  if (ShouldLowerDbgDeclare)
    MadeIRChange = LowerDbgDeclare(F);

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (true) {
    ++NumWorklistIterations;
    ++Iteration;

    if (Iteration > InfiniteLoopDetectionThreshold) {
      report_fatal_error(
          "Instruction Combining seems stuck in an infinite loop after " +
          Twine(InfiniteLoopDetectionThreshold) + " iterations.");
    }

    if (Iteration > MaxIterations) {
      LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << MaxIterations
                        << " on " << F.getName()
                        << " reached; stopping before reaching a fixpoint\n");
      break;
    }

    LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
                      << F.getName() << "\n");

    MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);

    InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI,
                        DT, ORE, BFI, PSI, DL, LI);
    IC.MaxArraySizeForCombine = MaxArraySize;

    if (!IC.run())
      break;

    MadeIRChange = true;
  }

  return MadeIRChange;
}
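
// The driver loop above, reduced to its control skeleton: iterate a step
// function to a fixpoint, abort loudly past the infinite-loop threshold, and
// stop quietly at the iteration limit. Standalone sketch, not LLVM API.
#include <functional>
#include <stdexcept>

namespace fixpoint_sketch {
inline bool runToFixpoint(const std::function<bool()> &Step,
                          unsigned MaxIterations, unsigned LoopThreshold) {
  bool Changed = false;
  for (unsigned Iteration = 1;; ++Iteration) {
    if (Iteration > LoopThreshold)
      throw std::runtime_error("seems stuck in an infinite loop");
    if (Iteration > MaxIterations)
      break;       // give up before reaching a fixpoint
    if (!Step())   // one full combine pass; false means nothing changed
      break;       // fixpoint reached
    Changed = true;
  }
  return Changed;
}
} // namespace fixpoint_sketch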
void InstCombinePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  OS << "max-iterations=" << Options.MaxIterations << ";";
  OS << (Options.UseLoopInfo ? "" : "no-") << "use-loop-info";
  OS << '>';
}
PreservedAnalyses InstCombinePass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  // ...
  auto *BFI = (PSI && PSI->hasProfileSummary()) ?
      &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;

  if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
                                       BFI, PSI, Options.MaxIterations, LI))
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();

  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

bool InstructionCombiningPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Required analyses.
  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  // Optional analyses.
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  BlockFrequencyInfo *BFI =
      (PSI && PSI->hasProfileSummary()) ?
      &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
      nullptr;

  return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT,
                                         ORE, BFI, PSI, MaxIterations, LI);
}
4156 "Combine redundant instructions",
false,
false)