44#define DEBUG_TYPE "instcombine" 
   50          "Number of aggregate reconstructions turned into reuse of the " 
   51          "original aggregate");
 
   63    return CEI || 
C->getSplatValue();
 
 
  102  SmallVector<Instruction *, 2> Extracts;
 
  108  for (
auto *U : PN->
users()) {
 
  114    } 
else if (!PHIUser) {
 
  142    if (PHIInVal == PHIUser) {
 
  147      unsigned opId = (B0->
getOperand(0) == PN) ? 1 : 0;
 
  174  for (
auto *
E : Extracts) {
 
  190  ElementCount NumElts =
 
  194  bool IsBigEndian = 
DL.isBigEndian();
 
  198  if (
X->getType()->isIntegerTy()) {
 
  200           "Expected fixed vector type for bitcast from scalar integer");
 
  207    unsigned ShiftAmountC = ExtIndexC * DestWidth;
 
  208    if ((!ShiftAmountC ||
 
  209         isDesirableIntType(
X->getType()->getPrimitiveSizeInBits())) &&
 
  210        Ext.getVectorOperand()->hasOneUse()) {
 
  212        X = 
Builder.CreateLShr(
X, ShiftAmountC, 
"extelt.offset");
 
  216        return new BitCastInst(Trunc, DestTy);
 
  218      return new TruncInst(
X, DestTy);
 
  222  if (!
X->getType()->isVectorTy())
 
  229  ElementCount NumSrcElts = SrcTy->getElementCount();
 
  230  if (NumSrcElts == NumElts)
 
  232      return new BitCastInst(Elt, DestTy);
 
  235         "Src and Dst must be the same sort of vector type");
 
  251    unsigned NarrowingRatio =
 
  254    if (ExtIndexC / NarrowingRatio != InsIndexC) {
 
  260      if (
X->hasOneUse() && 
Ext.getVectorOperand()->hasOneUse()) {
 
  278    unsigned Chunk = ExtIndexC % NarrowingRatio;
 
  280      Chunk = NarrowingRatio - 1 - Chunk;
 
  285    bool NeedSrcBitcast = SrcTy->getScalarType()->isFloatingPointTy();
 
  287    if (NeedSrcBitcast && NeedDestBitcast)
 
  290    unsigned SrcWidth = SrcTy->getScalarSizeInBits();
 
  291    unsigned ShAmt = Chunk * DestWidth;
 
  296    if (!
X->hasOneUse() || !
Ext.getVectorOperand()->hasOneUse())
 
  297      if (NeedSrcBitcast || NeedDestBitcast)
 
  300    if (NeedSrcBitcast) {
 
  307      if (!
Ext.getVectorOperand()->hasOneUse())
 
  312    if (NeedDestBitcast) {
 
  314      return new BitCastInst(
Builder.CreateTrunc(Scalar, DestIntTy), DestTy);
 
  316    return new TruncInst(Scalar, DestTy);
 
  325                                         APInt &UnionUsedElts) {
 
  329  case Instruction::ExtractElement: {
 
  333    if (EEIIndexC && EEIIndexC->
getValue().
ult(VWidth)) {
 
  339  case Instruction::ShuffleVector: {
 
  341    unsigned MaskNumElts =
 
  346      if (MaskVal == -1u || MaskVal >= 2 * VWidth)
 
  348      if (Shuffle->
getOperand(0) == V && (MaskVal < VWidth))
 
  349        UnionUsedElts.
setBit(MaskVal);
 
  351          ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth)))
 
  352        UnionUsedElts.
setBit(MaskVal - VWidth);
 
 
  370  APInt UnionUsedElts(VWidth, 0);
 
  371  for (
const Use &U : V->
uses()) {
 
  384  return UnionUsedElts;
 
 
  403                                            SQ.getWithInstruction(&EI)))
 
  415    if (
SI->getCondition()->getType()->isIntegerTy() &&
 
  423  bool HasKnownValidIndex = 
false;
 
  430    unsigned NumElts = EC.getKnownMinValue();
 
  431    HasKnownValidIndex = IndexC->getValue().ult(NumElts);
 
  437      if (IID == Intrinsic::stepvector && IndexC->getValue().ult(NumElts)) {
 
  439        unsigned BitWidth = Ty->getIntegerBitWidth();
 
  443        if (IndexC->getValue().getActiveBits() <= 
BitWidth)
 
  444          Idx = ConstantInt::get(Ty, IndexC->getValue().zextOrTrunc(
BitWidth));
 
  453    if (!EC.isScalable() && IndexC->getValue().uge(NumElts))
 
  462      if (
Instruction *ScalarPHI = scalarizePHI(EI, Phi))
 
  487      (HasKnownValidIndex ||
 
  518      uint64_t IdxVal = IndexC ? IndexC->getZExtValue() : 0;
 
  519      if (IndexC && IdxVal < EC.getKnownMinValue() && 
GEP->hasOneUse()) {
 
  530              return isa<VectorType>(V->getType());
 
  532        if (VectorOps == 1) {
 
  533          Value *NewPtr = 
GEP->getPointerOperand();
 
  535            NewPtr = 
Builder.CreateExtractElement(NewPtr, IndexC);
 
  538          for (
unsigned I = 1; 
I != 
GEP->getNumOperands(); ++
I) {
 
  547              GEP->getSourceElementType(), NewPtr, NewOps);
 
  561        std::optional<int> SrcIdx;
 
  563        if (SplatIndex != -1)
 
  566          SrcIdx = SVI->getMaskValue(CI->getZExtValue());
 
  576          if (*SrcIdx < (
int)LHSWidth)
 
  577            Src = SVI->getOperand(0);
 
  580            Src = SVI->getOperand(1);
 
  584              Src, ConstantInt::get(Int64Ty, *SrcIdx, 
false));
 
  591      if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
 
  592        Value *EE = 
Builder.CreateExtractElement(CI->getOperand(0), Index);
 
  603    unsigned NumElts = EC.getKnownMinValue();
 
  607    if (!EC.isScalable() && NumElts != 1) {
 
  611        APInt PoisonElts(NumElts, 0);
 
  612        APInt DemandedElts(NumElts, 0);
 
  613        DemandedElts.
setBit(IndexC->getZExtValue());
 
  622          APInt PoisonElts(NumElts, 0);
 
  624                  SrcVec, DemandedElts, PoisonElts, 0 ,
 
 
  644         "Invalid CollectSingleShuffleElements");
 
  648    Mask.assign(NumElts, -1);
 
  653    for (
unsigned i = 0; i != NumElts; ++i)
 
  659    for (
unsigned i = 0; i != NumElts; ++i)
 
  660      Mask.push_back(i + NumElts);
 
  666    Value *VecOp    = IEI->getOperand(0);
 
  667    Value *ScalarOp = IEI->getOperand(1);
 
  668    Value *IdxOp    = IEI->getOperand(2);
 
  679        Mask[InsertedIdx] = -1;
 
  684        unsigned ExtractedIdx =
 
  686        unsigned NumLHSElts =
 
  696              Mask[InsertedIdx % NumElts] = ExtractedIdx;
 
  699              Mask[InsertedIdx % NumElts] = ExtractedIdx + NumLHSElts;
 
 
  719  unsigned NumInsElts = InsVecType->getNumElements();
 
  720  unsigned NumExtElts = ExtVecType->getNumElements();
 
  723  if (InsVecType->getElementType() != ExtVecType->getElementType() ||
 
  724      NumExtElts >= NumInsElts)
 
  737  for (
unsigned i = 0; i < NumExtElts; ++i)
 
  739  for (
unsigned i = NumExtElts; i < NumInsElts; ++i)
 
  756  if (InsertionBlock != InsElt->
getParent())
 
  774    WideVec->insertAfter(ExtVecOpInst->getIterator());
 
  782    if (!OldExt || OldExt->
getParent() != WideVec->getParent())
 
 
  808  assert(V->getType()->isVectorTy() && 
"Invalid shuffle!");
 
  812    Mask.assign(NumElts, -1);
 
  813    return std::make_pair(
 
  818    Mask.assign(NumElts, 0);
 
  819    return std::make_pair(V, 
nullptr);
 
  824    Value *VecOp    = IEI->getOperand(0);
 
  825    Value *ScalarOp = IEI->getOperand(1);
 
  826    Value *IdxOp    = IEI->getOperand(2);
 
  830        unsigned ExtractedIdx =
 
  836        if (EI->
getOperand(0) == PermittedRHS || PermittedRHS == 
nullptr) {
 
  839          assert(LR.second == 
nullptr || LR.second == 
RHS);
 
  841          if (LR.first->getType() != 
RHS->getType()) {
 
  849            for (
unsigned i = 0; i < NumElts; ++i)
 
  851            return std::make_pair(V, 
nullptr);
 
  854          unsigned NumLHSElts =
 
  856          Mask[InsertedIdx % NumElts] = NumLHSElts + ExtractedIdx;
 
  857          return std::make_pair(LR.first, 
RHS);
 
  860        if (VecOp == PermittedRHS) {
 
  863          unsigned NumLHSElts =
 
  866          for (
unsigned i = 0; i != NumElts; ++i)
 
  867            Mask.push_back(i == InsertedIdx ? ExtractedIdx : NumLHSElts + i);
 
  868          return std::make_pair(EI->
getOperand(0), PermittedRHS);
 
  876          return std::make_pair(EI->
getOperand(0), PermittedRHS);
 
  882  for (
unsigned i = 0; i != NumElts; ++i)
 
  884  return std::make_pair(V, 
nullptr);
 
 
  910  assert(NumAggElts > 0 && 
"Aggregate should have elements.");
 
  914  static constexpr auto NotFound = std::nullopt;
 
  915  static constexpr auto FoundMismatch = 
nullptr;
 
  922  auto KnowAllElts = [&AggElts]() {
 
  930  static const int DepthLimit = 2 * NumAggElts;
 
  935       Depth < DepthLimit && CurrIVI && !KnowAllElts();
 
  938    auto *InsertedValue =
 
  946    if (Indices.
size() != 1)
 
  952    std::optional<Instruction *> &Elt = AggElts[Indices.
front()];
 
  953    Elt = Elt.value_or(InsertedValue);
 
  966  enum class AggregateDescription {
 
  982  auto Describe = [](std::optional<Value *> SourceAggregate) {
 
  983    if (SourceAggregate == NotFound)
 
  984      return AggregateDescription::NotFound;
 
  985    if (*SourceAggregate == FoundMismatch)
 
  986      return AggregateDescription::FoundMismatch;
 
  987    return AggregateDescription::Found;
 
  991  bool EltDefinedInUseBB = 
false;
 
  998  auto FindSourceAggregate =
 
  999      [&](
Instruction *Elt, 
unsigned EltIdx, std::optional<BasicBlock *> UseBB,
 
 1000          std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {
 
 1002    if (UseBB && PredBB) {
 
 1005        EltDefinedInUseBB = 
true;
 
 1014    Value *SourceAggregate = EVI->getAggregateOperand();
 
 1017    if (SourceAggregate->
getType() != AggTy)
 
 1018      return FoundMismatch;
 
 1020    if (EVI->getNumIndices() != 1 || EltIdx != EVI->getIndices().front())
 
 1021      return FoundMismatch;
 
 1023    return SourceAggregate; 
 
 1029  auto FindCommonSourceAggregate =
 
 1030      [&](std::optional<BasicBlock *> UseBB,
 
 1031          std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {
 
 1032    std::optional<Value *> SourceAggregate;
 
 1035      assert(Describe(SourceAggregate) != AggregateDescription::FoundMismatch &&
 
 1036             "We don't store nullptr in SourceAggregate!");
 
 1037      assert((Describe(SourceAggregate) == AggregateDescription::Found) ==
 
 1039             "SourceAggregate should be valid after the first element,");
 
 1044      std::optional<Value *> SourceAggregateForElement =
 
 1045          FindSourceAggregate(*
I.value(), 
I.index(), UseBB, PredBB);
 
 1052      if (Describe(SourceAggregateForElement) != AggregateDescription::Found)
 
 1053        return SourceAggregateForElement;
 
 1057      switch (Describe(SourceAggregate)) {
 
 1058      case AggregateDescription::NotFound:
 
 1060        SourceAggregate = SourceAggregateForElement; 
 
 1062      case AggregateDescription::Found:
 
 1065        if (*SourceAggregateForElement != *SourceAggregate)
 
 1066          return FoundMismatch;
 
 1068      case AggregateDescription::FoundMismatch:
 
 1073    assert(Describe(SourceAggregate) == AggregateDescription::Found &&
 
 1074           "Must be a valid Value");
 
 1075    return *SourceAggregate;
 
 1078  std::optional<Value *> SourceAggregate;
 
 1081  SourceAggregate = FindCommonSourceAggregate(std::nullopt,
 
 1083  if (Describe(SourceAggregate) != AggregateDescription::NotFound) {
 
 1084    if (Describe(SourceAggregate) == AggregateDescription::FoundMismatch)
 
 1086    ++NumAggregateReconstructionsSimplified;
 
 1099  for (
const std::optional<Instruction *> &
I : AggElts) {
 
 1123  static const int PredCountLimit = 64;
 
 1130    if (Preds.
size() >= PredCountLimit) 
 
 1139  bool FoundSrcAgg = 
false;
 
 1141    std::pair<
decltype(SourceAggregates)
::iterator, 
bool> 
IV =
 
 1150    SourceAggregate = FindCommonSourceAggregate(UseBB, Pred);
 
 1151    if (Describe(SourceAggregate) == AggregateDescription::Found) {
 
 1153      IV.first->second = *SourceAggregate;
 
 1158      if (!BI || !BI->isUnconditional())
 
 1168  for (
auto &It : SourceAggregates) {
 
 1169    if (Describe(It.second) == AggregateDescription::Found)
 
 1173    if (EltDefinedInUseBB)
 
 1181    if (UseBB != OrigBB)
 
 1186    bool ConstAgg = 
true;
 
 1187    for (
auto Val : AggElts) {
 
 1200  for (
auto &It : SourceAggregates) {
 
 1201    if (Describe(It.second) == AggregateDescription::Found)
 
 1205    Builder.SetInsertPoint(Pred->getTerminator());
 
 1207    for (
auto [Idx, Val] : 
enumerate(AggElts)) {
 
 1209      V = 
Builder.CreateInsertValue(V, Elt, Idx);
 
 1221  Builder.SetInsertPoint(UseBB, UseBB->getFirstNonPHIIt());
 
 1225    PHI->addIncoming(SourceAggregates[Pred], Pred);
 
 1227  ++NumAggregateReconstructionsSimplified;
 
 
 1240          I.getAggregateOperand(), 
I.getInsertedValueOperand(), 
I.getIndices(),
 
 1241          SQ.getWithInstruction(&
I)))
 
 1244  bool IsRedundant = 
false;
 
 1253  while (V->hasOneUse() && 
Depth < 10) {
 
 1256    if (!UserInsInst || U->getOperand(0) != V)
 
 1258    if (UserInsInst->getIndices() == FirstIndices) {
 
 
 1286  if (MaskSize != VecSize)
 
 1291  for (
int i = 0; i != MaskSize; ++i) {
 
 1293    if (Elt != -1 && Elt != i && Elt != i + VecSize)
 
 
 1318  if (NumElements == 1)
 
 1330    if (!Idx || CurrIE->
getOperand(1) != SplatVal)
 
 1337    if (CurrIE != &InsElt &&
 
 1338        (!CurrIE->
hasOneUse() && (NextIE != 
nullptr || !Idx->isZero())))
 
 1341    ElementPresent[Idx->getZExtValue()] = 
true;
 
 1347  if (FirstIE == &InsElt)
 
 1355    if (!ElementPresent.
all())
 
 1361  Constant *Zero = ConstantInt::get(Int64Ty, 0);
 
 1368  for (
unsigned i = 0; i != NumElements; ++i)
 
 1369    if (!ElementPresent[i])
 
 
 1380  if (!Shuf || !Shuf->isZeroEltSplat())
 
 1395  Value *Op0 = Shuf->getOperand(0);
 
 1403  unsigned NumMaskElts =
 
 1406  for (
unsigned i = 0; i != NumMaskElts; ++i)
 
 1407    NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);
 
 
 1418      !(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding()))
 
 1434  Value *
X = Shuf->getOperand(0);
 
 1442  unsigned NumMaskElts =
 
 1446  for (
unsigned i = 0; i != NumMaskElts; ++i) {
 
 1449      NewMask[i] = OldMask[i];
 
 1450    } 
else if (OldMask[i] == (
int)IdxC) {
 
 1456             "Unexpected shuffle mask element for identity shuffle");
 
 
 1476  if (!InsElt1 || !InsElt1->hasOneUse())
 
 1487    Value *NewInsElt1 = Builder.CreateInsertElement(
X, ScalarC, IdxC2);
 
 
 1500  if (!Inst || !Inst->hasOneUse())
 
 1505    Constant *ShufConstVec, *InsEltScalar;
 
 1529    unsigned NumElts = Mask.size();
 
 1532    for (
unsigned I = 0; 
I != NumElts; ++
I) {
 
 1533      if (
I == InsEltIndex) {
 
 1534        NewShufElts[
I] = InsEltScalar;
 
 1535        NewMaskElts[
I] = InsEltIndex + NumElts;
 
 1539        NewMaskElts[
I] = Mask[
I];
 
 1543      if (!NewShufElts[
I])
 
 1570    auto ValI = std::begin(Val);
 
 1577        Mask[
I] = NumElts + 
I;
 
 1582    for (
unsigned I = 0; 
I < NumElts; ++
I) {
 
 
 1614    CastOpcode = Instruction::FPExt;
 
 1616    CastOpcode = Instruction::SExt;
 
 1618    CastOpcode = Instruction::ZExt;
 
 1623  if (
X->getType()->getScalarType() != 
Y->getType())
 
 
 1651  Value *Scalar0, *BaseVec;
 
 1653  if (!VTy || (VTy->getNumElements() & 1) ||
 
 1662  if (Index0 + 1 != Index1 || Index0 & 1)
 
 1679  Type *SrcTy = 
X->getType();
 
 1680  unsigned ScalarWidth = SrcTy->getScalarSizeInBits();
 
 1681  unsigned VecEltWidth = VTy->getScalarSizeInBits();
 
 1682  if (ScalarWidth != VecEltWidth * 2 || ShAmt != VecEltWidth)
 
 1687  Value *CastBaseVec = Builder.CreateBitCast(BaseVec, CastTy);
 
 1691  uint64_t NewIndex = IsBigEndian ? Index1 / 2 : Index0 / 2;
 
 1692  Value *NewInsert = Builder.CreateInsertElement(CastBaseVec, 
X, NewIndex);
 
 
 1697  Value *VecOp    = IE.getOperand(0);
 
 1698  Value *ScalarOp = IE.getOperand(1);
 
 1699  Value *IdxOp    = IE.getOperand(2);
 
 1702          VecOp, ScalarOp, IdxOp, 
SQ.getWithInstruction(&IE)))
 
 1710    Value *BaseVec, *OtherScalar;
 
 1715        !
isa<Constant>(OtherScalar) && OtherIndexVal > IndexC->getZExtValue()) {
 
 1716      Value *NewIns = 
Builder.CreateInsertElement(BaseVec, ScalarOp, IdxOp);
 
 1718                                       Builder.getInt64(OtherIndexVal));
 
 1736    Value *NewInsElt = 
Builder.CreateInsertElement(NewUndef, ScalarSrc, IdxOp);
 
 1751    Value *NewInsElt = 
Builder.CreateInsertElement(VecSrc, ScalarSrc, IdxOp);
 
 1759  uint64_t InsertedIdx, ExtractedIdx;
 
 1783      if (!Insert.hasOneUse())
 
 1792    if (isShuffleRootCandidate(IE)) {
 
 1803        if (LR.first != &IE && LR.second != &IE) {
 
 1805          if (LR.second == 
nullptr)
 
 1814    unsigned VWidth = VecTy->getNumElements();
 
 1815    APInt PoisonElts(VWidth, 0);
 
 1838    return IdentityShuf;
 
 
 1852                                unsigned Depth = 5) {
 
 1859  if (!
I) 
return false;
 
 1862  if (!
I->hasOneUse())
 
 1865  if (
Depth == 0) 
return false;
 
 1867  switch (
I->getOpcode()) {
 
 1868    case Instruction::UDiv:
 
 1869    case Instruction::SDiv:
 
 1870    case Instruction::URem:
 
 1871    case Instruction::SRem:
 
 1878    case Instruction::Add:
 
 1879    case Instruction::FAdd:
 
 1880    case Instruction::Sub:
 
 1881    case Instruction::FSub:
 
 1882    case Instruction::Mul:
 
 1883    case Instruction::FMul:
 
 1884    case Instruction::FDiv:
 
 1885    case Instruction::FRem:
 
 1886    case Instruction::Shl:
 
 1887    case Instruction::LShr:
 
 1888    case Instruction::AShr:
 
 1889    case Instruction::And:
 
 1890    case Instruction::Or:
 
 1891    case Instruction::Xor:
 
 1892    case Instruction::ICmp:
 
 1893    case Instruction::FCmp:
 
 1894    case Instruction::Trunc:
 
 1895    case Instruction::ZExt:
 
 1896    case Instruction::SExt:
 
 1897    case Instruction::FPToUI:
 
 1898    case Instruction::FPToSI:
 
 1899    case Instruction::UIToFP:
 
 1900    case Instruction::SIToFP:
 
 1901    case Instruction::FPTrunc:
 
 1902    case Instruction::FPExt:
 
 1903    case Instruction::GetElementPtr: {
 
 1906      Type *ITy = 
I->getType();
 
 1910      for (
Value *Operand : 
I->operands()) {
 
 1916    case Instruction::InsertElement: {
 
 1918      if (!CI) 
return false;
 
 1923      bool SeenOnce = 
false;
 
 1924      for (
int I : Mask) {
 
 1925        if (
I == ElementNumber) {
 
 
 1941  Builder.SetInsertPoint(
I);
 
 1942  switch (
I->getOpcode()) {
 
 1943    case Instruction::Add:
 
 1944    case Instruction::FAdd:
 
 1945    case Instruction::Sub:
 
 1946    case Instruction::FSub:
 
 1947    case Instruction::Mul:
 
 1948    case Instruction::FMul:
 
 1949    case Instruction::UDiv:
 
 1950    case Instruction::SDiv:
 
 1951    case Instruction::FDiv:
 
 1952    case Instruction::URem:
 
 1953    case Instruction::SRem:
 
 1954    case Instruction::FRem:
 
 1955    case Instruction::Shl:
 
 1956    case Instruction::LShr:
 
 1957    case Instruction::AShr:
 
 1958    case Instruction::And:
 
 1959    case Instruction::Or:
 
 1960    case Instruction::Xor: {
 
 1962      assert(NewOps.
size() == 2 && 
"binary operator with #ops != 2");
 
 1964                                       NewOps[0], NewOps[1]);
 
 1971          NewI->setIsExact(BO->
isExact());
 
 1974          NewI->copyFastMathFlags(
I);
 
 1978    case Instruction::ICmp:
 
 1979      assert(NewOps.
size() == 2 && 
"icmp with #ops != 2");
 
 1980      return Builder.CreateICmp(
cast<ICmpInst>(
I)->getPredicate(), NewOps[0],
 
 1982    case Instruction::FCmp:
 
 1983      assert(NewOps.
size() == 2 && 
"fcmp with #ops != 2");
 
 1984      return Builder.CreateFCmp(
cast<FCmpInst>(
I)->getPredicate(), NewOps[0],
 
 1986    case Instruction::Trunc:
 
 1987    case Instruction::ZExt:
 
 1988    case Instruction::SExt:
 
 1989    case Instruction::FPToUI:
 
 1990    case Instruction::FPToSI:
 
 1991    case Instruction::UIToFP:
 
 1992    case Instruction::SIToFP:
 
 1993    case Instruction::FPTrunc:
 
 1994    case Instruction::FPExt: {
 
 1998          I->getType()->getScalarType(),
 
 2000      assert(NewOps.
size() == 1 && 
"cast with #ops != 1");
 
 2004    case Instruction::GetElementPtr: {
 
 
 2019  assert(V->getType()->isVectorTy() && 
"can't reorder non-vector elements");
 
 2036  switch (
I->getOpcode()) {
 
 2037    case Instruction::Add:
 
 2038    case Instruction::FAdd:
 
 2039    case Instruction::Sub:
 
 2040    case Instruction::FSub:
 
 2041    case Instruction::Mul:
 
 2042    case Instruction::FMul:
 
 2043    case Instruction::UDiv:
 
 2044    case Instruction::SDiv:
 
 2045    case Instruction::FDiv:
 
 2046    case Instruction::URem:
 
 2047    case Instruction::SRem:
 
 2048    case Instruction::FRem:
 
 2049    case Instruction::Shl:
 
 2050    case Instruction::LShr:
 
 2051    case Instruction::AShr:
 
 2052    case Instruction::And:
 
 2053    case Instruction::Or:
 
 2054    case Instruction::Xor:
 
 2055    case Instruction::ICmp:
 
 2056    case Instruction::FCmp:
 
 2057    case Instruction::Trunc:
 
 2058    case Instruction::ZExt:
 
 2059    case Instruction::SExt:
 
 2060    case Instruction::FPToUI:
 
 2061    case Instruction::FPToSI:
 
 2062    case Instruction::UIToFP:
 
 2063    case Instruction::SIToFP:
 
 2064    case Instruction::FPTrunc:
 
 2065    case Instruction::FPExt:
 
 2066    case Instruction::Select:
 
 2067    case Instruction::GetElementPtr: {
 
 2072      for (
int i = 0, e = 
I->getNumOperands(); i != e; ++i) {
 
 2077        if (
I->getOperand(i)->getType()->isVectorTy())
 
 2080          V = 
I->getOperand(i);
 
 2082        NeedsRebuild |= (V != 
I->getOperand(i));
 
 2088    case Instruction::InsertElement: {
 
 2096      for (
int e = Mask.size(); Index != e; ++Index) {
 
 2097        if (Mask[Index] == Element) {
 
 2110      Builder.SetInsertPoint(
I);
 
 2111      return Builder.CreateInsertElement(V, 
I->getOperand(1), Index);
 
 
 2127  unsigned MaskElems = Mask.size();
 
 2128  unsigned BegIdx = Mask.front();
 
 2129  unsigned EndIdx = Mask.back();
 
 2130  if (BegIdx > EndIdx || EndIdx >= LHSElems || EndIdx - BegIdx != MaskElems - 1)
 
 2132  for (
unsigned I = 0; 
I != MaskElems; ++
I)
 
 2133    if (
static_cast<unsigned>(Mask[
I]) != BegIdx + 
I)
 
 
 2158  case Instruction::Shl: {
 
 2163          Instruction::Shl, ConstantInt::get(Ty, 1), 
C, 
DL);
 
 2164      assert(ShlOne && 
"Constant folding of immediate constants failed");
 
 2165      return {Instruction::Mul, BO0, ShlOne};
 
 2169  case Instruction::Or: {
 
 2172      return {Instruction::Add, BO0, BO1};
 
 2175  case Instruction::Sub:
 
 
 2190  assert(Shuf.
isSelect() && 
"Must have select-equivalent shuffle");
 
 2195  unsigned NumElts = Mask.size();
 
 2199  if (ShufOp && ShufOp->isSelect() &&
 
 2200      (ShufOp->getOperand(0) == Op1 || ShufOp->getOperand(1) == Op1)) {
 
 2206  if (!ShufOp || !ShufOp->isSelect() ||
 
 2207      (ShufOp->getOperand(0) != Op0 && ShufOp->getOperand(1) != Op0))
 
 2210  Value *
X = ShufOp->getOperand(0), *
Y = ShufOp->getOperand(1);
 
 2212  ShufOp->getShuffleMask(Mask1);
 
 2213  assert(Mask1.
size() == NumElts && 
"Vector size changed with select shuffle");
 
 2226  for (
unsigned i = 0; i != NumElts; ++i)
 
 2227    NewMask[i] = Mask[i] < (
signed)NumElts ? Mask[i] : Mask1[i];
 
 2232         "Unexpected shuffle mask");
 
 
 2238  assert(Shuf.
isSelect() && 
"Must have select-equivalent shuffle");
 
 2261  Value *
X = Op0IsBinop ? Op1 : Op0;
 
 2282  bool MightCreatePoisonOrUB =
 
 2285  if (MightCreatePoisonOrUB)
 
 
 2320  Value *NewIns = Builder.CreateInsertElement(PoisonVec, 
X, (
uint64_t)0);
 
 2326  unsigned NumMaskElts =
 
 2329  for (
unsigned i = 0; i != NumMaskElts; ++i)
 
 2331      NewMask[i] = Mask[i];
 
 
 2368  Constant *C0 = 
nullptr, *C1 = 
nullptr;
 
 2369  bool ConstantsAreOp1;
 
 2372    ConstantsAreOp1 = 
false;
 
 2377    ConstantsAreOp1 = 
true;
 
 2384  bool DropNSW = 
false;
 
 2385  if (ConstantsAreOp1 && Opc0 != Opc1) {
 
 2389    if (Opc0 == Instruction::Shl || Opc1 == Instruction::Shl)
 
 2393      Opc0 = AltB0.Opcode;
 
 2397      Opc1 = AltB1.Opcode;
 
 2402  if (Opc0 != Opc1 || !C0 || !C1)
 
 2415  bool MightCreatePoisonOrUB =
 
 2418  if (MightCreatePoisonOrUB)
 
 2441    if (MightCreatePoisonOrUB && !ConstantsAreOp1)
 
 2451    V = 
Builder.CreateShuffleVector(
X, 
Y, Mask);
 
 2454  Value *NewBO = ConstantsAreOp1 ? 
Builder.CreateBinOp(BOpc, V, NewC) :
 
 2455                                   Builder.CreateBinOp(BOpc, NewC, V);
 
 2463    NewI->copyIRFlags(B0);
 
 2464    NewI->andIRFlags(B1);
 
 2466      NewI->setHasNoSignedWrap(
false);
 
 2468      NewI->dropPoisonGeneratingFlags();
 
 
 2487  Type *SrcType = 
X->getType();
 
 2488  if (!SrcType->isVectorTy() || !SrcType->isIntOrIntVectorTy() ||
 
 2495         "Expected a shuffle that decreases length");
 
 2502  for (
unsigned i = 0, e = Mask.size(); i != e; ++i) {
 
 2505    uint64_t LSBIndex = IsBigEndian ? (i + 1) * TruncRatio - 1 : i * TruncRatio;
 
 2506    assert(LSBIndex <= INT32_MAX && 
"Overflowed 32-bits");
 
 2507    if (Mask[i] != (
int)LSBIndex)
 
 
 2533  unsigned NarrowNumElts =
 
 
 2558  bool IsFNeg = S0->getOpcode() == Instruction::FNeg;
 
 2564      S0->getOpcode() != 
S1->getOpcode() ||
 
 2565      (!S0->hasOneUse() && !
S1->hasOneUse()))
 
 2572    NewF = UnaryOperator::CreateFNeg(NewShuf);
 
 
 2593  switch (CastOpcode) {
 
 2594  case Instruction::SExt:
 
 2595  case Instruction::ZExt:
 
 2596  case Instruction::FPToSI:
 
 2597  case Instruction::FPToUI:
 
 2598  case Instruction::SIToFP:
 
 2599  case Instruction::UIToFP:
 
 2610  if (ShufTy->getElementCount().getKnownMinValue() >
 
 2611      ShufOpTy->getElementCount().getKnownMinValue())
 
 2618    auto *NewIns = Builder.CreateShuffleVector(Cast0->getOperand(0),
 
 2626  if (!Cast1 || Cast0->getOpcode() != Cast1->getOpcode() ||
 
 2627      Cast0->getSrcTy() != Cast1->getSrcTy())
 
 2632         "Expected fixed vector operands for casts and binary shuffle");
 
 2633  if (CastSrcTy->getPrimitiveSizeInBits() > ShufOpTy->getPrimitiveSizeInBits())
 
 2637  if (!Cast0->hasOneUse() && !Cast1->hasOneUse())
 
 2641  Value *
X = Cast0->getOperand(0);
 
 2642  Value *
Y = Cast1->getOperand(0);
 
 
 2657      X->getType()->getPrimitiveSizeInBits() ==
 
 2685  assert(NumElts < Mask.size() &&
 
 2686         "Identity with extract must have less elements than its inputs");
 
 2688  for (
unsigned i = 0; i != NumElts; ++i) {
 
 2690    int MaskElt = Mask[i];
 
 2691    NewMask[i] = ExtractMaskElt == 
PoisonMaskElem ? ExtractMaskElt : MaskElt;
 
 
 2704  int NumElts = Mask.size();
 
 2730  if (NumElts != InpNumElts)
 
 2734  auto isShufflingScalarIntoOp1 = [&](
Value *&Scalar, 
ConstantInt *&IndexC) {
 
 2742    int NewInsIndex = -1;
 
 2743    for (
int i = 0; i != NumElts; ++i) {
 
 2749      if (Mask[i] == NumElts + i)
 
 2753      if (NewInsIndex != -1 || Mask[i] != IndexC->getSExtValue())
 
 2760    assert(NewInsIndex != -1 && 
"Did not fold shuffle with unused operand?");
 
 2763    IndexC = ConstantInt::get(IndexC->getIntegerType(), NewInsIndex);
 
 2772  if (isShufflingScalarIntoOp1(Scalar, IndexC))
 
 2780  if (isShufflingScalarIntoOp1(Scalar, IndexC))
 
 
 2792  if (!Shuffle0 || !Shuffle0->isIdentityWithPadding() ||
 
 2793      !Shuffle1 || !Shuffle1->isIdentityWithPadding())
 
 2801  Value *
X = Shuffle0->getOperand(0);
 
 2802  Value *
Y = Shuffle1->getOperand(0);
 
 2803  if (
X->getType() != 
Y->getType() ||
 
 2812         "Unexpected operand for identity shuffle");
 
 2820  assert(WideElts > NarrowElts && 
"Unexpected types for identity with padding");
 
 2824  for (
int i = 0, e = Mask.size(); i != e; ++i) {
 
 2830    if (Mask[i] < WideElts) {
 
 2831      if (Shuffle0->getMaskValue(Mask[i]) == -1)
 
 2834      if (Shuffle1->getMaskValue(Mask[i] - WideElts) == -1)
 
 2841    if (Mask[i] < WideElts) {
 
 2842      assert(Mask[i] < NarrowElts && 
"Unexpected shuffle mask");
 
 2843      NewMask[i] = Mask[i];
 
 2845      assert(Mask[i] < (WideElts + NarrowElts) && 
"Unexpected shuffle mask");
 
 2846      NewMask[i] = Mask[i] - (WideElts - NarrowElts);
 
 
 2868  if (
X->getType() != 
Y->getType())
 
 2877    NewBOI->copyIRFlags(BinOp);
 
 
 2913      X->getType()->isVectorTy() && 
X->getType() == 
Y->getType() &&
 
 2914      X->getType()->getScalarSizeInBits() ==
 
 2916      (LHS->hasOneUse() || RHS->hasOneUse())) {
 
 2931      X->getType()->isVectorTy() && VWidth == LHSWidth) {
 
 2934    unsigned XNumElts = XType->getNumElements();
 
 2940                                              ScaledMask, XType, ShufQuery))
 
 2948           "Shuffle with 2 undef ops not simplified?");
 
 2976  APInt PoisonElts(VWidth, 0);
 
 2998      if (
SI->getCondition()->getType()->isIntegerTy() &&
 
 3045  bool MadeChange = 
false;
 
 3048    unsigned MaskElems = Mask.size();
 
 3050    unsigned VecBitWidth = 
DL.getTypeSizeInBits(SrcTy);
 
 3051    unsigned SrcElemBitWidth = 
DL.getTypeSizeInBits(SrcTy->getElementType());
 
 3052    assert(SrcElemBitWidth && 
"vector elements must have a bitwidth");
 
 3053    unsigned SrcNumElems = SrcTy->getNumElements();
 
 3059        if (BC->use_empty())
 
 3062        if (BC->hasOneUse()) {
 
 3064          if (BC2 && isEliminableCastPair(BC, BC2))
 
 3070      unsigned BegIdx = Mask.front();
 
 3071      Type *TgtTy = BC->getDestTy();
 
 3072      unsigned TgtElemBitWidth = 
DL.getTypeSizeInBits(TgtTy);
 
 3073      if (!TgtElemBitWidth)
 
 3075      unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth;
 
 3076      bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth;
 
 3077      bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth);
 
 3078      if (!VecBitWidthsEqual)
 
 3083      if (!BegIsAligned) {
 
 3087        for (
unsigned I = 0, E = MaskElems, Idx = BegIdx; 
I != E; ++Idx, ++
I)
 
 3088          ShuffleMask[
I] = Idx;
 
 3089        V = 
Builder.CreateShuffleVector(V, ShuffleMask,
 
 3093      unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;
 
 3094      assert(SrcElemsPerTgtElem);
 
 3095      BegIdx /= SrcElemsPerTgtElem;
 
 3096      auto [It, Inserted] = NewBCs.
try_emplace(CastSrcTy);
 
 3098        It->second = 
Builder.CreateBitCast(V, CastSrcTy, SVI.
getName() + 
".bc");
 
 3099      auto *Ext = 
Builder.CreateExtractElement(It->second, BegIdx,
 
 3156      LHSShuffle = 
nullptr;
 
 3159      RHSShuffle = 
nullptr;
 
 3160  if (!LHSShuffle && !RHSShuffle)
 
 3161    return MadeChange ? &SVI : 
nullptr;
 
 3163  Value* LHSOp0 = 
nullptr;
 
 3164  Value* LHSOp1 = 
nullptr;
 
 3165  Value* RHSOp0 = 
nullptr;
 
 3166  unsigned LHSOp0Width = 0;
 
 3167  unsigned RHSOp0Width = 0;
 
 3177  Value* newLHS = LHS;
 
 3178  Value* newRHS = RHS;
 
 3186    else if (LHSOp0Width == LHSWidth) {
 
 3191  if (RHSShuffle && RHSOp0Width == LHSWidth) {
 
 3195  if (LHSOp0 == RHSOp0) {
 
 3200  if (newLHS == LHS && newRHS == RHS)
 
 3201    return MadeChange ? &SVI : 
nullptr;
 
 3207  if (RHSShuffle && newRHS != RHS)
 
 3210  unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;
 
 3216  for (
unsigned i = 0; i < VWidth; ++i) {
 
 3221    } 
else if (Mask[i] < (
int)LHSWidth) {
 
 3226      if (newLHS != LHS) {
 
 3227        eltMask = LHSMask[Mask[i]];
 
 3243      else if (newRHS != RHS) {
 
 3244        eltMask = RHSMask[Mask[i]-LHSWidth];
 
 3247        if (eltMask >= (
int)RHSOp0Width) {
 
 3249                 "should have been check above");
 
 3253        eltMask = Mask[i]-LHSWidth;
 
 3261      if (eltMask >= 0 && newRHS != 
nullptr && newLHS != newRHS)
 
 3262        eltMask += newLHSWidth;
 
 3267      if (SplatElt >= 0 && SplatElt != eltMask)
 
 3277  if (
isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) {
 
 3283  return MadeChange ? &SVI : 
nullptr;
 
 
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
 
This file implements a class to represent arbitrary precision integral constant values and operations...
 
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
 
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
 
This file contains the declarations for the subclasses of Constant, which represent the different fla...
 
This file defines the DenseMap class.
 
This file provides internal interfaces used to implement the InstCombine.
 
static Instruction * foldConstantInsEltIntoShuffle(InsertElementInst &InsElt)
insertelt (shufflevector X, CVec, Mask|insertelt X, C1, CIndex1), C, CIndex --> shufflevector X,...
 
static Value * evaluateInDifferentElementOrder(Value *V, ArrayRef< int > Mask, IRBuilderBase &Builder)
 
static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS, SmallVectorImpl< int > &Mask)
If V is a shuffle of values that ONLY returns elements from either LHS or RHS, return the shuffle mas...
 
static ShuffleOps collectShuffleElements(Value *V, SmallVectorImpl< int > &Mask, Value *PermittedRHS, InstCombinerImpl &IC, bool &Rerun)
 
static APInt findDemandedEltsByAllUsers(Value *V)
Find union of elements of V demanded by all its users.
 
static Instruction * foldTruncInsEltPair(InsertElementInst &InsElt, bool IsBigEndian, InstCombiner::BuilderTy &Builder)
If we are inserting 2 halves of a value into adjacent elements of a vector, try to convert to a singl...
 
static Instruction * foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf, const SimplifyQuery &SQ)
 
static Instruction * foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf)
 
static Instruction * foldIdentityExtractShuffle(ShuffleVectorInst &Shuf)
Try to fold an extract subvector operation.
 
static bool findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr, APInt &UnionUsedElts)
Find elements of V demanded by UserInstr.
 
static Instruction * foldInsEltIntoSplat(InsertElementInst &InsElt)
Try to fold an insert element into an existing splat shuffle by changing the shuffle's mask to includ...
 
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs.
 
static Instruction * foldShuffleWithInsert(ShuffleVectorInst &Shuf, InstCombinerImpl &IC)
Try to replace a shuffle with an insertelement or try to replace a shuffle operand with the operand o...
 
static Instruction * canonicalizeInsertSplat(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder)
If we have an insert of a scalar to a non-zero element of an undefined vector and then shuffle that v...
 
static Instruction * foldTruncShuffle(ShuffleVectorInst &Shuf, bool IsBigEndian)
Convert a narrowing shuffle of a bitcasted vector into a vector truncate.
 
static bool replaceExtractElements(InsertElementInst *InsElt, ExtractElementInst *ExtElt, InstCombinerImpl &IC)
If we have insertion into a vector that is wider than the vector that we are extracting from,...
 
static bool cheapToScalarize(Value *V, Value *EI)
Return true if the value is cheaper to scalarize than it is to leave as a vector operation.
 
static Value * buildNew(Instruction *I, ArrayRef< Value * > NewOps, IRBuilderBase &Builder)
Rebuild a new instruction just like 'I' but with the new operands given.
 
static bool canEvaluateShuffled(Value *V, ArrayRef< int > Mask, unsigned Depth=5)
Return true if we can evaluate the specified expression tree if the vector elements were shuffled in ...
 
static Instruction * foldSelectShuffleOfSelectShuffle(ShuffleVectorInst &Shuf)
A select shuffle of a select shuffle with a shared operand can be reduced to a single select shuffle.
 
static Instruction * hoistInsEltConst(InsertElementInst &InsElt2, InstCombiner::BuilderTy &Builder)
If we have an insertelement instruction feeding into another insertelement and the 2nd is inserting a...
 
static Instruction * foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder)
Canonicalize FP negate/abs after shuffle.
 
static Instruction * foldCastShuffle(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder)
Canonicalize casts after shuffle.
 
static Instruction * narrowInsElt(InsertElementInst &InsElt, InstCombiner::BuilderTy &Builder)
If both the base vector and the inserted element are extended from the same type, do the insert eleme...
 
static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf)
 
static Instruction * foldInsSequenceIntoSplat(InsertElementInst &InsElt)
Turn a chain of inserts that splats a value into an insert + shuffle: insertelt(insertelt(insertelt(i...
 
static Instruction * foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt)
Try to fold an extract+insert element into an existing identity shuffle by changing the shuffle's mas...
 
static ConstantInt * getPreferredVectorIndex(ConstantInt *IndexC)
Given a constant index for an extractelement or insertelement instruction, return it with the canonica...
 
static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI, ArrayRef< int > Mask)
 
static BinopElts getAlternateBinop(BinaryOperator *BO, const DataLayout &DL)
Binops may be transformed into binops with different opcodes and operands.
 
This file provides the interface for the instcombine pass implementation.
 
static bool isSplat(Value *V)
Return true if V is a splat of a value (which is used when multiplying a matrix with a scalar).
 
uint64_t IntrinsicInst * II
 
const SmallVectorImpl< MachineOperand > & Cond
 
This file implements the SmallBitVector class.
 
This file defines the SmallVector class.
 
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
 
#define STATISTIC(VARNAME, DESC)
 
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
 
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
 
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
 
static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget)
If both arms of a vector select are concatenated vectors, split the select, and concatenate the resul...
 
static const uint32_t IV[8]
 
Class for arbitrary precision integers.
 
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
 
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
 
unsigned getActiveBits() const
Compute the number of active bits in the value.
 
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
 
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
 
bool ult(const APInt &RHS) const
Unsigned less than comparison.
 
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
 
const T & front() const
front - Get the first element.
 
size_t size() const
size - Get the array size.
 
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
 
LLVM Basic Block Representation.
 
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
 
const Function * getParent() const
Return the enclosing method, or null if none.
 
InstListType::iterator iterator
Instruction iterators...
 
BinaryOps getOpcode() const
 
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
 
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
 
This class represents a no-op cast from one type to another.
 
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
 
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
 
This class is the base class for the comparison instructions.
 
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
 
OtherOps getOpcode() const
Get the opcode casted to the right type.
 
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
 
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
 
static LLVM_ABI Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
 
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
 
This is the shared class of boolean and integer constants.
 
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
 
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
 
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
 
const APInt & getValue() const
Return the constant as an APInt value reference.
 
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
 
This is an important base class in LLVM.
 
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
 
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
 
A parsed version of the target data layout string in and methods for querying it.
 
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
 
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
 
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
 
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
 
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
 
Common base class shared among various IRBuilders.
 
This instruction inserts a single (scalar) element into a VectorType value.
 
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
 
VectorType * getType() const
Overload to return most specific vector type.
 
This instruction inserts a struct field of array element value into an aggregate value.
 
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
 
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
 
Instruction * foldSelectShuffle(ShuffleVectorInst &Shuf)
Try to fold shuffles that are the equivalent of a vector select.
 
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
 
Instruction * visitInsertValueInst(InsertValueInst &IV)
Try to find redundant insertvalue instructions, like the following ones: %0 = insertvalue { i8,...
 
Instruction * visitInsertElementInst(InsertElementInst &IE)
 
Instruction * visitExtractElementInst(ExtractElementInst &EI)
 
Instruction * simplifyBinOpSplats(ShuffleVectorInst &SVI)
 
Instruction * foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI)
Look for chain of insertvalue's that fully define an aggregate, and trace back the values inserted,...
 
Instruction * visitShuffleVectorInst(ShuffleVectorInst &SVI)
 
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
 
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
 
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
 
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
 
void addToWorklist(Instruction *I)
 
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
 
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
 
const SimplifyQuery & getSimplifyQuery() const
 
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
 
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
 
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
 
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
 
LLVM_ABI void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
 
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
 
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
 
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
 
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
 
A wrapper class for inspecting calls to intrinsic functions.
 
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
 
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
 
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
 
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
 
unsigned getNumIncomingValues() const
Return the number of incoming edges.
 
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
 
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
 
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
 
This class represents the LLVM 'select' instruction.
 
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
 
This instruction constructs a fixed permutation of two input vectors.
 
bool changesLength() const
Return true if this shuffle returns a vector with a different number of elements than its source vect...
 
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
 
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
 
VectorType * getType() const
Overload to return most specific vector type.
 
bool increasesLength() const
Return true if this shuffle returns a vector with a greater number of elements than its source vector...
 
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
 
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
 
bool isSelect() const
Return true if this shuffle chooses elements from its source vectors without lane crossings and all o...
 
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
 
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
 
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
 
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
 
bool all() const
Returns true if all bits are set.
 
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
 
reference emplace_back(ArgTypes &&... Args)
 
void push_back(const T &Elt)
 
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
 
This class represents a truncation of integer types.
 
The instances of the Type class are immutable: once they are created, they are never changed.
 
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
 
bool isVectorTy() const
True if this is an instance of VectorType.
 
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
 
LLVM_ABI unsigned getStructNumElements() const
 
LLVM_ABI uint64_t getArrayNumElements() const
 
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
 
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
 
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
 
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
 
bool isIntegerTy() const
True if this is an instance of IntegerType.
 
TypeID getTypeID() const
Return the type id for the type.
 
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
 
static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
 
UnaryOps getOpcode() const
 
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
 
A Use represents the edge between a Value definition and its users.
 
Value * getOperand(unsigned i) const
 
LLVM Value Representation.
 
Type * getType() const
All values are typed, get the type of this value.
 
LLVM_ABI const Value * DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) const
Translate PHI node to its predecessor from the given basic block.
 
bool hasOneUse() const
Return true if there is exactly one use of this value.
 
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
 
iterator_range< user_iterator > users()
 
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
 
iterator_range< use_iterator > uses()
 
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
 
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
 
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
 
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
 
Type * getElementType() const
 
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
 
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
 
const ParentTy * getParent() const
 
self_iterator getIterator()
 
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
 
@ C
The default llvm calling convention, compatible with C.
 
@ BasicBlock
Various leaf nodes.
 
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
 
BinaryOpc_match< LHS, RHS, false > m_BinOp(unsigned Opcode, const LHS &L, const RHS &R)
 
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
 
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
 
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
 
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
 
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
 
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
 
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
 
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
 
bool match(Val *V, const Pattern &P)
 
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
 
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
 
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
 
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
 
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
 
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
 
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
 
OneOps_match< OpTy, Instruction::Load > m_Load(const OpTy &Op)
Matches LoadInst.
 
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
 
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
 
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
 
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
 
class_match< UnaryOperator > m_UnOp()
Match an arbitrary unary operation and ignore it.
 
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
 
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
 
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
 
auto m_Undef()
Match an arbitrary undef constant.
 
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
 
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
 
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
 
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
 
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
 
friend class Instruction
Iterator for Instructions in a `BasicBlock.
 
This is an optimization pass for GlobalISel generic memory operations.
 
FunctionAddr VTableAddr Value
 
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
 
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
 
LLVM_ABI llvm::SmallVector< int, 16 > createUnaryMask(ArrayRef< int > Mask, unsigned NumElts)
Given a shuffle mask for a binary shuffle, create the equivalent shuffle mask assuming both operands ...
 
LLVM_ABI Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
 
auto dyn_cast_or_null(const Y &Val)
 
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
 
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
 
LLVM_ABI Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
 
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
 
constexpr int PoisonMaskElem
 
LLVM_ABI Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
 
DWARFExpression::Operation Op
 
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Don't use information from its non-constant operands.
 
LLVM_ABI Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
 
constexpr unsigned BitWidth
 
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
 
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
 
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
 
auto predecessors(const MachineBasicBlock *BB)
 
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
 
bool pred_empty(const BasicBlock *BB)
 
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
 
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
 
LLVM_ABI Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
 
LLVM_ABI bool scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
Attempt to narrow/widen the Mask shuffle mask to the NumDstElts target width.
 
LLVM_ABI int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
 
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
 
These are the ingredients in an alternate form binary operator as described below.
 
BinopElts(BinaryOperator::BinaryOps Opc=(BinaryOperator::BinaryOps) 0, Value *V0=nullptr, Value *V1=nullptr)
 
BinaryOperator::BinaryOps Opcode
 
A MapVector that performs no allocations if smaller than a certain size.