#define DEBUG_TYPE "gi-combiner"

                       cl::desc("Force all indexed operations to be "
                                "legal for the GlobalISel combiner"));

      RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {

  return *Builder.getMF().getSubtarget().getTargetLowering();
 
 
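// Byte-index helpers (littleEndianByteAt / bigEndianByteAt in upstream LLVM):
// map byte I of a ByteWidth-byte value to its offset in memory. For
// ByteWidth = 4, little-endian maps I -> I while big-endian maps I -> 3 - I,
// which is what the `ByteWidth - I - 1` below computes.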
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");

  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
 
 
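// Given a map from memory byte offsets to byte positions in the combined
// value, decide whether the access pattern is big-endian, little-endian, or
// neither (std::nullopt).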
static std::optional<bool>
isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
            int64_t LowestIdx) {
  unsigned Width = MemOffset2Idx.size();
  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())
      return std::nullopt;
    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");
    LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
    BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
    if (!BigEndian && !LittleEndian)
      return std::nullopt;
  }

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  return BigEndian;
}
 
 
  assert(LI && "Must have LegalizerInfo to query isLegal!");

  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});

  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(FromReg, ToReg);

  Observer.finishedChangingAllUsesOfReg();
 
 
                                       unsigned ToOpcode) const {

    MRI.setRegBank(Reg, *RegBank);

  if (MI.getOpcode() != TargetOpcode::COPY)

  MI.eraseFromParent();

  if (!MRI.hasOneNonDBGUse(OrigOp))
 
  std::optional<MachineOperand> MaybePoisonOperand;

    if (!Operand.isReg())

    if (!MaybePoisonOperand)
      MaybePoisonOperand = Operand;

  if (!MaybePoisonOperand) {

      B.buildCopy(DstOp, OrigOp);

  Register MaybePoisonOperandReg = MaybePoisonOperand->getReg();
  LLT MaybePoisonOperandRegTy = MRI.getType(MaybePoisonOperandReg);

    auto Freeze = B.buildFreeze(MaybePoisonOperandRegTy, MaybePoisonOperandReg);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");

    assert(Def && "Operand not defined");
    if (!MRI.hasOneNonDBGUse(Reg))

    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:

        Ops.push_back(BuildVecMO.getReg());

    case TargetOpcode::G_IMPLICIT_DEF: {
      LLT OpType = MRI.getType(Reg);

        Undef = Builder.buildUndef(OpType.getScalarType());

      assert(MRI.getType(Undef->getOperand(0).getReg()) ==
                 OpType.getScalarType() &&
             "All undefs should have the same type");

      for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());

  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

          {TargetOpcode::G_BUILD_VECTOR, {DstTy, MRI.getType(Ops[0])}})) {

  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  MI.eraseFromParent();
 
 
  Register SrcVec1 = Shuffle.getSrc1Reg();
  Register SrcVec2 = Shuffle.getSrc2Reg();
  LLT EltTy = MRI.getType(SrcVec1).getElementType();
  int Width = MRI.getType(SrcVec1).getNumElements();

  auto Unmerge1 = Builder.buildUnmerge(EltTy, SrcVec1);
  auto Unmerge2 = Builder.buildUnmerge(EltTy, SrcVec2);

  for (int Val : Shuffle.getMask()) {

    else if (Val < Width)
      Extracts.push_back(Unmerge1.getReg(Val));
    else
      Extracts.push_back(Unmerge2.getReg(Val - Width));

  assert(Extracts.size() > 0 && "Expected at least one element in the shuffle");
  if (Extracts.size() == 1)
    Builder.buildCopy(MI.getOperand(0).getReg(), Extracts[0]);
  else
    Builder.buildBuildVector(MI.getOperand(0).getReg(), Extracts);
  MI.eraseFromParent();
 
 
  if (!ConcatMI1 || !ConcatMI2)

  if (MRI.getType(ConcatMI1->getSourceReg(0)) !=
      MRI.getType(ConcatMI2->getSourceReg(0)))

  LLT ConcatSrcTy = MRI.getType(ConcatMI1->getReg(1));
  LLT ShuffleSrcTy1 = MRI.getType(MI.getOperand(1).getReg());

  for (unsigned i = 0; i < Mask.size(); i += ConcatSrcNumElt) {

      for (unsigned j = 1; j < ConcatSrcNumElt; j++) {
        if (i + j >= Mask.size())

        if (Mask[i + j] != -1)

              {TargetOpcode::G_IMPLICIT_DEF, {ConcatSrcTy}}))

    } else if (Mask[i] % ConcatSrcNumElt == 0) {
      for (unsigned j = 1; j < ConcatSrcNumElt; j++) {
        if (i + j >= Mask.size())

        if (Mask[i + j] != Mask[i] + static_cast<int>(j))

        Ops.push_back(ConcatMI1->getSourceReg(Mask[i] / ConcatSrcNumElt));

        Ops.push_back(ConcatMI2->getSourceReg(Mask[i] / ConcatSrcNumElt -
                                              ConcatMI1->getNumSources()));

          {TargetOpcode::G_CONCAT_VECTORS,
           {MRI.getType(MI.getOperand(0).getReg()), ConcatSrcTy}}))

      SrcTy = MRI.getType(Reg);

  assert(SrcTy.isValid() && "Unexpected full undef vector in concat combine");

        UndefReg = Builder.buildUndef(SrcTy).getReg(0);

    Builder.buildConcatVectors(MI.getOperand(0).getReg(), Ops);

  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());

  LLT SrcType = MRI.getType(Src1);

  unsigned DstNumElts = DstType.getNumElements();
  unsigned SrcNumElts = SrcType.getNumElements();

  if (DstNumElts < 2 * SrcNumElts)

  if (DstNumElts % SrcNumElts != 0)

  unsigned NumConcat = DstNumElts / SrcNumElts;

  for (unsigned i = 0; i != DstNumElts; ++i) {

    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))

    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;

  for (auto Src : ConcatSrcs) {

        UndefReg = Builder.buildUndef(SrcType).getReg(0);
      Ops.push_back(UndefReg);

  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  MI.eraseFromParent();
 
 
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,

      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&

  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

        OpcodeForCandidate == TargetOpcode::G_ZEXT)

    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

static void InsertInsnsWithoutSideEffectsBeforeUse(

    InsertBB = PredBB->getMBB();

  if (InsertBB == DefMI.getParent()) {

    Inserter(InsertBB, std::next(InsertPt), UseMO);
 
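// Map an extend opcode to the extending-load opcode it can fold into:
// G_ANYEXT -> G_LOAD, G_SEXT -> G_SEXTLOAD, G_ZEXT -> G_ZEXTLOAD.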
  unsigned CandidateLoadOpc;

  case TargetOpcode::G_ANYEXT:
    CandidateLoadOpc = TargetOpcode::G_LOAD;
    break;
  case TargetOpcode::G_SEXT:
    CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXT:
    CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
    break;

  return CandidateLoadOpc;
 
 
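// matchCombineExtendingLoads: scan every extending use of a loaded value and
// pick a preferred one, so (sext/zext/anyext (load p)) becomes a single
// G_SEXTLOAD/G_ZEXTLOAD/G_LOAD of the wider type.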
  LLT LoadValueTy = MRI.getType(LoadReg);

  unsigned PreferredOpcode =

          ? TargetOpcode::G_ANYEXT

  Preferred = {LLT(), PreferredOpcode, nullptr};
  for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
      const auto &MMO = LoadMI->getMMO();

        LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());

        if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})

      Preferred = ChoosePreferredUse(MI, Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),

  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
 
 
    if (PreviouslyEmitted) {

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());

    EmittedInsns[InsertIntoBB] = NewMI;

  MI.setDesc(Builder.getTII().get(LoadOpc));

  for (auto *UseMO : Uses) {

        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {

      const LLT UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {

          InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,

    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);

  MI.getOperand(0).setReg(ChosenDstReg);
 
 
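// Fold a mask into the load feeding a G_AND. A minimal sketch of the shape
// being matched (MIR, hypothetical registers):
//   %v:_(s32) = G_LOAD %p  ;  %d:_(s32) = G_AND %v, 255
// becomes an 8-bit G_ZEXTLOAD of %p when that is legal for the target.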
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  if (MRI.getType(Dst).isVector())

  APInt MaskVal = MaybeMask->Value;

  if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))

  LLT RegTy = MRI.getType(LoadReg);

  if (MaskSizeBits > LoadSizeBits.getValue())

  else if (LoadSizeBits.getValue() > MaskSizeBits ||

          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))

    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();

    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
 
 
         "shouldn't consider debug uses");

  if (DefOrUse == MBB.end())

  return &*DefOrUse == &DefMI;

         "shouldn't consider debug uses");

  else if (DefMI.getParent() != UseMI.getParent())
 
 
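// A G_SEXT_INREG of a (possibly truncated) sign-extending load is redundant
// when the load already sign-extends from at least as many bits.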
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  if (MRI.getType(SrcReg).isVector())

    LoadUser = TruncSrc;

  uint64_t SizeInBits = MI.getOperand(2).getImm();

    auto LoadSizeBits = LoadMI->getMemSizeInBits();

        MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits.getValue())

    if (LoadSizeBits == SizeInBits)
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
  MI.eraseFromParent();
 
 
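// Fold G_SEXT_INREG into the feeding load by shrinking it to a G_SEXTLOAD of
// the sign-extended width, when that narrower load is legal.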
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  LLT RegTy = MRI.getType(DstReg);

  if (!LoadDef || !MRI.hasOneNonDBGUse(SrcReg))

  uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();

  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);

  if (NewSizeBits < 8)

  if (LoadDef->isSimple())

  else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())

                                 {MRI.getType(LoadDef->getDstReg()),
                                  MRI.getType(LoadDef->getPointerReg())},

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
 
 
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;

  auto &MMO = LoadDef->getMMO();
  Builder.setInstrAndDebugLoc(*LoadDef);

  auto PtrInfo = MMO.getPointerInfo();
  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
  Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),

  MI.eraseFromParent();
 
 
  auto *MF = MI->getMF();

    AM.BaseOffs = CstOff->getSExtValue();

      MF->getDataLayout(), AM,

                    MF->getFunction().getContext()),
      MI->getMMO().getAddrSpace());
 
 
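// Map a load/store opcode to its indexed counterpart, i.e. the form that also
// writes the incremented pointer back to a register.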
  case TargetOpcode::G_LOAD:
    return TargetOpcode::G_INDEXED_LOAD;
  case TargetOpcode::G_STORE:
    return TargetOpcode::G_INDEXED_STORE;
  case TargetOpcode::G_ZEXTLOAD:
    return TargetOpcode::G_INDEXED_ZEXTLOAD;
  case TargetOpcode::G_SEXTLOAD:
    return TargetOpcode::G_INDEXED_SEXTLOAD;
 
 
bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const {

  if (IndexedOpc == TargetOpcode::G_INDEXED_STORE)
    OpTys = {PtrTy, Ty, Ty};
  else
    OpTys = {Ty, PtrTy};

  LegalityQuery Q(IndexedOpc, OpTys, MemDescrs);

    cl::desc("Number of uses of a base pointer to check before it is no longer "
             "considered for post-indexing."));
 
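// findPostIndexCandidate: look for a G_PTR_ADD user of the access's pointer
// that could become the write-back of a post-indexed access, e.g.
//   %v = G_LOAD %p  ;  %p2 = G_PTR_ADD %p, %off
// can fold into one post-indexed load producing both %v and %p2.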
                                            bool &RematOffset) const {

  if (MRI.hasOneNonDBGUse(Ptr))

  if (!isIndexedLoadStoreLegal(LdSt))

  auto *PtrDef = MRI.getVRegDef(Ptr);

  unsigned NumUsesChecked = 0;
  for (auto &Use : MRI.use_nodbg_instructions(Ptr)) {

    if (!PtrAdd || MRI.use_nodbg_empty(PtrAdd->getReg(0)))

    if (StoredValDef == &Use)

    Offset = PtrAdd->getOffsetReg();

        !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset,

    RematOffset = false;

      if (OffsetDef->getOpcode() != TargetOpcode::G_CONSTANT)

    for (auto &BasePtrUse : MRI.use_nodbg_instructions(PtrAdd->getBaseReg())) {
      if (&BasePtrUse == PtrDef)

      if (BasePtrLdSt && BasePtrLdSt != &LdSt &&

          isIndexedLoadStoreLegal(*BasePtrLdSt))

        Register PtrAddDefReg = BasePtrUseDef->getReg(0);
        for (auto &BaseUseUse : MRI.use_nodbg_instructions(PtrAddDefReg)) {

          if (BaseUseUse.getParent() != LdSt.getParent())

    Addr = PtrAdd->getReg(0);
    Base = PtrAdd->getBaseReg();

      MRI.hasOneNonDBGUse(Addr))
 
  if (!isIndexedLoadStoreLegal(LdSt))

  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)

    if (Base == St->getValueReg())

    if (St->getValueReg() == Addr)

  for (auto &AddrUse : MRI.use_nodbg_instructions(Addr))
    if (AddrUse.getParent() != LdSt.getParent())

  bool RealUse = false;
  for (auto &AddrUse : MRI.use_nodbg_instructions(Addr)) {
 
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

  assert(MRI.getType(MI.getOperand(0).getReg()) == VecEltTy);

  if (!LoadMI->isSimple())

  const unsigned MaxIter = 20;

    if (II->isLoadFoldBarrier())

    if (Iter++ == MaxIter)

    int Elt = CVal->getZExtValue();

  Register VecPtr = LoadMI->getPointerReg();
  LLT PtrTy = MRI.getType(VecPtr);

          {TargetOpcode::G_LOAD, {VecEltTy, PtrTy}, {MMDesc}}))

    B.buildLoad(Result, finalPtr, PtrInfo, Alignment);
 
 
  MatchInfo.IsPre = findPreIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,

  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,
 
 
  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;

    auto *OldCst = MRI.getVRegDef(MatchInfo.Offset);

                                        *OldCst->getOperand(1).getCImm());
    MatchInfo.Offset = NewCst.getReg(0);

  auto MIB = Builder.buildInstr(NewOpcode);

    MIB.addDef(MatchInfo.Addr);
    MIB.addUse(MI.getOperand(0).getReg());

    MIB.addDef(MI.getOperand(0).getReg());
    MIB.addDef(MatchInfo.Addr);

  MIB.addUse(MatchInfo.Base);
  MIB.addUse(MatchInfo.Offset);
  MIB.addImm(MatchInfo.IsPre);
  MIB->cloneMemRefs(*MI.getMF(), MI);
  MI.eraseFromParent();
 
 
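// If both x / y and x % y are computed with the same signedness in the same
// block, merge them into a single G_SDIVREM / G_UDIVREM producing both
// results.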
  unsigned Opcode = MI.getOpcode();
  bool IsDiv, IsSigned;

  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    IsDiv = true;
    IsSigned = Opcode == TargetOpcode::G_SDIV;
    break;
  }
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    IsDiv = false;
    IsSigned = Opcode == TargetOpcode::G_SREM;
    break;
  }

  unsigned DivOpcode, RemOpcode, DivremOpcode;
  if (IsSigned) {
    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;
  } else {
    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;
  }

  for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
    if (MI.getParent() == UseMI.getParent() &&
        ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
         (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
 
 
  unsigned Opcode = MI.getOpcode();
  assert(OtherMI && "OtherMI shouldn't be empty.");

  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
    DestDivReg = MI.getOperand(0).getReg();

    DestRemReg = MI.getOperand(0).getReg();

      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;

  Builder.setInstrAndDebugLoc(*FirstInst);
  Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
                              : TargetOpcode::G_UDIVREM,
                     {DestDivReg, DestRemReg},

  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_BR);

  if (BrIt == MBB->begin())

  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)

  return BrCondTarget != MI.getOperand(0).getMBB() &&
         MBB->isLayoutSuccessor(BrCondTarget);
 
 
  Builder.setInstrAndDebugLoc(*BrCond);

  auto True = Builder.buildConstant(

  MI.getOperand(0).setMBB(FallthroughBB);

  return Helper.lowerMemcpyInline(MI) ==

                                            unsigned MaxLen) const {
 
 
  switch (MI.getOpcode()) {

  case TargetOpcode::G_FNEG: {
    Result.changeSign();

  case TargetOpcode::G_FABS: {

  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {

    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  case TargetOpcode::G_FSQRT: {

    Result = APFloat(sqrt(Result.convertToDouble()));

  case TargetOpcode::G_FLOG2: {

  Builder.buildFConstant(MI.getOperand(0), *NewCst);
  MI.eraseFromParent();
 
 
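// Reassociate chained constant offsets:
//   G_PTR_ADD (G_PTR_ADD %base, C1), C2  -->  G_PTR_ADD %base, C1 + C2,
// unless folding the constants would break a legal addressing mode.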
  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)

  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)

  Type *AccessTy = nullptr;
  auto &MF = *MI.getMF();
  for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {

                               MF.getFunction().getContext());

  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;

    AMOld.BaseOffs = MaybeImmVal->Value.getSExtValue();

    unsigned AS = MRI.getType(Add2).getAddressSpace();
    const auto &TLI = *MF.getSubtarget().getTargetLowering();
    if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
        !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))

  unsigned PtrAddFlags = MI.getFlags();
  unsigned LHSPtrAddFlags = Add2Def->getFlags();

  MatchInfo.Flags = Flags;
 
 
  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");

  LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());

  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
 
 
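// Fold (shift (shift x, C0), C1) into shift x, C0 + C1 for matching shift
// opcodes; the apply step clamps or zeroes the result when the combined
// amount reaches the scalar bit width.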
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

      (MaybeImmVal->Value.getZExtValue() + MaybeImm2Val->Value).getZExtValue();

  if (Opcode == TargetOpcode::G_USHLSAT &&
      MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
 
 
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
  unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
  auto Imm = MatchInfo.Imm;

  if (Imm >= ScalarSizeInBits) {

    if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
      Builder.buildConstant(MI.getOperand(0), 0);
      MI.eraseFromParent();

    Imm = ScalarSizeInBits - 1;

  LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());

  MI.getOperand(1).setReg(MatchInfo.Reg);
  MI.getOperand(2).setReg(NewImm);
 
 
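// Reassociate a shift over a one-use bitwise op, e.g.
//   shl (or (shl x, C0), y), C1  -->  or (shl x, C0 + C1), (shl y, C1).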
  unsigned ShiftOpcode = MI.getOpcode();
  assert((ShiftOpcode == TargetOpcode::G_SHL ||
          ShiftOpcode == TargetOpcode::G_ASHR ||
          ShiftOpcode == TargetOpcode::G_LSHR ||
          ShiftOpcode == TargetOpcode::G_USHLSAT ||
          ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT or G_SSHLSAT");

  Register LogicDest = MI.getOperand(1).getReg();
  if (!MRI.hasOneNonDBGUse(LogicDest))

  unsigned LogicOpcode = LogicMI->getOpcode();
  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
      LogicOpcode != TargetOpcode::G_XOR)

  const Register C1 = MI.getOperand(2).getReg();

  if (!MaybeImmVal || MaybeImmVal->Value == 0)

  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();

    if (MI->getOpcode() != ShiftOpcode ||
        !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

    ShiftVal = MaybeImmVal->Value.getSExtValue();

  if (matchFirstShift(LogicMIOp1, C0Val)) {

    MatchInfo.Shift2 = LogicMIOp1;
  } else if (matchFirstShift(LogicMIOp2, C0Val)) {

    MatchInfo.Shift2 = LogicMIOp2;

  MatchInfo.ValSum = C0Val + C1Val;

  if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())

  MatchInfo.Logic = LogicMI;
 
 
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
          Opcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT or G_SSHLSAT");

  LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
  LLT DestType = MRI.getType(MI.getOperand(0).getReg());

      Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);

  Register Shift2Const = MI.getOperand(2).getReg();

                        .buildInstr(Opcode, {DestType},

  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");

  auto *SrcDef = MRI.getVRegDef(SrcReg);
  assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
          SrcDef->getOpcode() == TargetOpcode::G_OR) &&
         "Unexpected op");
  LLT SrcTy = MRI.getType(SrcReg);

    auto S1 = B.buildShl(SrcTy, X, ShiftReg);
    auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
    B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
 
 
  assert(MI.getOpcode() == TargetOpcode::G_LSHR && "Expected a G_LSHR");

  unsigned OpSizeInBits = MRI.getType(N0).getScalarSizeInBits();

  LLT InnerShiftTy = MRI.getType(InnerShift);

  if ((N1C + N001C).ult(InnerShiftSize)) {

    if ((N001C + OpSizeInBits) == InnerShiftSize)

    if (MRI.hasOneUse(N0) && MRI.hasOneUse(InnerShift)) {
      MatchInfo.Mask = true;

  assert(MI.getOpcode() == TargetOpcode::G_LSHR && "Expected a G_LSHR");

  if (MatchInfo.Mask) {

    Builder.buildTrunc(Dst, Shift);
  MI.eraseFromParent();
 
 
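// Multiply by a power of two becomes a left shift:
//   G_MUL x, (1 << k)  -->  G_SHL x, k,
// with exactLogBase2() returning -1 when the constant is not a power of two.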
                                          unsigned &ShiftVal) const {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);

                                          unsigned &ShiftVal) const {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());

  MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
  MI.getOperand(2).setReg(ShiftCst.getReg(0));
 
 
    auto NegCst = B.buildConstant(Ty, -Imm);

    MI.setDesc(B.getTII().get(TargetOpcode::G_ADD));
    MI.getOperand(2).setReg(NegCst.getReg(0));

    if (Imm.isMinSignedValue())
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SHL && VT);

  if (!MaybeShiftAmtVal)

    LLT SrcTy = MRI.getType(ExtSrc);

  int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
  MatchData.Reg = ExtSrc;
  MatchData.Imm = ShiftAmt;

  unsigned MinLeadingZeros = VT->getKnownZeroes(ExtSrc).countl_one();
  unsigned SrcTySize = MRI.getType(ExtSrc).getScalarSizeInBits();
  return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
 
 
  int64_t ShiftAmtVal = MatchData.Imm;

  LLT ExtSrcTy = MRI.getType(ExtSrcReg);
  auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);

      Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
  Builder.buildZExt(MI.getOperand(0), NarrowShift);
  MI.eraseFromParent();
 
 
  for (unsigned I = 0; I < Merge.getNumSources(); ++I)

  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())

  for (unsigned I = 0; I < MergedValues.size(); ++I)
    if (MergedValues[I] != Unmerge->getReg(I))

  MatchInfo = Unmerge->getSourceReg();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
  LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));

  if (SrcMergeTy != Dst0Ty && !SameSize)

  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
    Operands.push_back(SrcInstr->getSourceReg(Idx));
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;

  LLT SrcTy = MRI.getType(Operands[0]);
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  bool CanReuseInputDirectly = DstTy == SrcTy;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();

    const auto &DstCB = MRI.getRegClassOrRegBank(DstReg);
    if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(SrcReg)) {
      SrcReg = Builder.buildCopy(MRI.getType(SrcReg), SrcReg).getReg(0);
      MRI.setRegClassOrRegBank(SrcReg, DstCB);

    if (CanReuseInputDirectly)

      Builder.buildCast(DstReg, SrcReg);

  MI.eraseFromParent();
 
 
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();

  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
      SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)

  LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());

  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {

    Val = Val.lshr(ShiftAmt);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();
    Builder.buildConstant(DstReg, Csts[Idx]);

  MI.eraseFromParent();
 
 
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();

    unsigned NumElems = MI.getNumOperands() - 1;
    for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
      Register DstReg = MI.getOperand(Idx).getReg();
      B.buildUndef(DstReg);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  if (MRI.getType(MI.getOperand(0).getReg()).isVector() ||
      MRI.getType(MI.getOperand(MI.getNumDefs()).getReg()).isVector())

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
    if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  Register Dst0Reg = MI.getOperand(0).getReg();
  Builder.buildTrunc(Dst0Reg, SrcReg);
  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();
  LLT Dst0Ty = MRI.getType(Dst0Reg);

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  LLT SrcTy = MRI.getType(SrcReg);
  if (SrcTy.isVector())

  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  Register Dst0Reg = MI.getOperand(0).getReg();

      MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());

         "Expecting a G_ZEXT");

  LLT Dst0Ty = MRI.getType(Dst0Reg);
  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);

    Builder.buildZExt(Dst0Reg, ZExtSrcReg);

           "ZExt src doesn't fit in destination");

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {

      ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);

  MI.eraseFromParent();
 
 
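// When a wide shift's constant amount is at least half the bit width, the
// result can be assembled from the unmerged halves directly; e.g. a 64-bit
// G_LSHR by 32..63 only needs the high half shifted into the low lane.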
                                                unsigned TargetShiftSize,
                                                unsigned &ShiftVal) const {
  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  unsigned Size = Ty.getSizeInBits();
  if (Size <= TargetShiftSize)

  ShiftVal = MaybeImmVal->Value.getSExtValue();
  return ShiftVal >= Size / 2 && ShiftVal < Size;
 
 
  LLT Ty = MRI.getType(SrcReg);
  unsigned Size = Ty.getSizeInBits();
  unsigned HalfSize = Size / 2;
  assert(ShiftVal >= HalfSize);

  auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
  unsigned NarrowShiftAmt = ShiftVal - HalfSize;

  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
    Register Narrowed = Unmerge.getReg(1);

    if (NarrowShiftAmt != 0) {
      Narrowed =
          Builder.buildLShr(HalfTy, Narrowed,
                            Builder.buildConstant(HalfTy, NarrowShiftAmt))
              .getReg(0);

    auto Zero = Builder.buildConstant(HalfTy, 0);
    Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero});
  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
    Register Narrowed = Unmerge.getReg(0);

    if (NarrowShiftAmt != 0) {
      Narrowed =
          Builder.buildShl(HalfTy, Narrowed,
                           Builder.buildConstant(HalfTy, NarrowShiftAmt))
              .getReg(0);

    auto Zero = Builder.buildConstant(HalfTy, 0);
    Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed});

    assert(MI.getOpcode() == TargetOpcode::G_ASHR);

      HalfTy, Unmerge.getReg(1),
      Builder.buildConstant(HalfTy, HalfSize - 1));

    if (ShiftVal == HalfSize) {

      Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi});
    } else if (ShiftVal == Size - 1) {

        HalfTy, Unmerge.getReg(1),
        Builder.buildConstant(HalfTy, ShiftVal - HalfSize));

  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  LLT DstTy = MRI.getType(DstReg);

  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  Builder.buildCopy(DstReg, Reg);
  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");

  Builder.buildZExtOrTrunc(DstReg, Reg);
  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_ADD);

  LLT IntTy = MRI.getType(LHS);

  PtrReg.second = false;
  for (Register SrcReg : {LHS, RHS}) {

      LLT PtrTy = MRI.getType(PtrReg.first);

    PtrReg.second = true;

  const bool DoCommute = PtrReg.second;

  LLT PtrTy = MRI.getType(LHS);

  auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
  Builder.buildPtrToInt(Dst, PtrAdd);
  MI.eraseFromParent();
 
 
                                                  APInt &NewCst) const {

  Register LHS = PtrAdd.getBaseReg();
  Register RHS = PtrAdd.getOffsetReg();

      auto DstTy = MRI.getType(PtrAdd.getReg(0));

      NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());

                                                  APInt &NewCst) const {

  Builder.buildConstant(Dst, NewCst);
  PtrAdd.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");

    SrcReg = OriginalSrcReg;
  LLT DstTy = MRI.getType(DstReg);

  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");

  LLT DstTy = MRI.getType(DstReg);

    unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
    return VT->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
 
 
  if (ShiftSize > 32 && TruncSize < 32)
 
 
    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  if (!MRI.hasOneNonDBGUse(SrcReg))

  LLT SrcTy = MRI.getType(SrcReg);
  LLT DstTy = MRI.getType(DstReg);

  case TargetOpcode::G_SHL: {

  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {

    for (auto &User : MRI.use_instructions(DstReg))
      if (User.getOpcode() == TargetOpcode::G_STORE)

    if (NewShiftTy == SrcTy)

           {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))

  MatchInfo = std::make_pair(SrcMI, NewShiftTy);
 
 
    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) const {

  LLT NewShiftTy = MatchInfo.second;

  LLT DstTy = MRI.getType(Dst);

  ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0);

          .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt})

  if (NewShiftTy == DstTy)

    Builder.buildTrunc(Dst, NewShift);
 
 
    return MO.isReg() &&
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

    return !MO.isReg() ||
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);

  return all_of(Mask, [](int Elt) { return Elt < 0; });

  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),

  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),

  assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
          MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
         "Expected an insert/extract element op");
  LLT VecTy = MRI.getType(MI.getOperand(1).getReg());

      MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;

                                            unsigned &OpIdx) const {

  OpIdx = Cst->isZero() ? 3 : 2;
 
 
  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())

        return MO.isReg() && MO.getReg().isPhysical();

    return I1->isIdenticalTo(*I2);

  if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {

    return I1->findRegisterDefOperandIdx(InstAndDef1->Reg, nullptr) ==

  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
         MaybeCst->getSExtValue() == C;

  std::optional<FPValueAndVReg> MaybeCst;

  return MaybeCst->Value.isExactlyValue(C);
 
 
                                                     unsigned OpIdx) const {
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  MI.eraseFromParent();

  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  MI.eraseFromParent();

                                                 unsigned ConstIdx) const {
  Register ConstReg = MI.getOperand(ConstIdx).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
 
 
  assert((MI.getOpcode() == TargetOpcode::G_FSHL ||
          MI.getOpcode() == TargetOpcode::G_FSHR) &&
         "This is not a funnel shift operation");

  Register ConstReg = MI.getOperand(3).getReg();
  LLT ConstTy = MRI.getType(ConstReg);
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  assert(VRegAndVal && "Value is not a constant");

  APInt NewConst = VRegAndVal->Value.urem(

      MI.getOpcode(), {MI.getOperand(0)},
      {MI.getOperand(1), MI.getOperand(2), NewConstInstr.getReg(0)});

  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);

                                        unsigned OpIdx) const {

                                         unsigned OpIdx) const {

  return MO.isReg() &&

                                                        unsigned OpIdx) const {
 
 
  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();
 
 
    MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) const {

  Register &NewLHS = std::get<0>(MatchInfo);
  Register &NewRHS = std::get<1>(MatchInfo);

    NewLHS = MaybeNewLHS;

  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&

  LLT DstTy = MRI.getType(DstReg);

  if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
                                   TargetOpcode::G_INSERT_VECTOR_ELT)

  MatchInfo.resize(NumElts);

    if (IntImm >= NumElts || IntImm < 0)

    if (!MatchInfo[IntImm])
      MatchInfo[IntImm] = TmpReg;

  if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)

  if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {

  return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
 
 
  auto GetUndef = [&]() {

    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
  MI.eraseFromParent();

    MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) const {

  std::tie(SubLHS, SubRHS) = MatchInfo;
  Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
  MI.eraseFromParent();
 
 
  unsigned LogicOpcode = MI.getOpcode();
  assert(LogicOpcode == TargetOpcode::G_AND ||
         LogicOpcode == TargetOpcode::G_OR ||
         LogicOpcode == TargetOpcode::G_XOR);

  if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))

  if (!LeftHandInst || !RightHandInst)

  unsigned HandOpcode = LeftHandInst->getOpcode();
  if (HandOpcode != RightHandInst->getOpcode())

  if (!XTy.isValid() || XTy != YTy)

  switch (HandOpcode) {

  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT: {

  case TargetOpcode::G_TRUNC: {

    LLT DstTy = MRI.getType(Dst);

  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {

    ExtraHandOpSrcReg = ZOp.getReg();

  auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);

  if (ExtraHandOpSrcReg.isValid())
 
 
         "Expected at least one instr to build?");

    assert(InstrToBuild.Opcode && "Expected a valid opcode?");
    assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");

    for (auto &OperandFn : InstrToBuild.OperandFns)

  MI.eraseFromParent();
 
 
    MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);

  int64_t ShlCst, AshrCst;

  if (ShlCst != AshrCst)

          {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))

  MatchInfo = std::make_tuple(Src, ShlCst);

    MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);

  std::tie(Src, ShiftAmt) = MatchInfo;
  unsigned Size = MRI.getType(Src).getScalarSizeInBits();
  Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  LLT Ty = MRI.getType(Dst);

      B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));

    auto Zero = B.buildConstant(Ty, 0);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_AND);

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

  assert(MI.getOpcode() == TargetOpcode::G_OR);

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {
 
 
  unsigned ExtBits = MI.getOperand(2).getImm();
  unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
  return VT->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);

                             int64_t Cst, bool IsVector, bool IsFP) {

  return (ScalarSizeBits == 1 && Cst == -1) ||
 
 
  unsigned NumOperands = BuildMI->getNumSources();

  for (I = 0; I < NumOperands; ++I) {
    auto SrcMI = MRI.getVRegDef(BuildMI->getSourceReg(I));
    auto SrcMIOpc = SrcMI->getOpcode();

    if (SrcMIOpc == TargetOpcode::G_TRUNC) {

        UnmergeMI = MRI.getVRegDef(SrcMI->getOperand(1).getReg());
        if (UnmergeMI->getOpcode() != TargetOpcode::G_UNMERGE_VALUES)

        auto UnmergeSrcMI = MRI.getVRegDef(SrcMI->getOperand(1).getReg());
        if (UnmergeMI != UnmergeSrcMI)

  for (; I < NumOperands; ++I) {
    auto SrcMI = MRI.getVRegDef(BuildMI->getSourceReg(I));
    auto SrcMIOpc = SrcMI->getOpcode();

    if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF)

  LLT UnmergeSrcTy = MRI.getType(MatchInfo);

  LLT UnmergeDstEltTy = MRI.getType(UnmergeDstReg);
  if (UnmergeSrcEltTy != UnmergeDstEltTy)

        !isLegal({TargetOpcode::G_CONCAT_VECTORS, {MidTy, UnmergeSrcTy}}))

    if (!isLegal({TargetOpcode::G_TRUNC, {DstTy, MidTy}}))
 
 
  LLT DstTy = MRI.getType(DstReg);
  LLT UnmergeSrcTy = MRI.getType(MatchInfo);

  if (DstTyNumElt / UnmergeSrcTyNumElt == 1) {

    for (unsigned I = 1; I < DstTyNumElt / UnmergeSrcTyNumElt; ++I)

    MidReg = Builder.buildConcatVectors(MidTy, ConcatRegs).getReg(0);

  Builder.buildTrunc(DstReg, MidReg);
  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_XOR);
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();

  if (!MRI.hasOneNonDBGUse(XorSrc))

  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {

    if (!MRI.hasOneNonDBGUse(Reg))

    switch (Def->getOpcode()) {

    case TargetOpcode::G_ICMP:

    case TargetOpcode::G_FCMP:

    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:

      RegsToNegate.push_back(Def->getOperand(1).getReg());
      RegsToNegate.push_back(Def->getOperand(2).getReg());

  if (Ty.isVector()) {

    if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))

  for (Register Reg : RegsToNegate) {

    switch (Def->getOpcode()) {

    case TargetOpcode::G_ICMP:
    case TargetOpcode::G_FCMP: {

    case TargetOpcode::G_AND:
      Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));

    case TargetOpcode::G_OR:
      Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));

  MI.eraseFromParent();
 
 
    MachineInstr &MI, std::pair<Register, Register> &MatchInfo) const {

  assert(MI.getOpcode() == TargetOpcode::G_XOR);

  Register SharedReg = MI.getOperand(2).getReg();

  if (!MRI.hasOneNonDBGUse(AndReg))

  return Y == SharedReg;

    MachineInstr &MI, std::pair<Register, Register> &MatchInfo) const {

  std::tie(X, Y) = MatchInfo;

  MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
  MI.getOperand(1).setReg(Not->getOperand(0).getReg());
  MI.getOperand(2).setReg(Y);
 
 
  Register DstReg = PtrAdd.getReg(0);
  LLT Ty = MRI.getType(DstReg);

  if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))

  if (Ty.isPointer()) {

    return ConstVal && *ConstVal == 0;

  assert(Ty.isVector() && "Expecting a vector type");

  Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
  PtrAdd.eraseFromParent();

  Register Pow2Src1 = MI.getOperand(2).getReg();
  LLT Ty = MRI.getType(DstReg);

  auto NegOne = Builder.buildConstant(Ty, -1);
  auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);

  MI.eraseFromParent();
 
 
                                              unsigned &SelectOpNo) const {

  if (Select->getOpcode() != TargetOpcode::G_SELECT ||
      !MRI.hasOneNonDBGUse(LHS)) {
    OtherOperandReg = LHS;

    if (Select->getOpcode() != TargetOpcode::G_SELECT ||
        !MRI.hasOneNonDBGUse(RHS))

  unsigned BinOpcode = MI.getOpcode();

  bool CanFoldNonConst =
      (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&

  if (CanFoldNonConst)

  LLT Ty = MRI.getType(Dst);
  unsigned BinOpcode = MI.getOpcode();

  if (SelectOperand == 1) {

    FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);

        Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);

    FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);

        Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);

  Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
  MI.eraseFromParent();
 
 
std::optional<SmallVector<Register, 8>>
CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
  assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");

  const unsigned MaxIter =

  for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {

    if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
      return std::nullopt;

  if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
    return std::nullopt;
 
static std::optional<std::pair<GZExtLoad *, int64_t>>

         "Expected Reg to only have one non-debug use?");

  if (Shift % MemSizeInBits != 0)
    return std::nullopt;

    return std::nullopt;

  if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
    return std::nullopt;

  return std::make_pair(Load, Shift / MemSizeInBits);
 
 
std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
CombinerHelper::findLoadOffsetsForLoadOrCombine(

    const unsigned MemSizeInBits) const {

  SmallSetVector<const MachineInstr *, 8> Loads;

  GZExtLoad *LowestIdxLoad = nullptr;

  SmallSet<int64_t, 8> SeenIdx;

  MachineBasicBlock *MBB = nullptr;
  const MachineMemOperand *MMO = nullptr;

  GZExtLoad *EarliestLoad = nullptr;

  GZExtLoad *LatestLoad = nullptr;

  for (auto Reg : RegsToVisit) {

      return std::nullopt;

    std::tie(Load, DstPos) = *LoadAndPos;

    MachineBasicBlock *LoadMBB = Load->getParent();

      return std::nullopt;

    auto &LoadMMO = Load->getMMO();

      return std::nullopt;

      LoadPtr = Load->getOperand(1).getReg();

    if (!SeenIdx.insert(Idx).second)
      return std::nullopt;

    if (BasePtr != LoadPtr)
      return std::nullopt;

    if (Idx < LowestIdx) {

      LowestIdxLoad = Load;

    if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
      return std::nullopt;

    if (!EarliestLoad || dominates(*Load, *EarliestLoad))
      EarliestLoad = Load;
    if (!LatestLoad || dominates(*LatestLoad, *Load))

         "Expected to find a load for each register?");
  assert(EarliestLoad != LatestLoad && EarliestLoad && LatestLoad &&
         "Expected at least two loads?");

  const unsigned MaxIter = 20;

    if (MI.isLoadFoldBarrier())
      return std::nullopt;
    if (Iter++ == MaxIter)
      return std::nullopt;

  return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
 
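// matchLoadOrCombine: recognize an OR-tree of zero-extended narrow loads that
// reassembles one contiguous wide value, and replace it with a single wide
// G_LOAD, plus a G_BSWAP when the byte order is opposite to the target's.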
  assert(MI.getOpcode() == TargetOpcode::G_OR);

  LLT Ty = MRI.getType(Dst);

  const unsigned WideMemSizeInBits = Ty.getSizeInBits();
  if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)

  auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);

  const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
  if (NarrowMemSizeInBits % 8 != 0)

  auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
      MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);

  std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;

  std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);

  bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;

  const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
  const unsigned ZeroByteOffset =

  auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
  if (ZeroOffsetIdx == MemOffset2Idx.end() ||
      ZeroOffsetIdx->second != LowestIdx)

          {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))

    MIB.setInstrAndDebugLoc(*LatestLoad);
    Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
    MIB.buildLoad(LoadDst, Ptr, *NewMMO);

      MIB.buildBSwap(Dst, LoadDst);
 
 
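// Push an extend that is the sole user of a G_PHI into the phi's predecessor
// blocks, so the phi itself carries the wider type.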
  if (MRI.getType(DstReg).isVector())

  if (!MRI.hasOneNonDBGUse(DstReg))

  ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);

  case TargetOpcode::G_ANYEXT:

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:

  if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))

  for (unsigned I = 0; I < PHI.getNumIncomingValues(); ++I) {

    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_CONSTANT:

      if (InSrcs.size() > 2)
 
  LLT ExtTy = MRI.getType(DstReg);

  for (unsigned I = 0; I < PHI.getNumIncomingValues(); ++I) {
    auto SrcReg = PHI.getIncomingValue(I);
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (!SrcMIs.insert(SrcMI))

    auto *MBB = SrcMI->getParent();

    if (InsertPt != MBB->end() && InsertPt->isPHI())
      InsertPt = MBB->getFirstNonPHI();

    Builder.setInsertPt(*SrcMI->getParent(), InsertPt);

    OldToNewSrcMap[SrcMI] = NewExt;

  auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
  NewPhi.addDef(DstReg);

      NewPhi.addMBB(MO.getMBB());

    auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
    NewPhi.addUse(NewSrc->getOperand(0).getReg());
 
 
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

  LLT SrcTy = MRI.getType(SrcVec);
  if (SrcTy.isScalableVector())

  if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())

  unsigned VecIdx = Cst->Value.getZExtValue();

  if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {

  if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
      SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)

  if (!MRI.hasOneNonDBGUse(SrcVec) &&

  LLT ScalarTy = MRI.getType(Reg);

  LLT DstTy = MRI.getType(DstReg);

  if (ScalarTy != DstTy) {

    Builder.buildTrunc(DstReg, Reg);
    MI.eraseFromParent();
 
 
    SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) const {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);

  LLT DstTy = MRI.getType(DstReg);

    if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)

    unsigned Idx = Cst->getZExtValue();

    ExtractedElts.set(Idx);
    SrcDstPairs.emplace_back(
        std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));

  return ExtractedElts.all();

    SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) const {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  for (auto &Pair : SrcDstPairs) {
    auto *ExtMI = Pair.second;

    ExtMI->eraseFromParent();

  MI.eraseFromParent();

  MI.eraseFromParent();
 
 
                                               bool AllowScalarConstants,
                                               /* ... */) const {
  assert(MI.getOpcode() == TargetOpcode::G_OR);
  // ...
  LLT Ty = MRI.getType(Dst);
  unsigned BitWidth = Ty.getScalarSizeInBits();
  // ...
  Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
  unsigned FshOpc = 0;
  // ...
  int64_t CstShlAmt = 0, CstLShrAmt;
  if (/* ... */ &&
      CstShlAmt + CstLShrAmt == BitWidth) {
    FshOpc = TargetOpcode::G_FSHR;
    // ...
  } else if (/* ... */) {
    FshOpc = TargetOpcode::G_FSHL;
    // ...
  } else if (/* ... */) {
    FshOpc = TargetOpcode::G_FSHR;
    // ...
  }
  // ...
  LLT AmtTy = MRI.getType(Amt);
  if (/* ... */ &&
      (!AllowScalarConstants || CstShlAmt == 0 || !Ty.isScalar()))
    return false;
  // ...
    B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
 
 
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  // ...
  unsigned RotateOpc =
      Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
  // ...

// ...
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  bool IsFSHL = Opc == TargetOpcode::G_FSHL;
  // ...
  MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
                                         : TargetOpcode::G_ROTR));
  MI.removeOperand(2);
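// Illustrative, standalone sketch (not part of this file): a funnel shift
// whose two inputs are equal is a rotate, which is why G_FSHL x, x, amt can
// be rewritten to G_ROTL x, amt with the duplicate operand removed.
#include <cassert>
#include <cstdint>

static uint32_t fshl32(uint32_t X, uint32_t Y, unsigned C) {
  C %= 32; // funnel shifts take the amount modulo the bit width
  return C ? (X << C) | (Y >> (32 - C)) : X;
}

static uint32_t rotl32(uint32_t X, unsigned C) {
  for (C %= 32; C; --C)
    X = (X << 1) | (X >> 31); // rotate left one bit at a time
  return X;
}

int main() {
  uint32_t X = 0xCAFEF00D;
  for (unsigned C = 0; C < 64; ++C)
    assert(fshl32(X, X, C) == rotl32(X, C));
  return 0;
}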
 
 
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  unsigned Bitsize =
      MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
  // ...
  bool OutOfRange = false;
  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
    // ...
      OutOfRange |= CI->getValue().uge(Bitsize);
    // ...
  };
  // ...

// ...
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  unsigned Bitsize =
      MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
  // ...
  LLT AmtTy = MRI.getType(Amt);
  auto Bits = Builder.buildConstant(AmtTy, Bitsize);
  Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
  // ...
  MI.getOperand(2).setReg(Amt);
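// Illustrative, standalone sketch (not part of this file): rotates are
// periodic in the bit width, so an out-of-range amount can be reduced with a
// remainder, which is exactly what the buildURem above emits.
#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t X, unsigned C) {
  while (C--)
    X = (X << 1) | (X >> 31);
  return X;
}

int main() {
  uint32_t X = 0x80000001u;
  assert(rotl32(X, 37) == rotl32(X, 37 % 32)); // 37 behaves like 5
  assert(rotl32(X, 64) == X);                  // whole multiples are identity
  return 0;
}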
 
 
                                                   int64_t &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // ...
  auto KnownRHS = VT->getKnownBits(MI.getOperand(3).getReg());
  if (KnownRHS.isUnknown())
    return false;
  // ...
  std::optional<bool> KnownVal;
  if (KnownRHS.isZero()) {
    // ...
    auto KnownLHS = VT->getKnownBits(MI.getOperand(2).getReg());
    // ...
  }
  // ...
                           MRI.getType(MI.getOperand(0).getReg()).isVector(),
  // ...

// ...
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
  auto KnownLHS = VT->getKnownBits(LHS);
  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
    return false;
  // ...
  LLT LHSTy = MRI.getType(LHS);
  // ...
  unsigned Op = TargetOpcode::COPY;
  if (DstSize != LHSSize)
    Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
 
 
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  // ...
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  // ...
  int64_t AndMaskBits;
  // ...
  if (AndMaskBits & OrMaskBits)
    return false;
  // ...
    if (MI.getOperand(1).getReg() == AndMaskReg)
      MI.getOperand(2).setReg(AndMaskReg);
    MI.getOperand(1).setReg(Src);
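// Illustrative, standalone sketch (not part of this file): if the OR's
// constant and the AND's constant share no bits, the OR contributes nothing
// and can be bypassed: ((X | OrMask) & AndMask) == (X & AndMask).
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t OrMask = 0xFF00, AndMask = 0x00FF; // disjoint
  assert((OrMask & AndMask) == 0);                  // the check above
  for (uint32_t X = 0; X < 100000; X += 777)
    assert(((X | OrMask) & AndMask) == (X & AndMask));
  return 0;
}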
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  // ...
  LLT Ty = MRI.getType(Src);
  // ...
  if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
    return false;
  int64_t Width = MI.getOperand(2).getImm();
  // ...
  if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
    return false;
  // ...
    auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
    auto Cst2 = B.buildConstant(ExtractTy, Width);
    B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
 
 
  LLT Ty = MRI.getType(Dst);
  // ...
  if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
    return false;
  // ...
  int64_t AndImm, LSBImm;
  // ...
  const unsigned Size = Ty.getScalarSizeInBits();
  // ...
  auto MaybeMask = static_cast<uint64_t>(AndImm);
  if (MaybeMask & (MaybeMask + 1))
    return false;
  // ...
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
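// Illustrative, standalone sketch (not part of this file): the bitfield
// extract pattern being matched. (X >> LSB) & Mask is UBFX(X, LSB, Width)
// whenever Mask is a run of low ones, i.e. Mask & (Mask + 1) == 0.
#include <cassert>
#include <cstdint>

static uint32_t ubfx32(uint32_t X, unsigned LSB, unsigned Width) {
  return (X >> LSB) & ((Width < 32 ? (1u << Width) : 0u) - 1u);
}

int main() {
  const uint32_t Mask = 0x3F;       // six low ones
  assert((Mask & (Mask + 1)) == 0); // the check the matcher performs
  uint32_t X = 0xDEADBEEF;
  assert(((X >> 8) & Mask) == ubfx32(X, 8, 6));
  return 0;
}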
 
 
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
  // ...
  const Register Dst = MI.getOperand(0).getReg();
  // ...
  const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
                                  ? TargetOpcode::G_SBFX
                                  : TargetOpcode::G_UBFX;
  // ...
  LLT Ty = MRI.getType(Dst);
  // ...
  if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
    return false;
  // ...
  const unsigned Size = Ty.getScalarSizeInBits();
  // ...
  if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
    return false;
  // ...
  if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
    return false;
  // ...
  const int64_t Pos = ShrAmt - ShlAmt;
  const int64_t Width = Size - ShrAmt;
  // ...
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
 
 
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);
  // ...
  const Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  // ...
  if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
    return false;
  // ...
  const unsigned Size = Ty.getScalarSizeInBits();
  if (ShrAmt < 0 || ShrAmt >= Size)
    return false;
  // ...
  if (0 == (SMask >> ShrAmt)) {
    // ...
      B.buildConstant(Dst, 0);
    // ...
  }
  // ...
  uint64_t UMask = SMask;
  // ...
  const int64_t Pos = ShrAmt;
  // ...
  if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
    return false;
  // ...
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
 
 
bool CombinerHelper::reassociationCanBreakAddressingModePattern(
    /* ... */) const {
  // ...
  Register Src1Reg = PtrAdd.getBaseReg();
  // ...
  Register Src2Reg = PtrAdd.getOffsetReg();
  // ...
  if (MRI.hasOneNonDBGUse(Src1Reg))
    return false;
  // ...
  const APInt &C1APIntVal = *C1;
  const APInt &C2APIntVal = *C2;
  const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();
  // ...
  for (auto &UseMI : MRI.use_nodbg_instructions(PtrAdd.getReg(0))) {
    // ...
    MachineInstr *ConvUseMI = &UseMI;
    unsigned ConvUseOpc = ConvUseMI->getOpcode();
    while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
           ConvUseOpc == TargetOpcode::G_PTRTOINT) {
      // ...
      if (!MRI.hasOneNonDBGUse(DefReg))
        break;
      ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
      // ...
    }
    // ...
    TargetLoweringBase::AddrMode AM;
    // ...
    unsigned AS = MRI.getType(LdStMI->getPointerReg()).getAddressSpace();
    // ...
                                   PtrAdd.getMF()->getFunction().getContext());
    const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
    if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
                                   /* ... */))
      // ...
    if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
                                   /* ... */))
      // ...
  }
 
  Register Src1Reg = MI.getOperand(1).getReg();
  if (RHS->getOpcode() != TargetOpcode::G_ADD)
    return false;
  // ...
  unsigned PtrAddFlags = MI.getFlags();
  unsigned AddFlags = RHS->getFlags();
  // ...
    LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());
    // ...
    auto NewBase =
        Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg(), Flags);
    // ...
    MI.getOperand(1).setReg(NewBase.getReg(0));
    MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
    // ...
  return !reassociationCanBreakAddressingModePattern(MI);
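// Illustrative, standalone sketch (not part of this file): the reassociation
// above, ptradd(Base, add(X, C)) -> ptradd(ptradd(Base, X), C), in plain
// pointer arithmetic. Moving the constant outward exposes it to addressing
// modes, which is why the addressing-mode check gates the rewrite.
#include <cassert>

int main() {
  int Buf[64] = {};
  int *Base = Buf;
  long X = 5, C = 3;
  assert(&Base[X + C] == &(&Base[X])[C]); // same address either way
  return 0;
}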
 
 
  std::optional<ValueAndVReg> LHSCstOff;
  // ...
  unsigned PtrAddFlags = MI.getFlags();
  unsigned LHSPtrAddFlags = LHSPtrAdd->getFlags();
  // ...
  bool IsNoUSWrap = IsNoUWrap && (PtrAddFlags & LHSPtrAddFlags &
                                  /* ... */);
  bool IsInBounds = IsNoUWrap && (PtrAddFlags & LHSPtrAddFlags &
                                  /* ... */);
  // ...
    LHSPtrAdd->moveBefore(&MI);
    // ...
    auto NewCst = B.buildConstant(MRI.getType(RHSReg), LHSCstOff->Value);
    // ...
    MI.getOperand(2).setReg(NewCst.getReg(0));
    // ...
    Observer.changingInstr(*LHSPtrAdd);
    LHSPtrAdd->getOperand(2).setReg(RHSReg);
    LHSPtrAdd->setFlags(Flags);
    // ...
  return !reassociationCanBreakAddressingModePattern(MI);
 
 
  Register Src2Reg = MI.getOperand(2).getReg();
  Register LHSSrc1 = LHSPtrAdd->getBaseReg();
  Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
  // ...
  unsigned PtrAddFlags = MI.getFlags();
  unsigned LHSPtrAddFlags = LHSPtrAdd->getFlags();
  // ...
    auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
    // ...
    MI.getOperand(1).setReg(LHSSrc1);
    MI.getOperand(2).setReg(NewCst.getReg(0));
    // ...
  return !reassociationCanBreakAddressingModePattern(MI);
 
 
  LLT OpRHSTy = MRI.getType(OpRHS);
  // ...
        auto NewCst = B.buildInstr(Opc, {OpRHSTy}, {OpLHSRHS, OpRHS});
        B.buildInstr(Opc, {DstReg}, {OpLHSLHS, NewCst});
  // ...
        auto NewLHSLHS = B.buildInstr(Opc, {OpRHSTy}, {OpLHSLHS, OpRHS});
        B.buildInstr(Opc, {DstReg}, {NewLHSLHS, OpLHSRHS});
  // ...

// ...
  unsigned Opc = MI.getOpcode();
 
 
                                             APInt &MatchInfo) const {
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
    MatchInfo = *MaybeCst;
  // ...

// ...
                                            APInt &MatchInfo) const {
  // ...
  MatchInfo = *MaybeCst;
  // ...

// ...
  MatchInfo =
      ConstantFP::get(MI.getMF()->getFunction().getContext(), *MaybeCst);
  // ...

// ...
  assert(MI.getOpcode() == TargetOpcode::G_FMA ||
         MI.getOpcode() == TargetOpcode::G_FMAD);
  auto [_, Op1, Op2, Op3] = MI.getFirst4Regs();
  // ...
  MatchInfo = ConstantFP::get(MI.getMF()->getFunction().getContext(), Op1F);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  // ...
  LLT WideTy = MRI.getType(Dst);
  // ...
  if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
    return false;
  // ...
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    // ...
  // ...
  auto Mask = Cst->Value;
  // ...
  unsigned NarrowWidth = Mask.countr_one();
  // ...
  auto &MF = *MI.getMF();
  // ...
  if (!TLI.isTruncateFree(WideTy, NarrowTy, Ctx) ||
      !TLI.isZExtFree(NarrowTy, WideTy, Ctx))
    return false;
  // ...
    auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS);
    auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS);
    auto NarrowBinOp =
        Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
    auto Ext = Builder.buildZExt(WideTy, NarrowBinOp);
    // ...
    MI.getOperand(1).setReg(Ext.getReg(0));
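// Illustrative, standalone sketch (not part of this file): why the AND mask
// lets the binop be narrowed. If the mask keeps only the low bits, those
// bits of an add/sub/mul/and/or/xor depend only on the low bits of the
// inputs, so the operation can be done narrow and zero-extended.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0x12345678, B = 0x9ABCDEF0;
  uint32_t Wide = (A + B) & 0xFF;                       // wide op, then mask
  uint32_t Narrow = (uint8_t)((uint8_t)A + (uint8_t)B); // narrow op, zext
  assert(Wide == Narrow);
  return 0;
}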
 
 
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
  // ...
    unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
                                                   : TargetOpcode::G_SADDO;
    MI.setDesc(Builder.getTII().get(NewOpc));
    MI.getOperand(3).setReg(MI.getOperand(2).getReg());
  // ...

// ...
  assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
         MI.getOpcode() == TargetOpcode::G_SMULO);
  // ...
    B.buildConstant(Dst, 0);
    B.buildConstant(Carry, 0);
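// Illustrative, standalone sketch (not part of this file): x * 2 overflows
// exactly when x + x does, so G_UMULO/G_SMULO by 2 can become
// G_UADDO/G_SADDO. Uses GCC/Clang's checked-arithmetic builtins.
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu}) {
    uint32_t M, A;
    bool MulOv = __builtin_mul_overflow(X, 2u, &M);
    bool AddOv = __builtin_add_overflow(X, X, &A);
    assert(MulOv == AddOv && M == A);
  }
  return 0;
}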
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UADDE ||
         MI.getOpcode() == TargetOpcode::G_SADDE ||
         MI.getOpcode() == TargetOpcode::G_USUBE ||
         MI.getOpcode() == TargetOpcode::G_SSUBE);
  // ...
    switch (MI.getOpcode()) {
    case TargetOpcode::G_UADDE:
      NewOpcode = TargetOpcode::G_UADDO;
      break;
    case TargetOpcode::G_SADDE:
      NewOpcode = TargetOpcode::G_SADDO;
      break;
    case TargetOpcode::G_USUBE:
      NewOpcode = TargetOpcode::G_USUBO;
      break;
    case TargetOpcode::G_SSUBE:
      NewOpcode = TargetOpcode::G_SSUBO;
      break;
    }
    // ...
    MI.setDesc(B.getTII().get(NewOpcode));
    MI.removeOperand(4);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SUB);
  // ...
        auto Zero = B.buildConstant(MRI.getType(Dst), 0);
        B.buildSub(Dst, Zero, ReplaceReg);
 
 
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_UDIV || Opcode == TargetOpcode::G_UREM);
  // ...
  Register Dst = UDivorRem.getReg(0);
  Register LHS = UDivorRem.getReg(1);
  Register RHS = UDivorRem.getReg(2);
  LLT Ty = MRI.getType(Dst);
  // ...
  bool UseSRL = false;
  // ...
  auto BuildExactUDIVPattern = [&](const Constant *C) {
    // ...
    if (IsSplat && !Factors.empty()) {
      // ...
    }
    // ...
    APInt Divisor = CI->getValue();
    // ...
    Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
    Factors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
    // ...
  };
  // ...
    if (Ty.isVector()) {
      Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
      Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
    } else {
      // ...
      Factor = Factors[0];
    }
    // ...
    return MIB.buildMul(Ty, Res, Factor);
  // ...
  unsigned KnownLeadingZeros =
      VT ? VT->getKnownBits(LHS).countMinLeadingZeros() : 0;
  // ...
  bool UseNPQ = false;
  // ...
  auto BuildUDIVPattern = [&](const Constant *C) {
    // ...
    const APInt &Divisor = CI->getValue();
    // ...
    bool SelNPQ = false;
    // ...
    unsigned PreShift = 0, PostShift = 0;
    // ...
    if (!Divisor.isOne()) {
      UnsignedDivisionByConstantInfo magics =
          UnsignedDivisionByConstantInfo::get(
              Divisor, std::min(KnownLeadingZeros, Divisor.countl_zero()));
      // ...
      Magic = std::move(magics.Magic);
      // ...
             "We shouldn't generate an undefined shift!");
      // ...
             "We shouldn't generate an undefined shift!");
      // ...
      SelNPQ = magics.IsAdd;
    }
    // ...
    PreShifts.push_back(
        MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0));
    MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0));
    NPQFactors.push_back(
        MIB.buildConstant(ScalarTy,
                          /* ... */).getReg(0));
    PostShifts.push_back(
        MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0));
    // ...
  };
  // ...
  assert(Matched && "Expected unary predicate match to succeed");

  Register PreShift, PostShift, MagicFactor, NPQFactor;
  // ...
    PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0);
    MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
    NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0);
    PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0);
  // ...
           "Non-build_vector operation should have been a scalar");
    PreShift = PreShifts[0];
    MagicFactor = MagicFactors[0];
    PostShift = PostShifts[0];
  // ...
  Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0);
  // ...
  Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0);
  // ...
    Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0);
    // ...
      NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0);
    // ...
      NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0);
    // ...
    Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0);
  // ...
  Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
  auto One = MIB.buildConstant(Ty, 1);
  auto IsOne = MIB.buildICmp(
      CmpInst::Predicate::ICMP_EQ,
      Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One);
  auto ret = MIB.buildSelect(Ty, IsOne, LHS, Q);

  if (Opcode == TargetOpcode::G_UREM) {
    auto Prod = MIB.buildMul(Ty, ret, RHS);
    return MIB.buildSub(Ty, LHS, Prod);
  }
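// Illustrative, standalone sketch (not part of this file): the
// multiply-by-magic trick this routine emits. For a 32-bit unsigned divide
// by 3, the magic constant is 0xAAAAAAAB with a post-shift of 1 applied to
// the high half of the 64-bit product (a umulh followed by an lshr).
#include <cassert>
#include <cstdint>

static uint32_t udiv3(uint32_t X) {
  uint64_t Prod = (uint64_t)X * 0xAAAAAAABull; // high half = umulh
  return (uint32_t)(Prod >> 33);               // >>32 for the high word, >>1 post-shift
}

int main() {
  for (uint64_t X = 0; X <= 0xFFFFFFFFull; X += 0x10001ull)
    assert(udiv3((uint32_t)X) == (uint32_t)X / 3);
  assert(udiv3(0xFFFFFFFFu) == 0xFFFFFFFFu / 3);
  return 0;
}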
 
 
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_UDIV || Opcode == TargetOpcode::G_UREM);
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
  auto &MF = *MI.getMF();
  AttributeList Attr = MF.getFunction().getAttributes();
  // ...
  if (MF.getFunction().hasMinSize())
    return false;
  // ...
  if (Opcode == TargetOpcode::G_UDIV &&
      /* ... */) {
    // ...
    return matchUnaryPredicate(
        MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
  }
  // ...
  auto *RHSDef = MRI.getVRegDef(RHS);
  // ...
            {TargetOpcode::G_ICMP,
             /* ... */}
  // ...
    if (Opcode == TargetOpcode::G_UREM &&
        /* ... */)
      return false;
  // ...
  return matchUnaryPredicate(
      MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
 
 
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM);
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
  auto &MF = *MI.getMF();
  AttributeList Attr = MF.getFunction().getAttributes();
  // ...
  if (MF.getFunction().hasMinSize())
    return false;
  // ...
  if (Opcode == TargetOpcode::G_SDIV &&
      /* ... */) {
    // ...
    return matchUnaryPredicate(
        MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
  }
  // ...
  auto *RHSDef = MRI.getVRegDef(RHS);
  // ...
    if (!isLegal({TargetOpcode::G_SMULH, {DstTy}}) &&
        /* ... */)
      return false;
    if (Opcode == TargetOpcode::G_SREM &&
        /* ... */)
      return false;
  // ...
  return matchUnaryPredicate(
      MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
 
 
  unsigned Opcode = MI.getOpcode();
  assert(MI.getOpcode() == TargetOpcode::G_SDIV ||
         Opcode == TargetOpcode::G_SREM);
  // ...
  Register Dst = SDivorRem.getReg(0);
  Register LHS = SDivorRem.getReg(1);
  Register RHS = SDivorRem.getReg(2);
  LLT Ty = MRI.getType(Dst);
  // ...
  bool UseSRA = false;
  // ...
  auto BuildExactSDIVPattern = [&](const Constant *C) {
    // ...
    if (IsSplat && !ExactFactors.empty()) {
      // ...
      ExactFactors.push_back(ExactFactors[0]);
      // ...
    }
    // ...
    APInt Divisor = CI->getValue();
    // ...
    ExactShifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
    ExactFactors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
    // ...
  };
  // ...
    assert(Matched && "Expected unary predicate match to succeed");
    // ...
    if (Ty.isVector()) {
      Shift = MIB.buildBuildVector(ShiftAmtTy, ExactShifts).getReg(0);
      Factor = MIB.buildBuildVector(Ty, ExactFactors).getReg(0);
    } else {
      Shift = ExactShifts[0];
      Factor = ExactFactors[0];
    }
    // ...
    return MIB.buildMul(Ty, Res, Factor);
  // ...
  auto BuildSDIVPattern = [&](const Constant *C) {
    // ...
    const APInt &Divisor = CI->getValue();
    // ...
    int NumeratorFactor = 0;
    // ...
      NumeratorFactor = 1;
    // ...
      NumeratorFactor = -1;
    // ...
    MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magics.Magic).getReg(0));
    Factors.push_back(MIB.buildConstant(ScalarTy, NumeratorFactor).getReg(0));
    Shifts.push_back(
        MIB.buildConstant(ScalarShiftAmtTy, Magics.ShiftAmount).getReg(0));
    ShiftMasks.push_back(MIB.buildConstant(ScalarTy, ShiftMask).getReg(0));
    // ...
  };
  // ...
  assert(Matched && "Expected unary predicate match to succeed");

  Register MagicFactor, Factor, Shift, ShiftMask;
  // ...
    MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
    Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
    Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
    ShiftMask = MIB.buildBuildVector(Ty, ShiftMasks).getReg(0);
  // ...
           "Non-build_vector operation should have been a scalar");
    MagicFactor = MagicFactors[0];
    Factor = Factors[0];
    // ...
    ShiftMask = ShiftMasks[0];
  // ...
  Q = MIB.buildSMulH(Ty, LHS, MagicFactor).getReg(0);
  // ...
  Factor = MIB.buildMul(Ty, LHS, Factor).getReg(0);
  Q = MIB.buildAdd(Ty, Q, Factor).getReg(0);
  // ...
  Q = MIB.buildAShr(Ty, Q, Shift).getReg(0);
  // ...
  auto SignShift = MIB.buildConstant(ShiftAmtTy, EltBits - 1);
  auto T = MIB.buildLShr(Ty, Q, SignShift);
  T = MIB.buildAnd(Ty, T, ShiftMask);
  auto ret = MIB.buildAdd(Ty, Q, T);

  if (Opcode == TargetOpcode::G_SREM) {
    auto Prod = MIB.buildMul(Ty, ret, RHS);
    return MIB.buildSub(Ty, LHS, Prod);
  }
 
 
  assert((MI.getOpcode() == TargetOpcode::G_SDIV ||
          MI.getOpcode() == TargetOpcode::G_UDIV) &&
         "Expected SDIV or UDIV");
  // ...
  auto MatchPow2 = [&](const Constant *C) {
    // ...
    return CI && (CI->getValue().isPowerOf2() ||
                  (IsSigned && CI->getValue().isNegatedPowerOf2()));
  };
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
  // ...
  LLT Ty = MRI.getType(Dst);
  // ...
  unsigned BitWidth = Ty.getScalarSizeInBits();
  auto Zero = Builder.buildConstant(Ty, 0);
  // ...
  auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
  auto Inexact = Builder.buildSub(ShiftAmtTy, Bits, C1);
  // ...
  auto Sign = Builder.buildAShr(
      /* ... */);
  // ...
  auto LSrl = Builder.buildLShr(Ty, Sign, Inexact);
  // ...
  auto One = Builder.buildConstant(Ty, 1);
  auto MinusOne = Builder.buildConstant(Ty, -1);
  // ...
  auto IsOneOrMinusOne = Builder.buildOr(CCVT, IsOne, IsMinusOne);
  AShr = Builder.buildSelect(Ty, IsOneOrMinusOne, LHS, AShr);
  // ...
  auto Neg = Builder.buildNeg(Ty, AShr);
  // ...
  Builder.buildSelect(MI.getOperand(0).getReg(), IsNeg, Neg, AShr);
  MI.eraseFromParent();
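// Illustrative, standalone sketch (not part of this file): the rounding
// fix-up this expansion performs. An arithmetic shift rounds toward negative
// infinity, so a bias derived from the sign is added first to get the
// round-toward-zero semantics of sdiv. Assumes arithmetic right shift of
// negative values (universal in practice, guaranteed since C++20).
#include <cassert>
#include <cstdint>

static int32_t sdiv_by_pow2(int32_t X, unsigned Log2) {
  int32_t Bias = (X >> 31) & ((1 << Log2) - 1); // shifted sign, masked
  return (X + Bias) >> Log2;
}

int main() {
  for (int32_t X : {-17, -16, -1, 0, 1, 15, 16, 17})
    assert(sdiv_by_pow2(X, 4) == X / 16);
  return 0;
}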
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UDIV && "Expected UDIV");
  // ...
  LLT Ty = MRI.getType(Dst);
  // ...
  auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
  Builder.buildLShr(MI.getOperand(0).getReg(), LHS, C1);
  MI.eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_UMULH);
  // ...
  LLT Ty = MRI.getType(Dst);
  LLT RHSTy = MRI.getType(RHS);
  // ...
  auto MatchPow2ExceptOne = [&](const Constant *C) {
    // ...
      return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
    // ...
  };
  // ...

// ...
  LLT Ty = MRI.getType(Dst);
  // ...
  auto ShiftAmt =
      Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
  auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt);
  Builder.buildLShr(Dst, LHS, Trunc);
  MI.eraseFromParent();
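// Illustrative, standalone sketch (not part of this file): the high half of
// X * 2^K is X >> (W - K), which is why G_UMULH by a power of two (other
// than one) becomes a plain logical shift right.
#include <cassert>
#include <cstdint>

static uint32_t umulh32(uint32_t A, uint32_t B) {
  return (uint32_t)(((uint64_t)A * B) >> 32);
}

int main() {
  uint32_t X = 0xDEADBEEF;
  for (unsigned K = 1; K < 32; ++K)
    assert(umulh32(X, 1u << K) == (X >> (32 - K)));
  return 0;
}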
 
 
  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  // ...
  unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
  // ...
  if (!LI || !isLegal({TargetOpcode::G_TRUNC_SSAT_S, {DstTy, SrcTy}}))
    return false;
  // ...

// ...
  Builder.buildTruncSSatS(Dst, MatchInfo);
  MI.eraseFromParent();

// ...
  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  // ...
  unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
  // ...
  if (!LI || !isLegal({TargetOpcode::G_TRUNC_SSAT_U, {DstTy, SrcTy}}))
    return false;
  // ...

// ...
  Builder.buildTruncSSatU(Dst, MatchInfo);
  MI.eraseFromParent();

// ...
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  LLT SrcTy = MRI.getType(Val);
  // ...
  unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
  // ...
  if (!LI || !isLegal({TargetOpcode::G_TRUNC_SSAT_U, {DstTy, SrcTy}}))
    return false;
  // ...

// ...
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
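// Illustrative, standalone sketch (not part of this file): what a signed
// saturating truncate computes, here s32 -> s8: clamp into [-128, 127],
// then truncate.
#include <algorithm>
#include <cassert>
#include <cstdint>

static int8_t trunc_ssat_s(int32_t X) {
  return (int8_t)std::clamp(X, -128, 127);
}

int main() {
  assert(trunc_ssat_s(1000) == 127);   // overflow clamps to max
  assert(trunc_ssat_s(-1000) == -128); // underflow clamps to min
  assert(trunc_ssat_s(42) == 42);      // in-range passes through
  return 0;
}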
 
 
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
         Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
         Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);
  // ...
    Opc = TargetOpcode::G_FSUB;
  // ...
    Opc = TargetOpcode::G_FADD;
  // ...
  else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
            Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
           /* ... */)
  // ...
    MI.setDesc(B.getTII().get(Opc));
    MI.getOperand(1).setReg(X);
    MI.getOperand(2).setReg(Y);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
  // ...
  MatchInfo = MI.getOperand(2).getReg();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  // ...
  const auto LHSCst = Ty.isVector()
                          ? /* ... */
                          : /* ... */;
  // ...
  if (LHSCst->Value.isNegZero())
    // ...
  if (LHSCst->Value.isPosZero())
    // ...

// ...
  Builder.buildFNeg(
      Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0));
 
 
  if (MI.getOpcode() != TargetOpcode::G_FMUL)
    return false;
  // ...

// ...
  return std::distance(MRI.use_instr_nodbg_begin(/* ... */),
                       MRI.use_instr_nodbg_end()) >
         std::distance(MRI.use_instr_nodbg_begin(/* ... */),
                       MRI.use_instr_nodbg_end());
 
 
                                         bool &AllowFusionGlobally,
                                         /* ... */,
                                         bool CanReassociate) const {
  auto *MF = MI.getMF();
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  // ...
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  // ...
  bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
                /* ... */;
  // ...
  if (!HasFMAD && !HasFMA)
    return false;
  // ...
  Aggressive = TLI.enableAggressiveFMAFusion(DstType);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FADD);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {LHS.MI->getOperand(1).getReg(),
                    LHS.MI->getOperand(2).getReg(), RHS.Reg});
  // ...
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {RHS.MI->getOperand(1).getReg(),
                    RHS.MI->getOperand(2).getReg(), LHS.Reg});
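// Illustrative, standalone sketch (not part of this file): what fusing
// fadd(fmul(a, b), c) into fma(a, b, c) changes numerically. The fused form
// rounds once, which is why the combine is gated on contraction being
// allowed by flags or the global option.
#include <cassert>
#include <cmath>

int main() {
  double A = 1.0 + 0x1p-30, B = 1.0 - 0x1p-30, C = -1.0;
  volatile double Prod = A * B;     // forced to round to a double here
  double Split = Prod + C;          // two roundings: the product becomes 1.0
  double Fused = std::fma(A, B, C); // one rounding: exactly -0x1p-60
  assert(Split == 0.0 && Fused != 0.0);
  return 0;
}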
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FADD);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  // ...
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          /* ... */)) {
    // ...
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
    // ...
  }
  // ...
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          /* ... */)) {
    // ...
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
    // ...
  }
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FADD);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
       TargetOpcode::G_FMUL) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
    // ...
  }
  // ...
  else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
           (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
            TargetOpcode::G_FMUL) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
    // ...
  }
  // ...
    Register X = FMA->getOperand(1).getReg();
    Register Y = FMA->getOperand(2).getReg();
    // ...
      Register InnerFMA = MRI.createGenericVirtualRegister(DstTy);
      B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   /* ... */);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FADD);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
    Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
    Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
    // ...
        B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
    // ...
    B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                 /* ... */);
  // ...
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      // ...
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          /* ... */)) {
    // ...
                     LHS.MI->getOperand(1).getReg(),
                     LHS.MI->getOperand(2).getReg(), B);
    // ...
  }
  // ...
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    // ...
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            /* ... */)) {
      // ...
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
      // ...
    }
  }
  // ...
  if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
      // ...
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          /* ... */)) {
    // ...
                     RHS.MI->getOperand(1).getReg(),
                     RHS.MI->getOperand(2).getReg(), B);
    // ...
  }
  // ...
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    // ...
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            /* ... */)) {
      // ...
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
      // ...
    }
  }
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
  int FirstMulHasFewerUses = true;
  // ...
    FirstMulHasFewerUses = false;
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
  if (FirstMulHasFewerUses &&
      /* ... */) {
    // ...
      Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {LHS.MI->getOperand(1).getReg(),
                    LHS.MI->getOperand(2).getReg(), NegZ});
    // ...
  } else {
    // ...
      Register NegY =
          B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
    // ...
  }
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   /* ... */);
  // ...
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   /* ... */);
 
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX, FpExtY, NegZ});
  // ...
      Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0);
      // ...
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, FpExtZ, LHSReg});
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
  // ...
  bool AllowFusionGlobally, HasFMAD, Aggressive;
  // ...
  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
  // ...
    Register FpExtX = B.buildFPExt(DstTy, X).getReg(0);
    Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0);
    B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
  // ...
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          /* ... */)) {
    // ...
      Register FMAReg = MRI.createGenericVirtualRegister(DstTy);
      // ...
      B.buildFNeg(MI.getOperand(0).getReg(), FMAReg);
    // ...
  }
  // ...
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          /* ... */)
 
 
                                            unsigned &IdxToPropagate) const {
  // ...
  switch (MI.getOpcode()) {
  // ...
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
    PropagateNaN = false;
    break;
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXIMUM:
    PropagateNaN = true;
    break;
  }
  // ...
  auto MatchNaN = [&](unsigned Idx) {
    Register MaybeNaNReg = MI.getOperand(Idx).getReg();
    // ...
    IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1);
    // ...
  };
  // ...
  return MatchNaN(1) || MatchNaN(2);
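// Illustrative, standalone sketch (not part of this file): the two NaN
// policies the switch above distinguishes. The *NUM flavor returns the other
// operand when one input is NaN; the *IMUM flavor propagates the NaN.
#include <cassert>
#include <cmath>

int main() {
  double QNaN = std::nan("");
  assert(std::fmin(QNaN, 2.0) == 2.0); // G_FMINNUM-style: NaN is dropped
  // G_FMINIMUM-style: if either input is NaN, the result is NaN.
  double Propagated =
      (std::isnan(QNaN) || std::isnan(2.0)) ? QNaN : std::fmin(QNaN, 2.0);
  assert(std::isnan(Propagated));
  return 0;
}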
 
 
  assert(MI.getOpcode() == TargetOpcode::G_FDIV);
  // ...
  if (N0CFP && (N0CFP->isExactlyValue(1.0) || N0CFP->isExactlyValue(-1.0)))
    return false;
  // ...
  for (auto &U : MRI.use_nodbg_instructions(Y)) {
    if (&U == &MI || U.getParent() != MI.getParent())
      continue;
    if (U.getOpcode() == TargetOpcode::G_FDIV &&
        U.getOperand(2).getReg() == Y && U.getOperand(1).getReg() != Y) {
      // ...
    }
  }
  // ...
  return MatchInfo.size() >= MinUses;
 
 
  LLT Ty = MRI.getType(MatchInfo[0]->getOperand(0).getReg());
  auto Div = Builder.buildFDiv(Ty, Builder.buildFConstant(Ty, 1.0),
                               MatchInfo[0]->getOperand(2).getReg(),
                               MatchInfo[0]->getFlags());
  // ...
    Builder.buildFMul(MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
                      Div->getOperand(0).getReg(), MI->getFlags());
    MI->eraseFromParent();
 
 
  assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
  // ...
           Reg == MaybeSameReg;
  // ...
  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);

// ...
  LLT DstVecTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
    return MRI.getType(MatchInfo) == DstVecTy;
  // ...
  std::optional<ValueAndVReg> ShiftAmount;
  // ...
      return MRI.getType(MatchInfo) == DstVecTy;
  // ...

// ...
  return MRI.getType(MatchInfo) == MRI.getType(MI.getOperand(0).getReg());

// ...
  std::optional<ValueAndVReg> ShiftAmt;
  // ...
  LLT MatchTy = MRI.getType(MatchInfo);
  return ShiftAmt->Value.getZExtValue() == MatchTy.getSizeInBits() &&
         MatchTy == MRI.getType(MI.getOperand(0).getReg());
 
 
unsigned CombinerHelper::getFPMinMaxOpcForSelect(
    /* ... */,
    SelectPatternNaNBehaviour VsNaNRetVal) const {
  assert(VsNaNRetVal != SelectPatternNaNBehaviour::NOT_APPLICABLE &&
         "Expected a NaN behaviour?");
  // ...
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
      return TargetOpcode::G_FMAXNUM;
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
      return TargetOpcode::G_FMAXIMUM;
    if (isLegal({TargetOpcode::G_FMAXNUM, {DstTy}}))
      return TargetOpcode::G_FMAXNUM;
    if (isLegal({TargetOpcode::G_FMAXIMUM, {DstTy}}))
      return TargetOpcode::G_FMAXIMUM;
  // ...
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
      return TargetOpcode::G_FMINNUM;
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
      return TargetOpcode::G_FMINIMUM;
    if (isLegal({TargetOpcode::G_FMINNUM, {DstTy}}))
      return TargetOpcode::G_FMINNUM;
    if (!isLegal({TargetOpcode::G_FMINIMUM, {DstTy}}))
      return 0;
    return TargetOpcode::G_FMINIMUM;
 
CombinerHelper::SelectPatternNaNBehaviour
// ...
                                        bool IsOrderedComparison) const {
  // ...
  if (!LHSSafe && !RHSSafe)
    return SelectPatternNaNBehaviour::NOT_APPLICABLE;
  if (LHSSafe && RHSSafe)
    return SelectPatternNaNBehaviour::RETURNS_ANY;
  // ...
  if (IsOrderedComparison)
    return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_NAN
                   : SelectPatternNaNBehaviour::RETURNS_OTHER;
  // ...
  return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_OTHER
                 : SelectPatternNaNBehaviour::RETURNS_NAN;
 
  LLT DstTy = MRI.getType(Dst);
  // ...
  SelectPatternNaNBehaviour ResWithKnownNaNInfo =
      /* ... */;
  if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::NOT_APPLICABLE)
    return false;
  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    // ...
    if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_NAN)
      ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_OTHER;
    else if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_OTHER)
      ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_NAN;
  }
  if (TrueVal != CmpLHS || FalseVal != CmpRHS)
    return false;
  // ...
  unsigned Opc = getFPMinMaxOpcForSelect(Pred, DstTy, ResWithKnownNaNInfo);
  // ...
  if (Opc != TargetOpcode::G_FMAXIMUM && Opc != TargetOpcode::G_FMINIMUM) {
    // ...
    if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) {
      // ...
      if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero())
        return false;
    }
  }
  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildInstr(Opc, {Dst}, {CmpLHS, CmpRHS});
  };
  // ...

// ...
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  // ...
  Register TrueVal = MI.getOperand(2).getReg();
  Register FalseVal = MI.getOperand(3).getReg();
  return matchFPSelectToMinMax(Dst, Cond, TrueVal, FalseVal, MatchInfo);
 
 
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // ...
  if (MatchedSub && X != OpLHS)
    return false;
  // ...
    Y = X == OpLHS ? OpRHS : X == OpRHS ? OpLHS : Register();
  // ...
    auto Zero = B.buildConstant(MRI.getType(Y), 0);
    B.buildICmp(Pred, Dst, Y, Zero);
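// Illustrative, standalone sketch (not part of this file): the equality this
// fold uses. With wrapping integer arithmetic, x + y == x holds exactly when
// y == 0, so the add can be dropped and the compare rewritten against zero.
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X : {0u, 1u, 0xFFFFFFFFu, 0x80000000u})
    for (uint32_t Y : {0u, 1u, 0xFFFFFFFFu})
      assert(((X + Y) == X) == (Y == 0));
  return 0;
}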
 
 
static std::optional<unsigned>
getMinUselessShift(KnownBits ValueKB, unsigned Opcode,
                   std::optional<int64_t> &Result) {
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR ||
          Opcode == TargetOpcode::G_ASHR) &&
         "Expect G_SHL, G_LSHR or G_ASHR.");
  auto SignificantBits = 0;
  switch (Opcode) {
  case TargetOpcode::G_SHL:
    // ...
    break;
  case TargetOpcode::G_LSHR:
    // ...
    break;
  case TargetOpcode::G_ASHR:
    // ...
    break;
  }
  // ...
      Result = std::nullopt;
  // ...
}

// ...
  Register ShiftVal = MI.getOperand(1).getReg();
  Register ShiftReg = MI.getOperand(2).getReg();
  LLT ResTy = MRI.getType(MI.getOperand(0).getReg());
  auto IsShiftTooBig = [&](const Constant *C) {
    // ...
      MatchInfo = std::nullopt;
    // ...
    auto OptMaxUsefulShift = getMinUselessShift(/* ... */, MI.getOpcode(),
                                                MatchInfo);
    return OptMaxUsefulShift && CI->uge(*OptMaxUsefulShift);
  };
 
 
  unsigned LHSOpndIdx = 1;
  unsigned RHSOpndIdx = 2;
  switch (MI.getOpcode()) {
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_UMULO:
  case TargetOpcode::G_SMULO:
    // ...
    break;
  // ...
  }
  // ...
  Register LHS = MI.getOperand(LHSOpndIdx).getReg();
  Register RHS = MI.getOperand(RHSOpndIdx).getReg();
  // ...
    if (MRI.getVRegDef(LHS)->getOpcode() !=
        TargetOpcode::G_CONSTANT_FOLD_BARRIER)
      return false;
  // ...
  return MRI.getVRegDef(RHS)->getOpcode() !=
             TargetOpcode::G_CONSTANT_FOLD_BARRIER &&
         /* ... */;
 
  std::optional<FPValueAndVReg> ValAndVReg;
  // ...

// ...
  unsigned LHSOpndIdx = 1;
  unsigned RHSOpndIdx = 2;
  switch (MI.getOpcode()) {
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_UMULO:
  case TargetOpcode::G_SMULO:
    // ...
    break;
  // ...
  }
  // ...
  Register LHSReg = MI.getOperand(LHSOpndIdx).getReg();
  Register RHSReg = MI.getOperand(RHSOpndIdx).getReg();
  MI.getOperand(LHSOpndIdx).setReg(RHSReg);
  MI.getOperand(RHSOpndIdx).setReg(LHSReg);
 
 
bool CombinerHelper::isOneOrOneSplat(Register Src, bool AllowUndefs) const {
  LLT SrcTy = MRI.getType(Src);
  if (SrcTy.isFixedVector())
    return isConstantSplatVector(Src, 1, AllowUndefs);
  if (SrcTy.isScalar()) {
    // ...
    return IConstant && IConstant->Value == 1;
  }
  // ...
}

bool CombinerHelper::isZeroOrZeroSplat(Register Src, bool AllowUndefs) const {
  LLT SrcTy = MRI.getType(Src);
  // ...
    return isConstantSplatVector(Src, 0, AllowUndefs);
  // ...
    return IConstant && IConstant->Value == 0;
  // ...
}

bool CombinerHelper::isConstantSplatVector(Register Src, int64_t SplatValue,
                                           bool AllowUndefs) const {
  // ...
  for (unsigned I = 0; I < NumSources; ++I) {
    GImplicitDef *ImplicitDef =
        /* ... */;
    if (ImplicitDef && AllowUndefs)
      continue;
    if (ImplicitDef && !AllowUndefs)
      return false;
    std::optional<ValueAndVReg> IConstant =
        /* ... */;
    if (IConstant && IConstant->Value == SplatValue)
      continue;
    // ...
  }
  // ...
}

std::optional<APInt>
CombinerHelper::getConstantOrConstantSplatVector(Register Src) const {
  // ...
    return IConstant->Value;
  // ...
    return std::nullopt;
  // ...
  std::optional<APInt> Value = std::nullopt;
  for (unsigned I = 0; I < NumSources; ++I) {
    std::optional<ValueAndVReg> IConstant =
        /* ... */;
    if (!IConstant)
      return std::nullopt;
    if (!Value)
      Value = IConstant->Value;
    else if (*Value != IConstant->Value)
      return std::nullopt;
  }
  // ...
}

bool CombinerHelper::isConstantOrConstantVectorI(Register Src) const {
  // ...
  for (unsigned I = 0; I < NumSources; ++I) {
    std::optional<ValueAndVReg> IConstant =
        /* ... */;
    // ...
  }
  // ...
}
 
bool CombinerHelper::tryFoldSelectOfConstants(GSelect *Select,
                                              /* ... */) const {
  // ...
  LLT CondTy = MRI.getType(Select->getCondReg());
  LLT TrueTy = MRI.getType(Select->getTrueReg());
  // ...
  std::optional<ValueAndVReg> TrueOpt =
      /* ... */;
  std::optional<ValueAndVReg> FalseOpt =
      /* ... */;
  if (!TrueOpt || !FalseOpt)
    return false;

  APInt TrueValue = TrueOpt->Value;
  APInt FalseValue = FalseOpt->Value;

  // select Cond, 1, 0 --> zext(Cond)
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      B.buildZExtOrTrunc(Dest, Cond);
    };
    return true;
  }

  // select Cond, -1, 0 --> sext(Cond)
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      B.buildSExtOrTrunc(Dest, Cond);
    };
    return true;
  }

  // select Cond, 0, 1 --> zext(!Cond)
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      B.buildZExtOrTrunc(Dest, Inner);
    };
    return true;
  }

  // select Cond, 0, -1 --> sext(!Cond)
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      B.buildSExtOrTrunc(Dest, Inner);
    };
    return true;
  }

  // select Cond, C1, C1-1 --> add(zext(Cond), C1-1)
  if (TrueValue - 1 == FalseValue) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Inner, Cond);
      B.buildAdd(Dest, Inner, False);
    };
    return true;
  }

  // select Cond, C1, C1+1 --> add(sext(Cond), C1+1)
  if (TrueValue + 1 == FalseValue) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildSExtOrTrunc(Inner, Cond);
      B.buildAdd(Dest, Inner, False);
    };
    return true;
  }

  // select Cond, Pow2, 0 --> shl(zext(Cond), log2(Pow2))
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Inner, Cond);
      // ...
      auto ShAmtC = B.buildConstant(ShiftTy, TrueValue.exactLogBase2());
      B.buildShl(Dest, Inner, ShAmtC, Flags);
    };
    return true;
  }

  // select Cond, 0, Pow2 --> shl(zext(!Cond), log2(Pow2))
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // ...
      B.buildNot(Not, Cond);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Inner, Not);
      // ...
      auto ShAmtC = B.buildConstant(ShiftTy, FalseValue.exactLogBase2());
      B.buildShl(Dest, Inner, ShAmtC, Flags);
    };
    return true;
  }

  // select Cond, -1, C --> or(sext(Cond), C)
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildSExtOrTrunc(Inner, Cond);
      B.buildOr(Dest, Inner, False, Flags);
    };
    return true;
  }

  // select Cond, C, -1 --> or(sext(!Cond), C)
  if (/* ... */) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // ...
      B.buildNot(Not, Cond);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildSExtOrTrunc(Inner, Not);
      B.buildOr(Dest, Inner, True, Flags);
    };
    return true;
  }
  // ...
}
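// Illustrative, standalone sketch (not part of this file): a few of the
// arithmetic identities behind these folds, with C as a 0/1 condition value.
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t C : {0u, 1u}) {
    uint32_t Sext = C ? 0xFFFFFFFFu : 0u;          // sext of an i1
    assert((C ? 1u : 0u) == C);                    // select c,1,0   -> zext c
    assert((C ? 0xFFFFFFFFu : 0u) == Sext);        // select c,-1,0  -> sext c
    assert((C ? 10u : 9u) == 9u + C);              // select c,K,K-1 -> add
    assert((C ? 8u : 0u) == (C << 3));             // select c,2^n,0 -> shl
    assert((C ? 0xFFFFFFFFu : 7u) == (Sext | 7u)); // select c,-1,F  -> or
  }
  return 0;
}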
 
bool CombinerHelper::tryFoldBoolSelectToLogic(GSelect *Select,
                                              /* ... */) const {
  // ...
  LLT CondTy = MRI.getType(Select->getCondReg());
  LLT TrueTy = MRI.getType(Select->getTrueReg());
  // ...
  if (CondTy != TrueTy)
    return false;

  // select Cond, Cond, F --> or(Cond, F); select Cond, 1, F --> or(Cond, F)
  if ((Cond == True) || isOneOrOneSplat(True, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // ...
      B.buildZExtOrTrunc(Ext, Cond);
      auto FreezeFalse = B.buildFreeze(TrueTy, False);
      B.buildOr(DstReg, Ext, FreezeFalse, Flags);
    };
    return true;
  }

  // select Cond, T, Cond --> and(Cond, T); select Cond, T, 0 --> and(Cond, T)
  if ((Cond == False) || isZeroOrZeroSplat(False, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // ...
      B.buildZExtOrTrunc(Ext, Cond);
      auto FreezeTrue = B.buildFreeze(TrueTy, True);
      B.buildAnd(DstReg, Ext, FreezeTrue);
    };
    return true;
  }

  // select Cond, T, 1 --> or(!Cond, T)
  if (isOneOrOneSplat(False, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // ...
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      // ...
      B.buildZExtOrTrunc(Ext, Inner);
      auto FreezeTrue = B.buildFreeze(TrueTy, True);
      B.buildOr(DstReg, Ext, FreezeTrue, Flags);
    };
    return true;
  }

  // select Cond, 0, F --> and(!Cond, F)
  if (isZeroOrZeroSplat(True, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // ...
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      // ...
      B.buildZExtOrTrunc(Ext, Inner);
      auto FreezeFalse = B.buildFreeze(TrueTy, False);
      B.buildAnd(DstReg, Ext, FreezeFalse);
    };
    return true;
  }
 
  LLT DstTy = MRI.getType(DstReg);
  // ...
  if (!MRI.hasOneNonDBGUse(Cmp->getReg(0)))
    return false;
  // ...
  Register CmpLHS = Cmp->getLHSReg();
  Register CmpRHS = Cmp->getRHSReg();
  // ...
  if (True == CmpRHS && False == CmpLHS) {
    // ...
  }
  // ...
  if (True != CmpLHS || False != CmpRHS)
    return false;
 
 
  assert(MI.getOpcode() == TargetOpcode::G_SUB);
  Register DestReg = MI.getOperand(0).getReg();
  LLT DestTy = MRI.getType(DestReg);
  // ...
    if (isLegal({NewOpc, {DestTy}})) {
      // ...
        B.buildInstr(NewOpc, {DestReg}, {X, Sub0});
      // ...
    }
  // ...

// ...
  if (tryFoldSelectOfConstants(Select, MatchInfo))
    return true;
  // ...
  if (tryFoldBoolSelectToLogic(Select, MatchInfo))
    return true;
 
 
bool CombinerHelper::tryFoldAndOrOrICmpsUsingRanges(
    /* ... */) const {
  assert(Logic->getOpcode() != TargetOpcode::G_XOR && "unexpected xor");
  bool IsAnd = Logic->getOpcode() == TargetOpcode::G_AND;
  // ...
  unsigned Flags = Logic->getFlags();
  // ...
  if (!MRI.hasOneNonDBGUse(Cmp1->getReg(0)) ||
      /* ... */)
    return false;
  // ...
  std::optional<ValueAndVReg> MaybeC1 =
      /* ... */;
  if (!MaybeC1)
    return false;
  C1 = MaybeC1->Value;

  std::optional<ValueAndVReg> MaybeC2 =
      /* ... */;
  if (!MaybeC2)
    return false;
  C2 = MaybeC2->Value;
  // ...
  LLT CmpOperandTy = MRI.getType(R1);
  // ...
  std::optional<APInt> Offset1;
  std::optional<APInt> Offset2;
  // ...
      std::optional<ValueAndVReg> MaybeOffset1 =
          /* ... */;
      if (MaybeOffset1) {
        R1 = Add->getLHSReg();
        Offset1 = MaybeOffset1->Value;
      }
  // ...
      std::optional<ValueAndVReg> MaybeOffset2 =
          /* ... */;
      if (MaybeOffset2) {
        R2 = Add->getLHSReg();
        Offset2 = MaybeOffset2->Value;
      }
  // ...
  bool CreateMask = false;
  // ...
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        /* ... */)
      return false;
  // ...
  CR->getEquivalentICmp(NewPred, NewC, Offset);
  // ...
  MatchInfo = [=](MachineIRBuilder &B) {
    if (CreateMask && Offset != 0) {
      auto TildeLowerDiff = B.buildConstant(CmpOperandTy, ~LowerDiff);
      auto And = B.buildAnd(CmpOperandTy, R1, TildeLowerDiff);
      auto OffsetC = B.buildConstant(CmpOperandTy, Offset);
      auto Add = B.buildAdd(CmpOperandTy, And, OffsetC, Flags);
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, Add, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    } else if (CreateMask && Offset == 0) {
      auto TildeLowerDiff = B.buildConstant(CmpOperandTy, ~LowerDiff);
      auto And = B.buildAnd(CmpOperandTy, R1, TildeLowerDiff);
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, And, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    } else if (!CreateMask && Offset != 0) {
      auto OffsetC = B.buildConstant(CmpOperandTy, Offset);
      auto Add = B.buildAdd(CmpOperandTy, R1, OffsetC, Flags);
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, Add, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    } else if (!CreateMask && Offset == 0) {
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, R1, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    }
  };
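// Illustrative, standalone sketch (not part of this file): the CreateMask
// case. Two compared constants that differ in exactly one bit (here 3 and
// 11, differing in bit 3) collapse into a single compare after masking that
// bit off.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t LowerDiff = 8; // the single differing bit
  for (uint32_t X = 0; X < 64; ++X) {
    bool TwoCmps = (X == 3) || (X == 11);
    bool Masked = (X & ~LowerDiff) == 3;
    assert(TwoCmps == Masked);
  }
  return 0;
}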
 
bool CombinerHelper::tryFoldLogicOfFCmps(GLogicalBinOp *Logic,
                                         /* ... */) const {
  // ...
  bool IsAnd = Logic->getOpcode() == TargetOpcode::G_AND;
  // ...
  LLT CmpTy = MRI.getType(Cmp1->getReg(0));
  // ...
  if (!isLegal({TargetOpcode::G_FCMP, {CmpTy, CmpOperandTy}}) ||
      !MRI.hasOneNonDBGUse(Logic->getReg(0)) ||
      !MRI.hasOneNonDBGUse(Cmp1->getReg(0)) ||
      !MRI.hasOneNonDBGUse(Cmp2->getReg(0)) ||
      /* ... */)
    return false;
  // ...
  if (LHS0 == RHS1 && LHS1 == RHS0) {
    // ...
  }
  // ...
  if (LHS0 == RHS0 && LHS1 == RHS1) {
    // ...
    unsigned NewPred = IsAnd ? CmpCodeL & CmpCodeR : CmpCodeL | CmpCodeR;
    // ...
    MatchInfo = [=](MachineIRBuilder &B) {
      // ...
        auto False = B.buildConstant(CmpTy, 0);
        B.buildZExtOrTrunc(DestReg, False);
      // ...
        B.buildZExtOrTrunc(DestReg, True);
      // ...
        auto Cmp = B.buildFCmp(Pred, CmpTy, LHS0, LHS1, Flags);
        B.buildZExtOrTrunc(DestReg, Cmp);
      // ...
    };
    return true;
  }
 
  if (tryFoldAndOrOrICmpsUsingRanges(And, MatchInfo))
    return true;
  // ...
  if (tryFoldLogicOfFCmps(And, MatchInfo))
    return true;
  // ...

// ...
  if (tryFoldAndOrOrICmpsUsingRanges(Or, MatchInfo))
    return true;
  // ...
  if (tryFoldLogicOfFCmps(Or, MatchInfo))
    return true;
 
 
  bool IsSigned = Add->isSigned();
  LLT DstTy = MRI.getType(Dst);
  LLT CarryTy = MRI.getType(Carry);
  // ...
  if (MRI.use_nodbg_empty(Carry) &&
      /* ... */) {
    // ...
      B.buildAdd(Dst, LHS, RHS);
      B.buildUndef(Carry);
    // ...
  }
  // ...
  if (isConstantOrConstantVectorI(LHS) && !isConstantOrConstantVectorI(RHS)) {
    // ...
      B.buildSAddo(Dst, Carry, RHS, LHS);
    // ...
      B.buildUAddo(Dst, Carry, RHS, LHS);
    // ...
  }
  // ...
  std::optional<APInt> MaybeLHS = getConstantOrConstantSplatVector(LHS);
  std::optional<APInt> MaybeRHS = getConstantOrConstantSplatVector(RHS);
  // ...
    APInt Result = IsSigned ? MaybeLHS->sadd_ov(*MaybeRHS, Overflow)
                            : MaybeLHS->uadd_ov(*MaybeRHS, Overflow);
    // ...
      B.buildConstant(Dst, Result);
      B.buildConstant(Carry, Overflow);
    // ...
  // ...
      B.buildCopy(Dst, LHS);
      B.buildConstant(Carry, 0);
  // ...
  if (MaybeRHS && AddLHS && MRI.hasOneNonDBGUse(Add->getReg(0)) &&
      /* ... */) {
    std::optional<APInt> MaybeAddRHS =
        getConstantOrConstantSplatVector(AddLHS->getRHSReg());
    // ...
      APInt NewC = IsSigned ? MaybeAddRHS->sadd_ov(*MaybeRHS, Overflow)
                            : MaybeAddRHS->uadd_ov(*MaybeRHS, Overflow);
      // ...
          auto ConstRHS = B.buildConstant(DstTy, NewC);
          B.buildSAddo(Dst, Carry, AddLHS->getLHSReg(), ConstRHS);
      // ...
          auto ConstRHS = B.buildConstant(DstTy, NewC);
          B.buildUAddo(Dst, Carry, AddLHS->getLHSReg(), ConstRHS);
      // ...
  }
  // ...
      B.buildConstant(Carry, 0);
  // ...
      B.buildAdd(Dst, LHS, RHS);
      B.buildConstant(Carry, 1);
  // ...
  if (VT->computeNumSignBits(RHS) > 1 && VT->computeNumSignBits(LHS) > 1) {
    // ...
      B.buildConstant(Carry, 0);
    // ...
  }
  // ...
      B.buildConstant(Carry, 0);
  // ...
      B.buildAdd(Dst, LHS, RHS);
      B.buildConstant(Carry, 1);
 
 
  bool OptForSize = MI.getMF()->getFunction().hasOptSize();
  // ...

// ...
  auto [Dst, Base] = MI.getFirst2Regs();
  LLT Ty = MRI.getType(Dst);
  // ...
    Builder.buildFConstant(Dst, 1.0);
    MI.removeFromParent();
  // ...
  std::optional<SrcOp> Res;
  // ...
  while (ExpVal > 0) {
    // ...
      Res = Builder.buildFMul(Ty, *Res, CurSquare);
    // ...
    CurSquare = Builder.buildFMul(Ty, CurSquare, CurSquare);
    // ...
  }
  // ...
    Res = Builder.buildFDiv(Ty, Builder.buildFConstant(Ty, 1.0), *Res,
                            /* ... */);
  // ...
  MI.eraseFromParent();
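// Illustrative, standalone sketch (not part of this file): the
// square-and-multiply loop above, written out for a scalar. A negative
// exponent becomes one final reciprocal, matching the buildFDiv of 1.0.
#include <cassert>

static double powi(double Base, int Exp) {
  unsigned E = Exp < 0 ? -(unsigned)Exp : (unsigned)Exp;
  double Res = 1.0, CurSquare = Base;
  while (E > 0) {
    if (E & 1)
      Res *= CurSquare; // fold in the current power-of-two of Base
    CurSquare *= CurSquare;
    E >>= 1;
  }
  return Exp < 0 ? 1.0 / Res : Res;
}

int main() {
  assert(powi(2.0, 10) == 1024.0);
  assert(powi(2.0, 0) == 1.0);
  assert(powi(2.0, -3) == 0.125);
  return 0;
}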
 
 
  if (!MRI.hasOneNonDBGUse(Add->getReg(0)))
    return false;
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
    auto Const = B.buildConstant(DstTy, C1 - C2);
    B.buildAdd(Dst, Add->getLHSReg(), Const);
  // ...

// ...
  if (!MRI.hasOneNonDBGUse(Add->getReg(0)))
    return false;
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
    auto Const = B.buildConstant(DstTy, C2 - C1);
    B.buildSub(Dst, Const, Add->getLHSReg());
  // ...

// ...
  if (!MRI.hasOneNonDBGUse(Sub2->getReg(0)))
    return false;
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
    auto Const = B.buildConstant(DstTy, C1 + C2);
  // ...

// ...
  if (!MRI.hasOneNonDBGUse(Sub2->getReg(0)))
    return false;
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
    auto Const = B.buildConstant(DstTy, C1 - C2);
  // ...

// ...
  if (!MRI.hasOneNonDBGUse(Sub->getReg(0)))
    return false;
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
    auto Const = B.buildConstant(DstTy, C2 - C1);
    B.buildAdd(Dst, Sub->getLHSReg(), Const);
 
 
    if (!MRI.hasOneNonDBGUse(BV->getReg(0)))
      return false;
    // ...
    if (BV->getNumSources() % Unmerge->getNumDefs() != 0)
      return false;
    // ...
    LLT BigBvTy = MRI.getType(BV->getReg(0));
    LLT SmallBvTy = DstTy;
    // ...
    if (/* ... */
            {TargetOpcode::G_BUILD_VECTOR, {SmallBvTy, SmallBvElemenTy}}))
      return false;
    // ...
            {TargetOpcode::G_ANYEXT,
             /* ... */}
    // ...
          auto AnyExt = B.buildAnyExt(SmallBvElemenTy, SourceArray);
          Ops.push_back(AnyExt.getReg(0));
 
 
  const LLT SrcTy = MRI.getType(Shuffle.getSrc1Reg());
  const unsigned NumSrcElems = SrcTy.isVector() ? SrcTy.getNumElements() : 1;
  // ...
  const unsigned NumDstElts = OrigMask.size();
  for (unsigned i = 0; i != NumDstElts; ++i) {
    int Idx = OrigMask[i];
    if (Idx >= (int)NumSrcElems) {
      // ...
    }
  }
  // ...
    B.buildShuffleVector(MI.getOperand(0), MI.getOperand(1), MI.getOperand(2),
                         std::move(NewMask));
 
 
  const unsigned MaskSize = Mask.size();
  for (unsigned I = 0; I < MaskSize; ++I) {
    // ...
    if (Idx < (int)NumElems)
      Mask[I] = Idx + NumElems;
    else
      Mask[I] = Idx - NumElems;
  }
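// Illustrative, standalone sketch (not part of this file): swapping a
// shuffle's two source vectors just toggles which half of the index space
// each mask entry points into, which is what the +/- NumElems adjustment
// above computes.
#include <cassert>
#include <vector>

int main() {
  const unsigned NumElems = 4;
  std::vector<int> Mask = {0, 5, 2, 7}; // picks from both sources
  std::vector<int> Commuted = Mask;
  for (int &Idx : Commuted)
    Idx = Idx < (int)NumElems ? Idx + NumElems : Idx - NumElems;
  std::vector<int> Expected = {4, 1, 6, 3};
  assert(Commuted == Expected);
  return 0;
}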
 
 
  if (getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Shuffle.getSrc1Reg(), MRI))
    return false;
  if (getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Shuffle.getSrc2Reg(), MRI))
    return false;
  // ...
  const LLT DstTy = MRI.getType(Shuffle.getReg(0));
  const LLT Src1Ty = MRI.getType(Shuffle.getSrc1Reg());
  if (!isLegal({TargetOpcode::G_SHUFFLE_VECTOR, {DstTy, Src1Ty}}))
    return false;
  // ...
  const unsigned NumSrcElems = Src1Ty.getNumElements();
  // ...
  bool TouchesSrc1 = false;
  bool TouchesSrc2 = false;
  const unsigned NumElems = Mask.size();
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    // ...
    if (Mask[Idx] < (int)NumSrcElems)
      TouchesSrc1 = true;
    else
      TouchesSrc2 = true;
  }
  // ...
  if (TouchesSrc1 == TouchesSrc2)
    return false;
  // ...
  Register NewSrc1 = Shuffle.getSrc1Reg();
  // ...
    NewSrc1 = Shuffle.getSrc2Reg();
  // ...
    auto Undef = B.buildUndef(Src1Ty);
    B.buildShuffleVector(Shuffle.getReg(0), NewSrc1, Undef, NewMask);
 
 
  LLT DstTy = MRI.getType(Dst);
  LLT CarryTy = MRI.getType(Carry);
  // ...
        B.buildConstant(Carry, 0);
  // ...
        B.buildSub(Dst, LHS, RHS);
  // ...
      B.buildConstant(Carry, 0);
  // ...
      B.buildSub(Dst, LHS, RHS);
 
 
 
unsigned const MachineRegisterInfo * MRI
 
MachineInstrBuilder & UseMI
 
MachineInstrBuilder MachineInstrBuilder & DefMI
 
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
 
AMDGPU Register Bank Select
 
This file declares a class to represent arbitrary precision floating point values and provide a varie...
 
static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo &MRI)
 
static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally)
Checks if MI is TargetOpcode::G_FMUL and contractable either due to global flags or MachineInstr flag...
 
static unsigned getIndexedOpc(unsigned LdStOpc)
 
static APFloat constantFoldFpUnary(const MachineInstr &MI, const MachineRegisterInfo &MRI, const APFloat &Val)
 
static std::optional< std::pair< GZExtLoad *, int64_t > > matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits, const MachineRegisterInfo &MRI)
Helper function for findLoadOffsetsForLoadOrCombine.
 
static std::optional< unsigned > getMinUselessShift(KnownBits ValueKB, unsigned Opcode, std::optional< int64_t > &Result)
Return the minimum useless shift amount that results in complete loss of the source value.
 
static Register peekThroughBitcast(Register Reg, const MachineRegisterInfo &MRI)
 
static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I)
 
static cl::opt< bool > ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false), cl::desc("Force all indexed operations to be " "legal for the GlobalISel combiner"))
 
static void commuteMask(MutableArrayRef< int > Mask, const unsigned NumElems)
 
static cl::opt< unsigned > PostIndexUseThreshold("post-index-use-threshold", cl::Hidden, cl::init(32), cl::desc("Number of uses of a base pointer to check before it is no longer " "considered for post-indexing."))
 
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
 
static unsigned getExtLoadOpcForExtend(unsigned ExtOpc)
 
static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits, int64_t Cst, bool IsVector, bool IsFP)
 
static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy)
 
static bool canFoldInAddressingMode(GLoadStore *MI, const TargetLowering &TLI, MachineRegisterInfo &MRI)
Return true if 'MI' is a load or a store that may fold its address operand into the load / store ...
 
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I)
 
static Register buildLogBase2(Register V, MachineIRBuilder &MIB)
Determines the LogBase2 value for a non-null input value using the transform: LogBase2(V) = (EltBits ...
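
Reading off the documented transform, a minimal scalar-only sketch of this helper (illustrative, not the in-tree body, which may handle additional cases such as constant splats):

// Sketch of LogBase2(V) = (EltBits - 1) - CTLZ(V) for the scalar case.
static Register buildLogBase2Sketch(Register V, MachineIRBuilder &MIB) {
  LLT Ty = MIB.getMRI()->getType(V);
  auto Ctlz = MIB.buildCTLZ(Ty, V);
  auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
  return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
}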
 
This contains common combine transformations that may be used in a combine pass,or by the target else...
 
This contains common code to allow clients to notify changes to machine instr.
 
Provides analysis for querying information about KnownBits during GISel passes.
 
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
 
Interface for Targets to specify which operations they can successfully select and how the others sho...
 
Implement a low-level type suitable for MachineInstr level instruction selection.
 
Contains matchers for matching SSA Machine Instructions.
 
This file declares the MachineIRBuilder class.
 
This file implements a set that has insertion order iteration characteristics.
 
This file implements the SmallBitVector class.
 
This file describes how to lower LLVM code to machine code.
 
static const fltSemantics & IEEEdouble()
 
static constexpr roundingMode rmNearestTiesToEven
 
const fltSemantics & getSemantics() const
 
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
 
APInt bitcastToAPInt() const
 
Class for arbitrary precision integers.
 
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
 
uint64_t getZExtValue() const
Get zero extended value.
 
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
 
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
 
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
 
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
 
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
 
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
 
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
 
unsigned getBitWidth() const
Return the number of bits in the APInt.
 
bool ult(const APInt &RHS) const
Unsigned less than comparison.
 
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
 
bool isNegative() const
Determine sign of this APInt.
 
int32_t exactLogBase2() const
 
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
 
unsigned countr_zero() const
Count the number of trailing zero bits.
 
unsigned countl_zero() const
The APInt version of std::countl_zero.
 
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
 
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
 
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
 
LLVM_ABI APInt multiplicativeInverse() const
 
bool isMask(unsigned numBits) const
 
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
 
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
 
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
 
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
 
bool isOne() const
Determine if this is a value of 1.
 
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
 
int64_t getSExtValue() const
Get sign extended value.
 
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
 
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
 
unsigned countr_one() const
Count the number of trailing one bits.
 
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
 
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
 
size_t size() const
size - Get the array size.
 
bool isEquality() const
Determine if this is an equals/not equals predicate.
 
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
 
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
 
@ ICMP_SLT
signed less than
 
@ ICMP_SLE
signed less or equal
 
@ FCMP_OLT
0 1 0 0 True if ordered and less than
 
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
 
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
 
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
 
@ ICMP_UGE
unsigned greater or equal
 
@ ICMP_UGT
unsigned greater than
 
@ ICMP_SGT
signed greater than
 
@ FCMP_ULT
1 1 0 0 True if unordered or less than
 
@ ICMP_ULT
unsigned less than
 
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
 
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
 
@ ICMP_SGE
signed greater or equal
 
@ ICMP_ULE
unsigned less or equal
 
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
 
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
 
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
 
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
 
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
 
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
 
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
 
bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchRepeatedFPDivisor(MachineInstr &MI, SmallVector< MachineInstr * > &MatchInfo) const
 
bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match expression trees of the form.
 
const RegisterBank * getRegBank(Register Reg) const
Get the register bank of Reg.
 
void applyPtrAddZero(MachineInstr &MI) const
 
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2) const
Return true if MOP1 and MOP2 are register operands that are defined by equivalent instructions.
 
void applyUDivOrURemByConst(MachineInstr &MI) const
 
bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
 
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
 
bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchSelectSameVal(MachineInstr &MI) const
Optimize (cond ? x : x) -> x.
 
bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*ADDE x, y, 0) -> (G_*ADDO x, y) (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
 
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS, BuildFnTy &MatchInfo) const
 
bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width.
 
bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
 
void applySimplifyURemByPow2(MachineInstr &MI) const
Combine G_UREM x, (known power of 2) to an add and bitmasking.
 
bool matchCombineUnmergeZExtToZExt(MachineInstr &MI) const
Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0.
 
bool matchPtrAddZero(MachineInstr &MI) const
 
void applyCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
 
void applyXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
 
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally, bool &HasFMAD, bool &Aggressive, bool CanReassociate=false) const
 
bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
 
void applyCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
 
bool matchShiftsTooBig(MachineInstr &MI, std::optional< int64_t > &MatchInfo) const
Match shifts greater or equal to the range (the bitwidth of the result datatype, or the effective bit...
 
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) (fadd (fpext (fmul x,...
 
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
 
void applyCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
 
void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement) const
Delete MI and replace all of its uses with Replacement.
 
void applyCombineShuffleToBuildVector(MachineInstr &MI) const
Replace MI with a build_vector.
 
bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed load.
 
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
 
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, Register ToReg) const
Replace a single register operand with a new register and inform the observer of the changes.
 
bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate commutative binary operations like G_ADD.
 
void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
 
bool matchCommuteConstantToRHS(MachineInstr &MI) const
Match constant LHS ops that should be commuted.
 
const DataLayout & getDataLayout() const
 
bool matchBinOpSameVal(MachineInstr &MI) const
Optimize (x op x) -> x.
 
bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
 
bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM when their source operands are iden...
 
void applyUMulHToLShr(MachineInstr &MI) const
 
void applyNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
 
bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
Fold (shift (shift base, x), y) -> (shift base (x+y))
 
void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
 
bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
 
bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const
Return true if all register explicit use operands on MI are defined by a G_IMPLICIT_DEF.
 
bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI precedes UseMI or they are the same instruction.
 
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
 
bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
 
const TargetLowering & getTargetLowering() const
 
bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const
Remove references to rhs if it is undef.
 
void applyBuildInstructionSteps(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Replace MI with a series of instructions described in MatchInfo.
 
void applySDivByPow2(MachineInstr &MI) const
 
void applySimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
 
void applyUDivByPow2(MachineInstr &MI) const
Given a G_UDIV MI expressing an unsigned division by a pow2 constant, return expressions that impleme...
 
bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ors.
 
bool matchLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo, MachineInstr &ShiftMI) const
Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
 
bool matchSimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
Return true if MI is a G_ADD which can be simplified to a G_SUB.
 
void replaceInstWithConstant(MachineInstr &MI, int64_t C) const
Replace an instruction with a G_CONSTANT with value C.
 
bool tryEmitMemcpyInline(MachineInstr &MI) const
Emit loads and stores that perform the given memcpy.
 
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) (fsub (fpext (fmul x,...
 
void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
 
bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const
Checks if constant at ConstIdx is larger than MI 's bitwidth.
 
void applyCombineCopy(MachineInstr &MI) const
 
bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const
Transform G_ADD(x, G_SUB(y, x)) to y.
 
bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData) const
 
void applyCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
 
bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fmul x, y), z) -> (fma x, y, -z) (fsub (fmul x, y), z) -> (fmad x,...
 
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) (fadd (fmad x,...
 
bool matchSextTruncSextLoad(MachineInstr &MI) const
 
bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const
Fold away a merge of an unmerge of the corresponding values.
 
bool matchCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
 
bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const
Given a G_SDIV MI expressing a signed division by a pow2 constant, return expressions that implement...
 
bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd x, fneg(y)) -> (fsub x, y) (fadd fneg(x), y) -> (fsub y, x) (fsub x,...
 
bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match (and (load x), mask) -> zextload x.
 
bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fmul x, y), z) -> (fma x, y, z) (fadd (fmul x, y), z) -> (fmad x,...
 
bool matchCombineCopy(MachineInstr &MI) const
 
bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
 
void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
 
bool matchXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
Fold (xor (and x, y), y) -> (and (not x), y).
 
bool matchCombineShuffleVector(MachineInstr &MI, SmallVectorImpl< Register > &Ops) const
Check if the G_SHUFFLE_VECTOR MI can be replaced by a concat_vectors.
 
void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
 
bool matchCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y) Transform G_ADD y,...
 
void replaceInstWithFConstant(MachineInstr &MI, double C) const
Replace an instruction with a G_FCONSTANT with value C.
 
bool matchFunnelShiftToRotate(MachineInstr &MI) const
Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
 
bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants, BuildFnTy &MatchInfo) const
 
bool matchRedundantSExtInReg(MachineInstr &MI) const
 
void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const
Replace the opcode in instruction with a new opcode and inform the observer of the changes.
 
void applyFunnelShiftConstantModulo(MachineInstr &MI) const
Replaces the shift amount in MI with ShiftAmt % BW.
 
bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is zero.
 
bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
 
void applyCombineShlOfExtend(MachineInstr &MI, const RegisterImmPair &MatchData) const
 
void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
 
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, GISelValueTracking *VT=nullptr, MachineDominatorTree *MDT=nullptr, const LegalizerInfo *LI=nullptr)
 
bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not reference a.
 
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
Transform a multiply by a power-of-2 value to a left shift.
 
void applyCombineShuffleVector(MachineInstr &MI, ArrayRef< Register > Ops) const
Replace MI with a concat_vectors with Ops.
 
bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
 
bool matchCombineUnmergeUndef(MachineInstr &MI, std::function< void(MachineIRBuilder &)> &MatchInfo) const
Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
 
void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo) const
SelectOperand is the operand in binary operator MI that is the select to fold.
 
bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
 
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
 
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_UMULO x, 2) -> (G_UADDO x, x) (G_SMULO x, 2) -> (G_SADDO x, x)
 
bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
 
void applySextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
 
bool tryCombineCopy(MachineInstr &MI) const
If MI is COPY, try to combine it.
 
bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const
 
bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate pointer calculations with G_ADD involved, to allow better addressing mode usage.
 
bool isPreLegalize() const
 
bool matchUndefShuffleVectorMask(MachineInstr &MI) const
Return true if a G_SHUFFLE_VECTOR instruction MI has an undef mask.
 
bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const
Return true if any explicit use operand on MI is defined by a G_IMPLICIT_DEF.
 
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
 
bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
 
bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is known to be a power of 2.
 
bool matchCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
If MI is G_CONCAT_VECTORS, try to combine it.
 
bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) const
Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
 
bool matchExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
 
LLVMContext & getContext() const
 
void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
 
bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const
 
bool matchNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
Combine inverting a result of a compare into the opposite cond code.
 
bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
Match sext_inreg(load p), imm -> sextload p.
 
bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine select to integer min/max.
 
void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst) const
Transform fp_instr(cst) to constant result of the fp operation.
 
bool isLegal(const LegalityQuery &Query) const
 
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo) const
 
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0, Register Op1, BuildFnTy &MatchInfo) const
Try to reassociate the operands of a commutative binop.
 
void eraseInst(MachineInstr &MI) const
Erase MI.
 
bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const
Do constant FP folding when opportunities are exposed after MIR building.
 
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
 
bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
 
bool matchUndefStore(MachineInstr &MI) const
Return true if a G_STORE instruction MI is storing an undef value.
 
MachineRegisterInfo & MRI
 
void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) const
Transform PtrToInt(IntToPtr(x)) to x.
 
void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
 
bool matchConstantFPOp(const MachineOperand &MOP, double C) const
Return true if MOP is defined by a G_FCONSTANT or splat with a value exactly equal to C.
 
MachineInstr * buildUDivOrURemUsingMul(MachineInstr &MI) const
Given a G_UDIV MI or G_UREM MI expressing a divide by constant, return an expression that implements...
 
void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
 
bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const
Push a binary operator through a select on constants.
 
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount) const
 
bool tryCombineExtendingLoads(MachineInstr &MI) const
If MI is extend that consumes the result of a load, try to combine it.
 
bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const
 
bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo) const
 
bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (and x, n), k -> ubfx x, pos, width.
 
void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
 
bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
 
bool tryCombineShuffleVector(MachineInstr &MI) const
Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
 
void applyRotateOutOfRange(MachineInstr &MI) const
 
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
 
bool matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
 
bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: and (lshr x, cst), mask -> ubfx x, cst, width.
 
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
 
bool matchUndefSelectCmp(MachineInstr &MI) const
Return true if a G_SELECT instruction MI has an undef comparison.
 
bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
void replaceInstWithUndef(MachineInstr &MI) const
Replace an instruction with a G_IMPLICIT_DEF.
 
bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (X + Y) == X -> Y == 0 (X - Y) == X -> Y == 0 (X ^ Y) == X -> Y == 0 (X + Y) !...
 
bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
If a brcond's true block is not the fallthrough, make it so by inverting the condition and swapping o...
 
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine addos.
 
void applyAshShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
 
bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine selects.
 
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
 
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
 
bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
 
bool matchRotateOutOfRange(MachineInstr &MI) const
 
void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const
Expands FPOWI into a series of multiplications and a division if the exponent is negative.
 
void setRegBank(Register Reg, const RegisterBank *RegBank) const
Set the register bank of Reg.
 
bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const
Return true if a G_SELECT instruction MI has a constant comparison.
 
bool matchCommuteFPConstantToRHS(MachineInstr &MI) const
Match constant LHS FP ops that should be commuted.
 
void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
 
bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const
 
bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const
 
void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
 
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y),...
 
bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
 
void applyCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
 
bool matchConstantOp(const MachineOperand &MOP, int64_t C) const
Return true if MOP is defined by a G_CONSTANT or splat with a value equal to C.
 
void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
 
bool matchUMulHToLShr(MachineInstr &MI) const
 
MachineDominatorTree * MDT
 
void applyFunnelShiftToRotate(MachineInstr &MI) const
 
bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
void applyRepeatedFPDivisor(SmallVector< MachineInstr * > &MatchInfo) const
 
bool matchTruncUSatUToFPTOUISat(MachineInstr &MI, MachineInstr &SrcMI) const
 
const RegisterBankInfo * RBI
 
bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*MULO x, 0) -> 0 + no carry out.
 
bool matchCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
Transform G_UNMERGE Constant -> Constant1, Constant2, ...
 
void applyShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
 
const TargetRegisterInfo * TRI
 
bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const
 
bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI dominates UseMI.
 
GISelChangeObserver & Observer
 
void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
 
bool matchCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
Transform trunc (shl x, K) to shl (trunc x), K if K < VT.getScalarSizeInBits().
 
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize, unsigned &ShiftVal) const
Reduce a shift by a constant to an unmerge and a shift on a half sized type.
 
bool matchUDivOrURemByConst(MachineInstr &MI) const
Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
 
bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ands.
 
bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const
Constant fold G_FMA/G_FMAD.
 
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) (fsub (fneg (fmul,...
 
bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg) const
Transform zext(trunc(x)) to x.
 
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is undef.
 
void applyLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo) const
 
bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0) const
Optimize memcpy intrinsics et al, e.g.
 
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
void applySDivOrSRemByConst(MachineInstr &MI) const
 
MachineInstr * buildSDivOrSRemUsingMul(MachineInstr &MI) const
Given a G_SDIV MI or G_SREM MI expressing a signed divide by constant, return an expression that imp...
 
bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const
 
bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (x + y) - y -> x (x + y) - x -> y x - (y + x) -> 0 - y x - (x + z) -> 0 - z.
 
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
 
bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0.
 
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) const
Transform anyext(trunc(x)) to x.
 
void applyExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
 
MachineIRBuilder & Builder
 
void applyCommuteBinOpOperands(MachineInstr &MI) const
 
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const
Delete MI and replace all of its uses with its OpIdx-th operand.
 
void applySextTruncSextLoad(MachineInstr &MI) const
 
const MachineFunction & getMachineFunction() const
 
bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI, BuildFnTy &MatchInfo) const
 
bool matchSDivOrSRemByConst(MachineInstr &MI) const
Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
 
void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
 
void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal) const
 
bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const
Match FPOWI if it's safe to extend it into a series of multiplications.
 
void applyCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
 
bool matchCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
 
void applyCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
 
bool matchAshrShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
Match ashr (shl x, C), C -> sext_inreg (C)
 
void applyCombineUnmergeZExtToZExt(MachineInstr &MI) const
 
ConstantFP - Floating Point Values [float, double].
 
const APFloat & getValue() const
 
const APFloat & getValueAPF() const
 
const APInt & getValue() const
Return the constant as an APInt value reference.
 
This class represents a range of values.
 
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
 
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
 
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
 
const APInt & getLower() const
Return the lower value for this range.
 
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
 
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
 
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
 
const APInt & getUpper() const
Return the upper value for this range.
 
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
 
LLVM_ABI OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
 
@ NeverOverflows
Never overflows.
 
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
 
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
 
@ MayOverflow
May or may not overflow.
 
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
 
This is an important base class in LLVM.
 
A parsed version of the target data layout string in and methods for querying it.
 
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
 
iterator find(const_arg_type_t< KeyT > Val)
 
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
 
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
 
Represents overflowing add operations.
 
Represents an integer addition.
 
Represents a logical and.
 
CmpInst::Predicate getCond() const
 
Register getLHSReg() const
 
Register getRHSReg() const
 
Represents any generic load, including sign/zero extending variants.
 
Register getDstReg() const
Get the definition register of the loaded value.
 
Register getCarryOutReg() const
 
Register getRHSReg() const
 
Register getLHSReg() const
 
Register getLHSReg() const
 
Register getRHSReg() const
 
Represents a G_BUILD_VECTOR.
 
Abstract class that contains various methods for clients to notify about changes.
 
Simple wrapper observer that takes several observers, and calls each one for each event.
 
Represents any type of generic load or store.
 
Register getPointerReg() const
Get the source register of the pointer value.
 
Represents a logical binary operation.
 
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
 
bool isAtomic() const
Returns true if the attached MachineMemOperand has the atomic flag set.
 
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
 
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
 
Register getSourceReg(unsigned I) const
Returns the I'th source register.
 
unsigned getNumSources() const
Returns the number of source registers.
 
Represents a G_MERGE_VALUES.
 
Register getCondReg() const
 
Represents overflowing sub operations.
 
Represents an integer subtraction.
 
Represents a G_UNMERGE_VALUES.
 
unsigned getNumDefs() const
Returns the number of def registers.
 
Register getSourceReg() const
Get the unmerge source register.
 
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
 
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
 
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
 
constexpr unsigned getScalarSizeInBits() const
 
constexpr bool isScalar() const
 
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
 
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
 
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
 
constexpr bool isValid() const
 
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
 
constexpr bool isVector() const
 
constexpr bool isByteSized() const
 
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
 
constexpr bool isPointer() const
 
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
 
constexpr ElementCount getElementCount() const
 
constexpr LLT changeElementSize(unsigned NewEltSize) const
If this type is a vector, return a vector with the same number of elements but the new element size.
 
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
 
constexpr LLT getScalarType() const
 
This is an important class for using LLVM in a threaded context.
 
@ Legalized
Instruction has been legalized and the MachineFunction changed.
 
LLVM_ABI LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0)
 
LLVM_ABI Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index)
Get a pointer to vector element Index located in memory for a vector of type VecTy starting at a base...
 
TypeSize getValue() const
 
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
 
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
 
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
 
MachineInstrBundleIterator< MachineInstr > iterator
 
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
 
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
 
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
 
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
 
Function & getFunction()
Return the LLVM function that this machine code represents.
 
Helper class to build MachineInstr.
 
const TargetInstrInfo & getTII()
 
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
 
MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_CTLZ Op0, Src0.
 
MachineFunction & getMF()
Getter for the function we currently build.
 
MachineRegisterInfo * getMRI()
Getter for MRI.
 
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
 
Register getReg(unsigned Idx) const
Get the register for the operand index.
 
Representation of each machine instruction.
 
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
 
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
 
const MachineBasicBlock * getParent() const
 
LLVM_ABI bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
 
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
 
unsigned getNumOperands() const
Returns the total number of operands.
 
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
 
mop_range uses()
Returns all operands which may be register uses.
 
MachineOperand * findRegisterUseOperand(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false)
Wrapper for findRegisterUseOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
 
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
 
const MachineOperand & getOperand(unsigned i) const
 
uint32_t getFlags() const
Return the MI flags bitvector.
 
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
 
A description of a memory reference used in the backend.
 
LLT getMemoryType() const
Return the memory type of the memory reference.
 
unsigned getAddrSpace() const
 
const MachinePointerInfo & getPointerInfo() const
 
LLVM_ABI Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
 
MachineOperand class - Representation of each machine instruction operand.
 
const ConstantInt * getCImm() const
 
bool isReg() const
isReg - Tests if this is a MO_Register operand.
 
MachineBasicBlock * getMBB() const
 
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
 
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
 
void setMBB(MachineBasicBlock *MBB)
 
void setPredicate(unsigned Predicate)
 
Register getReg() const
getReg - Returns the register number.
 
const ConstantFP * getFPImm() const
 
unsigned getPredicate() const
 
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
 
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
 
This class implements the register bank concept.
 
Wrapper class representing virtual and physical registers.
 
constexpr bool isValid() const
 
size_type size() const
Determine the number of elements in the SetVector.
 
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
 
bool insert(const value_type &X)
Insert a new element into the SetVector.
 
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
 
bool all() const
Returns true if all bits are set.
 
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
 
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
 
A SetVector that performs no allocations if smaller than a certain size.
 
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
 
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
 
reference emplace_back(ArgTypes &&... Args)
 
void push_back(const T &Elt)
 
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
 
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
 
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
 
virtual LLVM_READONLY LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const
Return the preferred type to use for a shift opcode, given the shifted amount type is ShiftValueTy.
 
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
 
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
 
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
 
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
 
virtual const TargetLowering * getTargetLowering() const
 
The instances of the Type class are immutable: once they are created, they are never changed.
 
A Use represents the edge between a Value definition and its users.
 
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
 
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
 
self_iterator getIterator()
 
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
 
@ Legal
The operation is expected to be selectable directly by the target, and no transformation is necessary...
 
@ WidenScalar
The operation should be implemented in terms of a wider scalar base-type.
 
operand_type_match m_Reg()
 
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_BUILD_VECTOR, false > m_GBuildVector(const LHS &L, const RHS &R)
 
GCstAndRegMatch m_GCst(std::optional< ValueAndVReg > &ValReg)
 
operand_type_match m_Pred()
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_UMIN, true > m_GUMin(const LHS &L, const RHS &R)
 
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_XOR, true > m_GXor(const LHS &L, const RHS &R)
 
UnaryOp_match< SrcTy, TargetOpcode::G_SEXT > m_GSExt(const SrcTy &Src)
 
UnaryOp_match< SrcTy, TargetOpcode::G_FPEXT > m_GFPExt(const SrcTy &Src)
 
ConstantMatch< APInt > m_ICst(APInt &Cst)
 
UnaryOp_match< SrcTy, TargetOpcode::G_INTTOPTR > m_GIntToPtr(const SrcTy &Src)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_OR, true > m_GOr(const LHS &L, const RHS &R)
 
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
 
ICstOrSplatMatch< APInt > m_ICstOrSplat(APInt &Cst)
 
ImplicitDefMatch m_GImplicitDef()
 
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
 
CheckType m_SpecificType(LLT Ty)
 
deferred_ty< Register > m_DeferredReg(Register &R)
Similar to m_SpecificReg/Type, but the specific value to match originated from an earlier sub-pattern...
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_UMAX, true > m_GUMax(const LHS &L, const RHS &R)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_FADD, true > m_GFAdd(const LHS &L, const RHS &R)
 
UnaryOp_match< SrcTy, TargetOpcode::G_PTRTOINT > m_GPtrToInt(const SrcTy &Src)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_FSUB, false > m_GFSub(const LHS &L, const RHS &R)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_ASHR, false > m_GAShr(const LHS &L, const RHS &R)
 
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_PTR_ADD, false > m_GPtrAdd(const LHS &L, const RHS &R)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
 
Or< Preds... > m_any_of(Preds &&... preds)
 
SpecificConstantOrSplatMatch m_SpecificICstOrSplat(const APInt &RequestedValue)
Matches a RequestedValue constant or a constant splat of RequestedValue.
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
 
UnaryOp_match< SrcTy, TargetOpcode::G_BITCAST > m_GBitcast(const SrcTy &Src)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_BUILD_VECTOR_TRUNC, false > m_GBuildVectorTrunc(const LHS &L, const RHS &R)
 
bind_ty< MachineInstr * > m_MInstr(MachineInstr *&MI)
 
UnaryOp_match< SrcTy, TargetOpcode::G_FNEG > m_GFNeg(const SrcTy &Src)
 
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP, true > m_c_GICmp(const Pred &P, const LHS &L, const RHS &R)
G_ICMP matcher that also matches commuted compares.
 
TernaryOp_match< Src0Ty, Src1Ty, Src2Ty, TargetOpcode::G_INSERT_VECTOR_ELT > m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
 
GFCstOrSplatGFCstMatch m_GFCstOrSplat(std::optional< FPValueAndVReg > &FPValReg)
 
And< Preds... > m_all_of(Preds &&... preds)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_SMIN, true > m_GSMin(const LHS &L, const RHS &R)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
 
UnaryOp_match< SrcTy, TargetOpcode::G_ANYEXT > m_GAnyExt(const SrcTy &Src)
 
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
 
UnaryOp_match< SrcTy, TargetOpcode::G_TRUNC > m_GTrunc(const SrcTy &Src)
 
BinaryOp_match< LHS, RHS, TargetOpcode::G_SMAX, true > m_GSMax(const LHS &L, const RHS &R)
 
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
 
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
 
Not(const Pred &P) -> Not< Pred >
 
initializer< Ty > init(const Ty &Val)
 
This is an optimization pass for GlobalISel generic memory operations.
 
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
 
LLVM_ABI bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
 
LLVM_ABI Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
 
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
 
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
 
static double log2(double V)
 
LLVM_ABI const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
 
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
 
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
 
LLVM_ABI std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
 
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
 
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
 
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
 
std::function< void(MachineIRBuilder &)> BuildFnTy
 
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
 
LLVM_ABI std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
 
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
 
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
 
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
 
LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
 
LLVM_ABI bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
 
LLVM_ABI bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
 
LLVM_ABI std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
 
constexpr bool has_single_bit(T Value) noexcept
 
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
 
LLVM_ABI const APInt & getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI)
VReg is defined by a G_CONSTANT, return the corresponding value.
 
LLVM_ABI bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
 
SmallVector< std::function< void(MachineInstrBuilder &)>, 4 > OperandBuildSteps
 
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
 
LLVM_ABI bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
 
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
 
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
 
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
 
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
 
auto instructionsWithoutDebug(IterT It, IterT End, bool SkipPseudoOp=true)
Construct a range iterator which begins at It and moves forwards until End is reached, skipping any debug instructions.
 
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
 
LLVM_ABI std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
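
A hedged sketch of a multiplicative-identity fold; DstReg, LHSReg and RHSReg are hypothetical:

  // Hypothetical fold: x * splat(1.0) --> x.
  if (auto Splat = getFConstantSplat(RHSReg, MRI))
    if (Splat->Value.isExactlyValue(1.0))
      replaceRegWith(MRI, DstReg, LHSReg);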
 
LLVM_ABI EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)
 
LLVM_ABI std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
 
LLVM_ABI unsigned getInverseGMinMaxOpcode(unsigned MinMaxOpc)
Returns the inverse opcode of MinMaxOpc, which is a generic min/max opcode like G_SMIN.
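
Assuming the natural signed/unsigned min/max pairing, for example:

  unsigned Inv = getInverseGMinMaxOpcode(TargetOpcode::G_SMIN);
  assert(Inv == TargetOpcode::G_SMAX && "inverse of smin is smax");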
 
@ Xor
Bitwise or logical XOR of integers.
 
@ And
Bitwise or logical AND of integers.
 
@ Sub
Subtraction of integers.
 
DWARFExpression::Operation Op
 
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
 
LLVM_ABI std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns its APFloat value and def register.
 
LLVM_ABI std::optional< APFloat > isConstantOrConstantSplatVectorFP(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a floating-point constant or a splat vector of floating-point constants.
 
constexpr unsigned BitWidth
 
LLVM_ABI int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
 
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
 
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has no NaN elements.
 
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its APInt value and def register.
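
A sketch; Reg is hypothetical:

  // Accept constants reached through folded G_TRUNC/G_SEXT/G_ZEXT chains.
  if (auto ValAndVReg = getIConstantVRegValWithLookThrough(Reg, MRI)) {
    const APInt &Imm = ValAndVReg->Value; // value after folding the chain
    (void)Imm;
  }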
 
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
 
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
 
LLVM_ABI std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg and the underlying value register, folding away any copies.
 
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
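
A worked example of the semantics:

  // An 8-byte-aligned base plus a 4-byte offset is only 4-byte aligned.
  Align A = commonAlignment(Align(8), /*Offset=*/4); // A == Align(4)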
 
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
 
LLVM_ABI Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
 
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
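
For example:

  uint32_t M = llvm::maskTrailingOnes<uint32_t>(4); // M == 0xF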
 
unsigned getFCmpCode(CmpInst::Predicate CC)
Similar to getICmpCode but for FCmpInst.
 
LLVM_ABI std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
 
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
 
This struct is a compact representation of a valid (non-zero power of two) alignment.
 
Simple struct used to hold a Register value and the instruction which defines it.
 
SmallVector< InstructionBuildSteps, 2 > InstrsToBuild
Describes instructions to be built during a combine.
 
bool isNonNegative() const
Returns true if this value is known to be non-negative.
 
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
 
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
 
bool isUnknown() const
Returns true if we don't know any bits.
 
unsigned getBitWidth() const
Get the bit width of this value.
 
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
 
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
 
bool isNegative() const
Returns true if this value is known to be negative.
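
These KnownBits queries compose naturally; a sketch, assuming a known-bits analysis handle KB (e.g. the combiner's GISelKnownBits) and a hypothetical Reg:

  KnownBits Known = KB->getKnownBits(Reg);
  if (Known.countMinLeadingZeros() >= Known.getBitWidth() - 8) {
    // Every bit above the low byte is provably zero, so the value
    // fits in an unsigned 8-bit quantity.
  }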
 
The LegalityQuery object bundles together all the information that's needed to decide whether a given operation is legal or not.
 
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
 
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
 
MachinePointerInfo getWithOffset(int64_t O) const
 
const RegisterBank * Bank
 
Register LogicNonShiftReg
 
Magic data for optimising signed division by a constant.
 
unsigned ShiftAmount
Shift amount.
 
static LLVM_ABI SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
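
A sketch of querying the magic data for a 32-bit signed divide by 7:

  SignedDivisionByConstantInfo Magic =
      SignedDivisionByConstantInfo::get(APInt(32, 7));
  // Magic.Magic is the multiplier, Magic.IsAdd selects the add/sub
  // fixup, and Magic.ShiftAmount is the final arithmetic shift.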
 
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
 
Magic data for optimising unsigned division by a constant.
 
unsigned PreShift
Pre-shift amount.
 
static LLVM_ABI UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
 
unsigned PostShift
Post-shift amount.
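
Analogously to the signed case, a sketch for a 32-bit unsigned divide by 10:

  UnsignedDivisionByConstantInfo Magic =
      UnsignedDivisionByConstantInfo::get(APInt(32, 10));
  // Magic.Magic, Magic.IsAdd, Magic.PreShift and Magic.PostShift
  // describe the multiply/shift sequence that replaces the udiv.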