#define DEBUG_TYPE "gi-combiner"
using namespace MIPatternMatch;
    cl::desc("Force all indexed operations to be "
             "legal for the GlobalISel combiner"));
      MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
      RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
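// Given a map from byte offsets in memory to indices in a load/store sequence,
// and the lowest index seen, decide whether the pattern accesses the bytes in
// big-endian or little-endian order (std::nullopt if it is neither).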
static std::optional<bool>
  unsigned Width = MemOffset2Idx.size();
  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())
    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");
    if (!BigEndian && !LittleEndian)
  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  assert(LI && "Must have LegalizerInfo to query isLegal!");
  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
                                            unsigned ToOpcode) const {
  if (MI.getOpcode() != TargetOpcode::COPY)
  MI.eraseFromParent();
  bool IsUndef = false;
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");
    assert(Def && "Operand not defined");
    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_IMPLICIT_DEF: {
             "All undefs should have the same type");
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");
  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
  if (DstNumElts % SrcNumElts != 0)
  unsigned NumConcat = DstNumElts / SrcNumElts;
  for (unsigned i = 0; i != DstNumElts; ++i) {
    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  for (auto Src : ConcatSrcs) {
  MI.eraseFromParent();
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
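// Insert instructions that have no side effects before a use. For a use in a
// PHI the insertion point is moved into the corresponding predecessor block,
// and a use in the defining block is handled by inserting just after the def.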
static void InsertInsnsWithoutSideEffectsBeforeUse(
    InsertBB = PredBB->getMBB();
  if (InsertBB == DefMI.getParent()) {
    Inserter(InsertBB, std::next(InsertPt), UseMO);
  unsigned CandidateLoadOpc;
  case TargetOpcode::G_ANYEXT:
    CandidateLoadOpc = TargetOpcode::G_LOAD;
  case TargetOpcode::G_SEXT:
    CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
  case TargetOpcode::G_ZEXT:
    CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
  return CandidateLoadOpc;
  if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
  unsigned PreferredOpcode =
          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
      const auto &MMO = LoadMI->getMMO();
      if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
        if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
      Preferred = ChoosePreferredUse(MI, Preferred,
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
    if (PreviouslyEmitted) {
    EmittedInsns[InsertIntoBB] = NewMI;
  auto &LoadValue = MI.getOperand(0);
    Uses.push_back(&UseMO);
  for (auto *UseMO : Uses) {
    if (UseDstReg != ChosenDstReg) {
      if (Preferred.Ty == UseDstTy) {
        InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
      InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
  MI.getOperand(0).setReg(ChosenDstReg);
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  APInt MaskVal = MaybeMask->Value;
  if (MaskSizeBits > LoadSizeBits)
  else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();
    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
         "shouldn't consider debug uses");
    if (DefOrUse == MBB.end())
    return &*DefOrUse == &DefMI;
         "shouldn't consider debug uses");
  else if (DefMI.getParent() != UseMI.getParent())
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  uint64_t SizeInBits = MI.getOperand(2).getImm();
  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
    auto LoadSizeBits = LoadMI->getMemSizeInBits();
    if (LoadSizeBits == SizeInBits)
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
  uint64_t MemBits = LoadDef->getMemSizeInBits();
  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);
  if (LoadDef->isSimple())
  else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
  auto &MMO = LoadDef->getMMO();
  auto PtrInfo = MMO.getPointerInfo();
  MI.eraseFromParent();
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
  Base = MI.getOperand(1).getReg();
  if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
  LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
    if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
      LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: "
      LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: "
    bool MemOpDominatesAddrUses = true;
    for (auto &PtrAddUse :
        MemOpDominatesAddrUses = false;
    if (!MemOpDominatesAddrUses) {
          dbgs() << " Ignoring candidate as memop does not dominate uses: "
    Addr = Use.getOperand(0).getReg();
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
  Addr = MI.getOperand(1).getReg();
  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.");
  if (MI.getOpcode() == TargetOpcode::G_STORE) {
    if (Base == MI.getOperand(0).getReg()) {
      LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.");
  if (MI.getOperand(0).getReg() == Addr) {
    LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses");
    LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
  unsigned Opcode = MI.getOpcode();
  if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
      Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
  MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;
  case TargetOpcode::G_LOAD:
    NewOpcode = TargetOpcode::G_INDEXED_LOAD;
  case TargetOpcode::G_SEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
  case TargetOpcode::G_ZEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
  case TargetOpcode::G_STORE:
    NewOpcode = TargetOpcode::G_INDEXED_STORE;
    MIB.addUse(MI.getOperand(0).getReg());
    MIB.addDef(MI.getOperand(0).getReg());
  MI.eraseFromParent();
  unsigned Opcode = MI.getOpcode();
  bool IsDiv, IsSigned;
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    IsSigned = Opcode == TargetOpcode::G_SDIV;
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    IsSigned = Opcode == TargetOpcode::G_SREM;
  unsigned DivOpcode, RemOpcode, DivremOpcode;
    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;
    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;
    if (MI.getParent() == UseMI.getParent() &&
        ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
         (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
  unsigned Opcode = MI.getOpcode();
  assert(OtherMI && "OtherMI shouldn't be empty.");
  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
    DestDivReg = MI.getOperand(0).getReg();
    DestRemReg = MI.getOperand(0).getReg();
      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
                          : TargetOpcode::G_UDIVREM,
                   {DestDivReg, DestRemReg},
                   {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_BR);
  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
  BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
  return BrCondTarget != MI.getOperand(0).getMBB() &&
  MI.getOperand(0).setMBB(FallthroughBB);
  return Helper.lowerMemcpyInline(MI) ==
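// Attempt to constant-fold a unary floating-point opcode (G_FNEG, G_FABS,
// G_FPTRUNC, G_FSQRT, G_FLOG2, ...) applied to a constant operand, returning
// the folded APFloat value when the operation is supported.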
static std::optional<APFloat>
    return std::nullopt;
  case TargetOpcode::G_FNEG: {
  case TargetOpcode::G_FABS: {
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FSQRT: {
    V = APFloat(sqrt(V.convertToDouble()));
  case TargetOpcode::G_FLOG2: {
  return Cst.has_value();
  assert(Cst && "Optional is unexpectedly empty!");
  MI.eraseFromParent();
  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
  Type *AccessTy = nullptr;
  auto &MF = *MI.getMF();
    if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
                                MF.getFunction().getContext());
  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
  AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
      !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
      (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
  if (Opcode == TargetOpcode::G_USHLSAT &&
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
  auto Imm = MatchInfo.Imm;
  if (Imm >= ScalarSizeInBits) {
    if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
      MI.eraseFromParent();
    Imm = ScalarSizeInBits - 1;
  MI.getOperand(1).setReg(MatchInfo.Reg);
  MI.getOperand(2).setReg(NewImm);
  unsigned ShiftOpcode = MI.getOpcode();
  assert((ShiftOpcode == TargetOpcode::G_SHL ||
          ShiftOpcode == TargetOpcode::G_ASHR ||
          ShiftOpcode == TargetOpcode::G_LSHR ||
          ShiftOpcode == TargetOpcode::G_USHLSAT ||
          ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
  Register LogicDest = MI.getOperand(1).getReg();
  unsigned LogicOpcode = LogicMI->getOpcode();
  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
      LogicOpcode != TargetOpcode::G_XOR)
  const Register C1 = MI.getOperand(2).getReg();
  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
    if (MI->getOpcode() != ShiftOpcode ||
    ShiftVal = MaybeImmVal->Value.getSExtValue();
  if (matchFirstShift(LogicMIOp1, C0Val)) {
    MatchInfo.Shift2 = LogicMIOp1;
  } else if (matchFirstShift(LogicMIOp2, C0Val)) {
    MatchInfo.Shift2 = LogicMIOp2;
  MatchInfo.ValSum = C0Val + C1Val;
  MatchInfo.Logic = LogicMI;
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
          Opcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
  Register Shift2Const = MI.getOperand(2).getReg();
  MI.eraseFromParent();
                                          unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);
                                          unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
  MI.getOperand(2).setReg(ShiftCst.getReg(0));
  assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
  if (!MaybeShiftAmtVal)
  int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
  MatchData.Reg = ExtSrc;
  MatchData.Imm = ShiftAmt;
  return MinLeadingZeros >= ShiftAmt;
  int64_t ShiftAmtVal = MatchData.Imm;
  MI.eraseFromParent();
  for (unsigned I = 0; I < Merge.getNumSources(); ++I)
  auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
  for (unsigned I = 0; I < MergedValues.size(); ++I)
    if (MergedValues[I] != Unmerge->getReg(I))
  MatchInfo = Unmerge->getSourceReg();
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  auto &Unmerge = cast<GUnmerge>(MI);
  auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);
  if (SrcMergeTy != Dst0Ty && !SameSize)
  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;
  bool CanReuseInputDirectly = DstTy == SrcTy;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    if (CanReuseInputDirectly)
  MI.eraseFromParent();
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();
  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
      SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
    Val = Val.lshr(ShiftAmt);
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
  MI.eraseFromParent();
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();
  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    B.buildUndef(DstReg);
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  Register Dst0Reg = MI.getOperand(0).getReg();
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();
         "Expecting a G_ZEXT");
         "ZExt src doesn't fit in destination");
  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
  MI.eraseFromParent();
                                           unsigned TargetShiftSize,
                                           unsigned &ShiftVal) {
  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_ASHR) &&
         "Expected a shift");
  if (Size <= TargetShiftSize)
  ShiftVal = MaybeImmVal->Value.getSExtValue();
  return ShiftVal >= Size / 2 && ShiftVal < Size;
                                           const unsigned &ShiftVal) {
  unsigned HalfSize = Size / 2;
  assert(ShiftVal >= HalfSize);
  unsigned NarrowShiftAmt = ShiftVal - HalfSize;
  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
    Register Narrowed = Unmerge.getReg(1);
    if (NarrowShiftAmt != 0) {
  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
    Register Narrowed = Unmerge.getReg(0);
    if (NarrowShiftAmt != 0) {
    assert(MI.getOpcode() == TargetOpcode::G_ASHR);
        HalfTy, Unmerge.getReg(1),
    if (ShiftVal == HalfSize) {
    } else if (ShiftVal == Size - 1) {
          HalfTy, Unmerge.getReg(1),
  MI.eraseFromParent();
                                              unsigned TargetShiftAmount) {
  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_ADD);
  PtrReg.second = false;
  PtrReg.second = true;
  const bool DoCommute = PtrReg.second;
  MI.eraseFromParent();
  auto &PtrAdd = cast<GPtrAdd>(MI);
  NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
  auto &PtrAdd = cast<GPtrAdd>(MI);
  PtrAdd.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");
  unsigned Opc = MI.getOpcode();
  if (Opc == SrcOpc ||
      (Opc == TargetOpcode::G_ANYEXT &&
       (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
      (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");
  Register Reg = std::get<0>(MatchInfo);
  unsigned SrcExtOp = std::get<1>(MatchInfo);
  if (MI.getOpcode() == SrcExtOp) {
    MI.getOperand(1).setReg(Reg);
  if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
      (MI.getOpcode() == TargetOpcode::G_SEXT &&
       SrcExtOp == TargetOpcode::G_ZEXT)) {
    MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
  MI.getOperand(1).setReg(NegSrc);
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
      SrcOpc == TargetOpcode::G_ZEXT) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  unsigned SrcExtOp = MatchInfo.second;
  if (SrcTy == DstTy) {
    MI.eraseFromParent();
  MI.eraseFromParent();
  if (ShiftSize > 32 && TruncSize < 32)
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  case TargetOpcode::G_SHL: {
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    if (User.getOpcode() == TargetOpcode::G_STORE)
  if (NewShiftTy == SrcTy)
          {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
  MatchInfo = std::make_pair(SrcMI, NewShiftTy);
  LLT NewShiftTy = MatchInfo.second;
  if (NewShiftTy == DstTy)
    return MO.isReg() &&
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
    return !MO.isReg() ||
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  return all_of(Mask, [](int Elt) { return Elt < 0; });
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
  assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
          MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
         "Expected an insert/extract element op");
      MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
  OpIdx = Cst->isZero() ? 3 : 2;
  MI.eraseFromParent();
  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
    return MO.isReg() && MO.getReg().isPhysical();
    return I1->isIdenticalTo(*I2);
    return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
         MaybeCst->getSExtValue() == C;
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
  Register Replacement = MI.getOperand(OpIdx).getReg();
  MI.eraseFromParent();
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
    return MO.isReg() &&
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  MI.eraseFromParent();
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  MI.eraseFromParent();
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  MI.eraseFromParent();
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  MI.eraseFromParent();
  Register &NewLHS = std::get<0>(MatchInfo);
  Register &NewRHS = std::get<1>(MatchInfo);
  NewLHS = MaybeNewLHS;
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
             TargetOpcode::G_INSERT_VECTOR_ELT)
  MatchInfo.resize(NumElts);
    if (IntImm >= NumElts || IntImm < 0)
    if (!MatchInfo[IntImm])
      MatchInfo[IntImm] = TmpReg;
    if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
  if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
  return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
  auto GetUndef = [&]() {
  for (unsigned I = 0; I < MatchInfo.size(); ++I) {
      MatchInfo[I] = GetUndef();
  MI.eraseFromParent();
  std::tie(SubLHS, SubRHS) = MatchInfo;
  MI.eraseFromParent();
  unsigned LogicOpcode = MI.getOpcode();
  assert(LogicOpcode == TargetOpcode::G_AND ||
         LogicOpcode == TargetOpcode::G_OR ||
         LogicOpcode == TargetOpcode::G_XOR);
  if (!LeftHandInst || !RightHandInst)
  unsigned HandOpcode = LeftHandInst->getOpcode();
  if (HandOpcode != RightHandInst->getOpcode())
  if (!XTy.isValid() || XTy != YTy)
  switch (HandOpcode) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT: {
  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {
    ExtraHandOpSrcReg = ZOp.getReg();
  if (ExtraHandOpSrcReg.isValid())
         "Expected at least one instr to build?");
    assert(InstrToBuild.Opcode && "Expected a valid opcode?");
    assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
    for (auto &OperandFn : InstrToBuild.OperandFns)
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
  int64_t ShlCst, AshrCst;
  if (ShlCst != AshrCst)
          {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
  MatchInfo = std::make_tuple(Src, ShlCst);
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
  std::tie(Src, ShiftAmt) = MatchInfo;
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_AND);
      B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
      auto Zero = B.buildConstant(Ty, 0);
  assert(MI.getOpcode() == TargetOpcode::G_AND);
      (LHSBits.Zero | RHSBits.One).isAllOnes()) {
      (LHSBits.One | RHSBits.Zero).isAllOnes()) {
  assert(MI.getOpcode() == TargetOpcode::G_OR);
      (LHSBits.One | RHSBits.Zero).isAllOnes()) {
      (LHSBits.Zero | RHSBits.One).isAllOnes()) {
  unsigned ExtBits = MI.getOperand(2).getImm();
                                 int64_t Cst, bool IsVector, bool IsFP) {
  return (ScalarSizeBits == 1 && Cst == -1) ||
  assert(MI.getOpcode() == TargetOpcode::G_XOR);
  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
    switch (Def->getOpcode()) {
    case TargetOpcode::G_ICMP:
    case TargetOpcode::G_FCMP:
    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:
      RegsToNegate.push_back(Def->getOperand(1).getReg());
      RegsToNegate.push_back(Def->getOperand(2).getReg());
  for (Register Reg : RegsToNegate) {
    switch (Def->getOpcode()) {
    case TargetOpcode::G_ICMP:
    case TargetOpcode::G_FCMP: {
    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_XOR);
  Register SharedReg = MI.getOperand(2).getReg();
  return Y == SharedReg;
  std::tie(X, Y) = MatchInfo;
  MI.getOperand(1).setReg(Not->getOperand(0).getReg());
  MI.getOperand(2).setReg(Y);
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register DstReg = PtrAdd.getReg(0);
  return ConstVal && *ConstVal == 0;
  auto &PtrAdd = cast<GPtrAdd>(MI);
  PtrAdd.eraseFromParent();
  Register Pow2Src1 = MI.getOperand(2).getReg();
  MI.eraseFromParent();
                                           unsigned &SelectOpNo) {
  if (Select->getOpcode() != TargetOpcode::G_SELECT ||
    OtherOperandReg = LHS;
    if (Select->getOpcode() != TargetOpcode::G_SELECT ||
  unsigned BinOpcode = MI.getOpcode();
  bool CanFoldNonConst =
      (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
  if (CanFoldNonConst)
    const unsigned &SelectOperand) {
  unsigned BinOpcode = MI.getOpcode();
  if (SelectOperand == 1) {
  MI.eraseFromParent();
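// Collect the registers feeding an OR-tree rooted at a G_OR so they can be
// checked as candidates for combining into a single wide load.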
std::optional<SmallVector<Register, 8>>
CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
  assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
  const unsigned MaxIter =
  for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
      return std::nullopt;
  if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
    return std::nullopt;
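// Match one leaf of the OR-tree: a zero-extended narrow load, possibly shifted
// left by a multiple of the memory size. On success, return the load together
// with the byte position it contributes within the wide value.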
static std::optional<std::pair<GZExtLoad *, int64_t>>
         "Expected Reg to only have one non-debug use?");
  if (Shift % MemSizeInBits != 0)
    return std::nullopt;
  auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
    return std::nullopt;
  if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
    return std::nullopt;
  return std::make_pair(Load, Shift / MemSizeInBits);
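// For each register in the OR-tree, record which byte offset of the wide load
// its narrow load supplies, and track the lowest-offset load as well as the
// earliest and latest loads in program order.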
std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
CombinerHelper::findLoadOffsetsForLoadOrCombine(
  for (auto Reg : RegsToVisit) {
      return std::nullopt;
    std::tie(Load, DstPos) = *LoadAndPos;
      return std::nullopt;
    auto &LoadMMO = Load->getMMO();
      return std::nullopt;
      LoadPtr = Load->getOperand(1).getReg();
      return std::nullopt;
    if (BasePtr != LoadPtr)
      return std::nullopt;
    if (Idx < LowestIdx) {
      LowestIdxLoad = Load;
      return std::nullopt;
    if (!EarliestLoad || dominates(*Load, *EarliestLoad))
      EarliestLoad = Load;
    if (!LatestLoad || dominates(*LatestLoad, *Load))
         "Expected to find a load for each register?");
  assert(EarliestLoad != LatestLoad && EarliestLoad && LatestLoad &&
         "Expected at least two loads?");
  const unsigned MaxIter = 20;
    if (MI.isLoadFoldBarrier())
      return std::nullopt;
    if (Iter++ == MaxIter)
      return std::nullopt;
  return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
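// Match an OR-tree of shifted, zero-extended narrow loads that together form a
// single wider load, inserting a byte swap when the memory order is the
// opposite of the target's endianness.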
  assert(MI.getOpcode() == TargetOpcode::G_OR);
  if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
  auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
  const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
  if (NarrowMemSizeInBits % 8 != 0)
  auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
      MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
  std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
  std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
  bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
  const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
  const unsigned ZeroByteOffset =
  auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
  if (ZeroOffsetIdx == MemOffset2Idx.end() ||
      ZeroOffsetIdx->second != LowestIdx)
    MIB.setInstrAndDebugLoc(*LatestLoad);
    MIB.buildLoad(LoadDst, Ptr, *NewMMO);
      MIB.buildBSwap(Dst, LoadDst);
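// The store-merging path mirrors the load combine: determine the byte offset
// at which a truncated, shifted value is stored, so adjacent narrow stores of
// the same wide source can be merged into one wide store.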
static std::optional<int64_t>
    return std::nullopt;
  if (!SrcVal.isValid() || TruncVal == SrcVal) {
      return std::nullopt;
  unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
  if (ShiftAmt % NarrowBits != 0)
    return std::nullopt;
  const unsigned Offset = ShiftAmt / NarrowBits;
  if (SrcVal.isValid() && FoundSrcVal != SrcVal)
    return std::nullopt;
    SrcVal = FoundSrcVal;
  else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
    return std::nullopt;
  auto &StoreMI = cast<GStore>(MI);
  LLT MemTy = StoreMI.getMMO().getMemoryType();
  if (!StoreMI.isSimple())
  auto &LastStore = StoreMI;
  BaseReg = LastStore.getPointerReg();
  GStore *LowestIdxStore = &LastStore;
  int64_t LowestIdxOffset = LastOffset;
  if (!LowestShiftAmt)
  const unsigned NumStoresRequired =
  OffsetMap[*LowestShiftAmt] = LastOffset;
  const int MaxInstsToCheck = 10;
  int NumInstsChecked = 0;
  for (auto II = ++LastStore.getReverseIterator();
       II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
    if ((NewStore = dyn_cast<GStore>(&*II))) {
    } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
    if (BaseReg != NewBaseReg)
    if (!ShiftByteOffset)
    if (MemOffset < LowestIdxOffset) {
      LowestIdxOffset = MemOffset;
      LowestIdxStore = NewStore;
    if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
        OffsetMap[*ShiftByteOffset] != INT64_MAX)
    OffsetMap[*ShiftByteOffset] = MemOffset;
    NumInstsChecked = 0;
    if (FoundStores.size() == NumStoresRequired)
  if (FoundStores.size() != NumStoresRequired) {
  const auto &DL = LastStore.getMF()->getDataLayout();
  auto &C = LastStore.getMF()->getFunction().getContext();
  if (!Allowed || !Fast)
  auto checkOffsets = [&](bool MatchLittleEndian) {
    if (MatchLittleEndian) {
      for (unsigned i = 0; i != NumStoresRequired; ++i)
        if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
      for (unsigned i = 0, j = NumStoresRequired - 1; i != NumStoresRequired;
        if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
  bool NeedBswap = false;
  bool NeedRotate = false;
  if (!checkOffsets(DL.isLittleEndian())) {
    if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
    else if (NumStoresRequired == 2 && checkOffsets(DL.isBigEndian()))
           "Unexpected type for rotate");
    ST->eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_PHI);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_CONSTANT:
      if (InSrcs.size() > 2)
  assert(MI.getOpcode() == TargetOpcode::G_PHI);
  for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
    if (!SrcMIs.insert(SrcMI))
    if (InsertPt != MBB->end() && InsertPt->isPHI())
                                SrcMI->getOperand(0).getReg());
    OldToNewSrcMap[SrcMI] = NewExt;
      NewPhi.addMBB(MO.getMBB());
    NewPhi.addUse(NewSrc->getOperand(0).getReg());
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
  unsigned VecIdx = Cst->Value.getZExtValue();
  if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {
  if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
      SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
  if (ScalarTy != DstTy) {
    MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
    if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
    unsigned Idx = Cst->getZExtValue();
    SrcDstPairs.emplace_back(
        std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
  return ExtractedElts.all();
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  for (auto &Pair : SrcDstPairs) {
    auto *ExtMI = Pair.second;
    ExtMI->eraseFromParent();
  MI.eraseFromParent();
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_OR);
  Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
  unsigned FshOpc = 0;
  int64_t CstShlAmt, CstLShrAmt;
      CstShlAmt + CstLShrAmt == BitWidth) {
    FshOpc = TargetOpcode::G_FSHR;
    FshOpc = TargetOpcode::G_FSHL;
    FshOpc = TargetOpcode::G_FSHR;
    B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  unsigned RotateOpc =
      Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  bool IsFSHL = Opc == TargetOpcode::G_FSHL;
                                   : TargetOpcode::G_ROTR));
  MI.removeOperand(2);
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  bool OutOfRange = false;
  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      OutOfRange |= CI->getValue().uge(Bitsize);
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  MI.getOperand(2).setReg(Amt);
                                                   int64_t &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  std::optional<bool> KnownVal;
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
  unsigned Op = TargetOpcode::COPY;
  if (DstSize != LHSSize)
    Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  int64_t AndMaskBits;
  if (AndMaskBits & OrMaskBits)
  if (MI.getOperand(1).getReg() == AndMaskReg)
    MI.getOperand(2).setReg(AndMaskReg);
  MI.getOperand(1).setReg(Src);
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  int64_t Width = MI.getOperand(2).getImm();
  if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
    auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
    auto Cst2 = B.buildConstant(ExtractTy, Width);
    B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
  assert(MI.getOpcode() == TargetOpcode::G_AND);
          TargetOpcode::G_UBFX, Ty, ExtractTy))
  int64_t AndImm, LSBImm;
  auto MaybeMask = static_cast<uint64_t>(AndImm);
  if (MaybeMask & (MaybeMask + 1))
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
  const Register Dst = MI.getOperand(0).getReg();
  const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
                                  ? TargetOpcode::G_SBFX
                                  : TargetOpcode::G_UBFX;
  const unsigned Size = Ty.getScalarSizeInBits();