44#define DEBUG_TYPE "gi-combiner"
47using namespace MIPatternMatch;
53 cl::desc(
"Force all indexed operations to be "
54 "legal for the GlobalISel combiner"));
60 : Builder(
B),
MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
61 MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
62 RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
63 TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
76 assert(
I < ByteWidth &&
"I must be in [0, ByteWidth)");
95 assert(
I < ByteWidth &&
"I must be in [0, ByteWidth)");
96 return ByteWidth -
I - 1;
116static std::optional<bool>
120 unsigned Width = MemOffset2Idx.
size();
123 bool BigEndian =
true, LittleEndian =
true;
124 for (
unsigned MemOffset = 0; MemOffset < Width; ++ MemOffset) {
125 auto MemOffsetAndIdx = MemOffset2Idx.
find(MemOffset);
126 if (MemOffsetAndIdx == MemOffset2Idx.
end())
128 const int64_t
Idx = MemOffsetAndIdx->second - LowestIdx;
129 assert(
Idx >= 0 &&
"Expected non-negative byte offset?");
132 if (!BigEndian && !LittleEndian)
136 assert((BigEndian != LittleEndian) &&
137 "Pattern cannot be both big and little endian!");
144 assert(
LI &&
"Must have LegalizerInfo to query isLegal!");
160 return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
161 isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
188 unsigned ToOpcode)
const {
213 if (
MI.getOpcode() != TargetOpcode::COPY)
222 MI.eraseFromParent();
244 if (OrigDef->
isPHI() || isa<GUnmerge>(OrigDef))
251 std::optional<MachineOperand> MaybePoisonOperand;
253 if (!Operand.isReg())
259 if (!MaybePoisonOperand)
260 MaybePoisonOperand = Operand;
269 if (!MaybePoisonOperand) {
272 cast<GenericMachineInstr>(OrigDef)->dropPoisonGeneratingFlags();
274 B.buildCopy(
DstOp, OrigOp);
279 Register MaybePoisonOperandReg = MaybePoisonOperand->getReg();
280 LLT MaybePoisonOperandRegTy =
MRI.
getType(MaybePoisonOperandReg);
284 cast<GenericMachineInstr>(OrigDef)->dropPoisonGeneratingFlags();
287 auto Freeze =
B.buildFreeze(MaybePoisonOperandRegTy, MaybePoisonOperandReg);
298 assert(
MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
299 "Invalid instruction");
309 assert(Def &&
"Operand not defined");
312 switch (Def->getOpcode()) {
313 case TargetOpcode::G_BUILD_VECTOR:
320 case TargetOpcode::G_IMPLICIT_DEF: {
329 "All undefs should have the same type");
333 EltIdx != EltEnd; ++EltIdx)
334 Ops.
push_back(Undef->getOperand(0).getReg());
345 {TargetOpcode::G_BUILD_VECTOR, {DstTy,
MRI.
getType(Ops[0])}})) {
372 MI.eraseFromParent();
383 if (!ConcatMI1 || !ConcatMI2)
387 if (
MRI.
getType(ConcatMI1->getSourceReg(0)) !=
394 for (
unsigned i = 0; i < Mask.size(); i += ConcatSrcNumElt) {
398 for (
unsigned j = 1; j < ConcatSrcNumElt; j++) {
399 if (i + j >= Mask.size())
401 if (Mask[i + j] != -1)
405 {TargetOpcode::G_IMPLICIT_DEF, {ConcatSrcTy}}))
408 }
else if (Mask[i] % ConcatSrcNumElt == 0) {
409 for (
unsigned j = 1; j < ConcatSrcNumElt; j++) {
410 if (i + j >= Mask.size())
412 if (Mask[i + j] != Mask[i] +
static_cast<int>(j))
418 Ops.
push_back(ConcatMI1->getSourceReg(Mask[i] / ConcatSrcNumElt));
420 Ops.
push_back(ConcatMI2->getSourceReg(Mask[i] / ConcatSrcNumElt -
421 ConcatMI1->getNumSources()));
429 {TargetOpcode::G_CONCAT_VECTORS,
430 {
MRI.
getType(
MI.getOperand(0).getReg()), ConcatSrcTy}}))
453 MI.eraseFromParent();
467 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
468 "Invalid instruction kind");
493 if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
498 if (DstNumElts % SrcNumElts != 0)
504 unsigned NumConcat = DstNumElts / SrcNumElts;
507 for (
unsigned i = 0; i != DstNumElts; ++i) {
514 if ((
Idx % SrcNumElts != (i % SrcNumElts)) ||
515 (ConcatSrcs[i / SrcNumElts] >= 0 &&
516 ConcatSrcs[i / SrcNumElts] != (
int)(
Idx / SrcNumElts)))
519 ConcatSrcs[i / SrcNumElts] =
Idx / SrcNumElts;
526 for (
auto Src : ConcatSrcs) {
552 MI.eraseFromParent();
557 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
558 "Invalid instruction kind");
561 return Mask.size() == 1;
568 int I =
MI.getOperand(3).getShuffleMask()[0];
573 if (
I >= Src1NumElts) {
574 SrcReg =
MI.getOperand(2).getReg();
586 MI.eraseFromParent();
595 const LLT TyForCandidate,
596 unsigned OpcodeForCandidate,
601 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
612 if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
615 else if (CurrentUse.
ExtendOpcode == TargetOpcode::G_ANYEXT &&
616 OpcodeForCandidate != TargetOpcode::G_ANYEXT)
617 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
623 if (!isa<GZExtLoad>(LoadMI) && CurrentUse.
Ty == TyForCandidate) {
625 OpcodeForCandidate == TargetOpcode::G_ZEXT)
627 else if (CurrentUse.
ExtendOpcode == TargetOpcode::G_ZEXT &&
628 OpcodeForCandidate == TargetOpcode::G_SEXT)
629 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
638 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
649static void InsertInsnsWithoutSideEffectsBeforeUse(
661 InsertBB = PredBB->
getMBB();
666 if (InsertBB ==
DefMI.getParent()) {
668 Inserter(InsertBB, std::next(InsertPt), UseMO);
687 unsigned CandidateLoadOpc;
689 case TargetOpcode::G_ANYEXT:
690 CandidateLoadOpc = TargetOpcode::G_LOAD;
692 case TargetOpcode::G_SEXT:
693 CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
695 case TargetOpcode::G_ZEXT:
696 CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
701 return CandidateLoadOpc;
732 if (!llvm::has_single_bit<uint32_t>(LoadValueTy.
getSizeInBits()))
740 unsigned PreferredOpcode =
742 ? TargetOpcode::G_ANYEXT
743 : isa<GSExtLoad>(&
MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
744 Preferred = {
LLT(), PreferredOpcode,
nullptr};
746 if (
UseMI.getOpcode() == TargetOpcode::G_SEXT ||
747 UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
748 (
UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
749 const auto &MMO = LoadMI->
getMMO();
759 if (
LI->
getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
763 Preferred = ChoosePreferredUse(
MI, Preferred,
774 assert(Preferred.Ty != LoadValueTy &&
"Extending to same type?");
792 if (PreviouslyEmitted) {
802 EmittedInsns[InsertIntoBB] = NewMI;
814 Uses.push_back(&UseMO);
816 for (
auto *UseMO :
Uses) {
826 if (UseDstReg != ChosenDstReg) {
827 if (Preferred.
Ty == UseDstTy) {
864 InsertInsnsWithoutSideEffectsBeforeUse(
Builder,
MI, *UseMO,
879 InsertInsnsWithoutSideEffectsBeforeUse(
Builder,
MI, *UseMO, InsertTruncAt);
882 MI.getOperand(0).setReg(ChosenDstReg);
888 assert(
MI.getOpcode() == TargetOpcode::G_AND);
907 APInt MaskVal = MaybeMask->Value;
928 if (MaskSizeBits > LoadSizeBits.
getValue())
948 else if (LoadSizeBits.
getValue() > MaskSizeBits ||
954 {TargetOpcode::G_ZEXTLOAD, {RegTy,
MRI.
getType(PtrReg)}, {MemDesc}}))
958 B.setInstrAndDebugLoc(*LoadMI);
959 auto &MF =
B.getMF();
961 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.
MemoryTy);
962 B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
971 "shouldn't consider debug uses");
979 if (DefOrUse ==
MBB.
end())
981 return &*DefOrUse == &
DefMI;
987 "shouldn't consider debug uses");
990 else if (
DefMI.getParent() !=
UseMI.getParent())
997 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1006 LoadUser = TruncSrc;
1008 uint64_t SizeInBits =
MI.getOperand(2).getImm();
1011 if (
auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser,
MRI)) {
1013 auto LoadSizeBits = LoadMI->getMemSizeInBits();
1017 if (LoadSizeBits == SizeInBits)
1024 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1026 MI.eraseFromParent();
1031 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1041 auto *LoadDef = getOpcodeDef<GLoad>(SrcReg,
MRI);
1045 uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();
1050 unsigned NewSizeBits = std::min((
uint64_t)
MI.getOperand(2).getImm(), MemBits);
1053 if (NewSizeBits < 8)
1065 if (LoadDef->isSimple())
1067 else if (MemBits > NewSizeBits || MemBits == RegTy.
getSizeInBits())
1077 MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
1083 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1085 unsigned ScalarSizeBits;
1086 std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
1095 auto &MMO = LoadDef->
getMMO();
1098 auto PtrInfo = MMO.getPointerInfo();
1102 MI.eraseFromParent();
1110 auto *MF =
MI->getMF();
1111 auto *
Addr = getOpcodeDef<GPtrAdd>(
MI->getPointerReg(),
MRI);
1117 AM.
BaseOffs = CstOff->getSExtValue();
1122 MF->getDataLayout(), AM,
1124 MF->getFunction().getContext()),
1125 MI->getMMO().getAddrSpace());
1130 case TargetOpcode::G_LOAD:
1131 return TargetOpcode::G_INDEXED_LOAD;
1132 case TargetOpcode::G_STORE:
1133 return TargetOpcode::G_INDEXED_STORE;
1134 case TargetOpcode::G_ZEXTLOAD:
1135 return TargetOpcode::G_INDEXED_ZEXTLOAD;
1136 case TargetOpcode::G_SEXTLOAD:
1137 return TargetOpcode::G_INDEXED_SEXTLOAD;
1143bool CombinerHelper::isIndexedLoadStoreLegal(
GLoadStore &LdSt)
const {
1153 if (IndexedOpc == TargetOpcode::G_INDEXED_STORE)
1154 OpTys = {PtrTy, Ty, Ty};
1156 OpTys = {Ty, PtrTy};
1164 cl::desc(
"Number of uses of a base pointer to check before it is no longer "
1165 "considered for post-indexing."));
1169 bool &RematOffset) {
1182 if (!isIndexedLoadStoreLegal(LdSt))
1191 unsigned NumUsesChecked = 0;
1196 auto *PtrAdd = dyn_cast<GPtrAdd>(&
Use);
1204 if (StoredValDef == &
Use)
1207 Offset = PtrAdd->getOffsetReg();
1209 !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(),
Offset,
1215 RematOffset =
false;
1219 if (OffsetDef->
getOpcode() != TargetOpcode::G_CONSTANT)
1225 if (&BasePtrUse == PtrDef)
1230 auto *BasePtrLdSt = dyn_cast<GLoadStore>(&BasePtrUse);
1231 if (BasePtrLdSt && BasePtrLdSt != &LdSt &&
1233 isIndexedLoadStoreLegal(*BasePtrLdSt))
1238 if (
auto *BasePtrUseDef = dyn_cast<GPtrAdd>(&BasePtrUse)) {
1239 Register PtrAddDefReg = BasePtrUseDef->getReg(0);
1243 if (BaseUseUse.getParent() != LdSt.
getParent())
1246 if (
auto *UseUseLdSt = dyn_cast<GLoadStore>(&BaseUseUse))
1255 Addr = PtrAdd->getReg(0);
1256 Base = PtrAdd->getBaseReg();
1277 if (!isIndexedLoadStoreLegal(LdSt))
1281 if (BaseDef->
getOpcode() == TargetOpcode::G_FRAME_INDEX)
1284 if (
auto *St = dyn_cast<GStore>(&LdSt)) {
1286 if (
Base == St->getValueReg())
1291 if (St->getValueReg() ==
Addr)
1297 if (AddrUse.getParent() != LdSt.
getParent())
1302 bool RealUse =
false;
1309 if (
auto *UseLdSt = dyn_cast<GLoadStore>(&AddrUse)) {
1321 assert(
MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
1324 auto *LoadMI = getOpcodeDef<GLoad>(
MI.getOperand(1).getReg(),
MRI);
1338 if (!LoadMI->isSimple())
1350 const unsigned MaxIter = 20;
1353 if (
II->isLoadFoldBarrier())
1355 if (Iter++ == MaxIter)
1371 int Elt = CVal->getZExtValue();
1384 Register VecPtr = LoadMI->getPointerReg();
1392 LegalityQuery Q = {TargetOpcode::G_LOAD, {VecEltTy, PtrTy}, {MMDesc}};
1417 B.buildLoad(Result, finalPtr, PtrInfo, Alignment);
1427 auto &LdSt = cast<GLoadStore>(
MI);
1432 MatchInfo.
IsPre = findPreIndexCandidate(LdSt, MatchInfo.
Addr, MatchInfo.
Base,
1434 if (!MatchInfo.
IsPre &&
1435 !findPostIndexCandidate(LdSt, MatchInfo.
Addr, MatchInfo.
Base,
1445 unsigned Opcode =
MI.getOpcode();
1446 bool IsStore = Opcode == TargetOpcode::G_STORE;
1454 *OldCst->getOperand(1).getCImm());
1455 MatchInfo.
Offset = NewCst.getReg(0);
1461 MIB.
addUse(
MI.getOperand(0).getReg());
1463 MIB.
addDef(
MI.getOperand(0).getReg());
1471 MI.eraseFromParent();
1479 unsigned Opcode =
MI.getOpcode();
1480 bool IsDiv, IsSigned;
1485 case TargetOpcode::G_SDIV:
1486 case TargetOpcode::G_UDIV: {
1488 IsSigned = Opcode == TargetOpcode::G_SDIV;
1491 case TargetOpcode::G_SREM:
1492 case TargetOpcode::G_UREM: {
1494 IsSigned = Opcode == TargetOpcode::G_SREM;
1500 unsigned DivOpcode, RemOpcode, DivremOpcode;
1502 DivOpcode = TargetOpcode::G_SDIV;
1503 RemOpcode = TargetOpcode::G_SREM;
1504 DivremOpcode = TargetOpcode::G_SDIVREM;
1506 DivOpcode = TargetOpcode::G_UDIV;
1507 RemOpcode = TargetOpcode::G_UREM;
1508 DivremOpcode = TargetOpcode::G_UDIVREM;
1527 if (
MI.getParent() ==
UseMI.getParent() &&
1528 ((IsDiv &&
UseMI.getOpcode() == RemOpcode) ||
1529 (!IsDiv &&
UseMI.getOpcode() == DivOpcode)) &&
1542 unsigned Opcode =
MI.getOpcode();
1543 assert(OtherMI &&
"OtherMI shouldn't be empty.");
1546 if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1547 DestDivReg =
MI.getOperand(0).getReg();
1551 DestRemReg =
MI.getOperand(0).getReg();
1555 Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1565 : TargetOpcode::G_UDIVREM,
1566 {DestDivReg, DestRemReg},
1568 MI.eraseFromParent();
1574 assert(
MI.getOpcode() == TargetOpcode::G_BR);
1593 assert(std::next(BrIt) ==
MBB->
end() &&
"expected G_BR to be a terminator");
1595 BrCond = &*std::prev(BrIt);
1596 if (BrCond->
getOpcode() != TargetOpcode::G_BRCOND)
1602 return BrCondTarget !=
MI.getOperand(0).getMBB() &&
1620 MI.getOperand(0).setMBB(FallthroughBB);
1636 return Helper.lowerMemcpyInline(
MI) ==
1652 switch (
MI.getOpcode()) {
1655 case TargetOpcode::G_FNEG: {
1656 Result.changeSign();
1659 case TargetOpcode::G_FABS: {
1663 case TargetOpcode::G_FPTRUNC: {
1665 LLT DstTy =
MRI.getType(
MI.getOperand(0).getReg());
1670 case TargetOpcode::G_FSQRT: {
1674 Result =
APFloat(sqrt(Result.convertToDouble()));
1677 case TargetOpcode::G_FLOG2: {
1698 MI.eraseFromParent();
1709 if (
MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1719 if (!Add2Def || Add2Def->
getOpcode() != TargetOpcode::G_PTR_ADD)
1732 Type *AccessTy =
nullptr;
1733 auto &MF = *
MI.getMF();
1735 if (
auto *LdSt = dyn_cast<GLoadStore>(&
UseMI)) {
1737 MF.getFunction().getContext());
1742 APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1747 AMOld.
BaseOffs = MaybeImmVal->Value.getSExtValue();
1750 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1751 if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1752 !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1765 assert(
MI.getOpcode() == TargetOpcode::G_PTR_ADD &&
"Expected G_PTR_ADD");
1771 MI.getOperand(1).setReg(MatchInfo.
Base);
1772 MI.getOperand(2).setReg(NewOffset.getReg(0));
1785 unsigned Opcode =
MI.getOpcode();
1786 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1787 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1788 Opcode == TargetOpcode::G_USHLSAT) &&
1789 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1809 (MaybeImmVal->Value.getZExtValue() + MaybeImm2Val->Value).getZExtValue();
1814 if (Opcode == TargetOpcode::G_USHLSAT &&
1823 unsigned Opcode =
MI.getOpcode();
1824 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1825 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1826 Opcode == TargetOpcode::G_USHLSAT) &&
1827 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1831 auto Imm = MatchInfo.
Imm;
1833 if (Imm >= ScalarSizeInBits) {
1835 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1837 MI.eraseFromParent();
1842 Imm = ScalarSizeInBits - 1;
1848 MI.getOperand(1).setReg(MatchInfo.
Reg);
1849 MI.getOperand(2).setReg(NewImm);
1865 unsigned ShiftOpcode =
MI.getOpcode();
1866 assert((ShiftOpcode == TargetOpcode::G_SHL ||
1867 ShiftOpcode == TargetOpcode::G_ASHR ||
1868 ShiftOpcode == TargetOpcode::G_LSHR ||
1869 ShiftOpcode == TargetOpcode::G_USHLSAT ||
1870 ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1871 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1874 Register LogicDest =
MI.getOperand(1).getReg();
1879 unsigned LogicOpcode = LogicMI->
getOpcode();
1880 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1881 LogicOpcode != TargetOpcode::G_XOR)
1885 const Register C1 =
MI.getOperand(2).getReg();
1887 if (!MaybeImmVal || MaybeImmVal->Value == 0)
1890 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1894 if (
MI->getOpcode() != ShiftOpcode ||
1904 ShiftVal = MaybeImmVal->Value.getSExtValue();
1915 if (matchFirstShift(LogicMIOp1, C0Val)) {
1917 MatchInfo.
Shift2 = LogicMIOp1;
1918 }
else if (matchFirstShift(LogicMIOp2, C0Val)) {
1920 MatchInfo.
Shift2 = LogicMIOp2;
1924 MatchInfo.
ValSum = C0Val + C1Val;
1930 MatchInfo.
Logic = LogicMI;
1936 unsigned Opcode =
MI.getOpcode();
1937 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1938 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1939 Opcode == TargetOpcode::G_SSHLSAT) &&
1940 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1958 Register Shift2Const =
MI.getOperand(2).getReg();
1970 MI.eraseFromParent();
1974 assert(
MI.getOpcode() == TargetOpcode::G_SHL &&
"Expected G_SHL");
1977 auto &Shl = cast<GenericMachineInstr>(
MI);
1997 assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
1998 SrcDef->getOpcode() == TargetOpcode::G_OR) &&
"Unexpected op");
2001 auto S1 =
B.buildShl(SrcTy,
X, ShiftReg);
2002 auto S2 =
B.buildShl(SrcTy, C1, ShiftReg);
2003 B.buildInstr(SrcDef->getOpcode(), {DstReg}, {
S1, S2});
2009 unsigned &ShiftVal) {
2010 assert(
MI.getOpcode() == TargetOpcode::G_MUL &&
"Expected a G_MUL");
2016 ShiftVal = MaybeImmVal->Value.exactLogBase2();
2017 return (
static_cast<int32_t
>(ShiftVal) != -1);
2021 unsigned &ShiftVal) {
2022 assert(
MI.getOpcode() == TargetOpcode::G_MUL &&
"Expected a G_MUL");
2027 MI.setDesc(MIB.
getTII().
get(TargetOpcode::G_SHL));
2028 MI.getOperand(2).setReg(ShiftCst.getReg(0));
2035 assert(
MI.getOpcode() == TargetOpcode::G_SHL &&
KB);
2050 if (!MaybeShiftAmtVal)
2064 int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
2065 MatchData.
Reg = ExtSrc;
2066 MatchData.
Imm = ShiftAmt;
2070 return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
2076 int64_t ShiftAmtVal = MatchData.
Imm;
2083 MI.eraseFromParent();
2090 for (
unsigned I = 0;
I <
Merge.getNumSources(); ++
I)
2093 auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0],
MRI);
2094 if (!Unmerge || Unmerge->getNumDefs() !=
Merge.getNumSources())
2097 for (
unsigned I = 0;
I < MergedValues.
size(); ++
I)
2098 if (MergedValues[
I] != Unmerge->getReg(
I))
2101 MatchInfo = Unmerge->getSourceReg();
2115 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2116 "Expected an unmerge");
2117 auto &Unmerge = cast<GUnmerge>(
MI);
2120 auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg,
MRI);
2128 if (SrcMergeTy != Dst0Ty && !SameSize)
2132 for (
unsigned Idx = 0;
Idx < SrcInstr->getNumSources(); ++
Idx)
2139 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2140 "Expected an unmerge");
2142 "Not enough operands to replace all defs");
2143 unsigned NumElems =
MI.getNumOperands() - 1;
2147 bool CanReuseInputDirectly = DstTy == SrcTy;
2148 for (
unsigned Idx = 0;
Idx < NumElems; ++
Idx) {
2160 if (CanReuseInputDirectly)
2165 MI.eraseFromParent();
2170 unsigned SrcIdx =
MI.getNumOperands() - 1;
2171 Register SrcReg =
MI.getOperand(SrcIdx).getReg();
2173 if (SrcInstr->
getOpcode() != TargetOpcode::G_CONSTANT &&
2174 SrcInstr->
getOpcode() != TargetOpcode::G_FCONSTANT)
2185 for (
unsigned Idx = 0;
Idx != SrcIdx; ++
Idx) {
2187 Val = Val.
lshr(ShiftAmt);
2195 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2196 "Expected an unmerge");
2198 "Not enough operands to replace all defs");
2199 unsigned NumElems =
MI.getNumOperands() - 1;
2200 for (
unsigned Idx = 0;
Idx < NumElems; ++
Idx) {
2205 MI.eraseFromParent();
2210 unsigned SrcIdx =
MI.getNumOperands() - 1;
2211 Register SrcReg =
MI.getOperand(SrcIdx).getReg();
2213 unsigned NumElems =
MI.getNumOperands() - 1;
2214 for (
unsigned Idx = 0;
Idx < NumElems; ++
Idx) {
2216 B.buildUndef(DstReg);
2223 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2224 "Expected an unmerge");
2229 for (
unsigned Idx = 1, EndIdx =
MI.getNumDefs();
Idx != EndIdx; ++
Idx) {
2237 Register SrcReg =
MI.getOperand(
MI.getNumDefs()).getReg();
2238 Register Dst0Reg =
MI.getOperand(0).getReg();
2240 MI.eraseFromParent();
2244 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2245 "Expected an unmerge");
2246 Register Dst0Reg =
MI.getOperand(0).getReg();
2253 Register SrcReg =
MI.getOperand(
MI.getNumDefs()).getReg();
2270 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2271 "Expected an unmerge");
2273 Register Dst0Reg =
MI.getOperand(0).getReg();
2278 "Expecting a G_ZEXT");
2288 "ZExt src doesn't fit in destination");
2293 for (
unsigned Idx = 1, EndIdx =
MI.getNumDefs();
Idx != EndIdx; ++
Idx) {
2298 MI.eraseFromParent();
2302 unsigned TargetShiftSize,
2303 unsigned &ShiftVal) {
2304 assert((
MI.getOpcode() == TargetOpcode::G_SHL ||
2305 MI.getOpcode() == TargetOpcode::G_LSHR ||
2306 MI.getOpcode() == TargetOpcode::G_ASHR) &&
"Expected a shift");
2314 if (
Size <= TargetShiftSize)
2322 ShiftVal = MaybeImmVal->Value.getSExtValue();
2323 return ShiftVal >=
Size / 2 && ShiftVal <
Size;
2327 const unsigned &ShiftVal) {
2332 unsigned HalfSize =
Size / 2;
2333 assert(ShiftVal >= HalfSize);
2338 unsigned NarrowShiftAmt = ShiftVal - HalfSize;
2340 if (
MI.getOpcode() == TargetOpcode::G_LSHR) {
2341 Register Narrowed = Unmerge.getReg(1);
2348 if (NarrowShiftAmt != 0) {
2355 }
else if (
MI.getOpcode() == TargetOpcode::G_SHL) {
2356 Register Narrowed = Unmerge.getReg(0);
2361 if (NarrowShiftAmt != 0) {
2369 assert(
MI.getOpcode() == TargetOpcode::G_ASHR);
2371 HalfTy, Unmerge.getReg(1),
2374 if (ShiftVal == HalfSize) {
2378 }
else if (ShiftVal ==
Size - 1) {
2386 HalfTy, Unmerge.getReg(1),
2395 MI.eraseFromParent();
2399 unsigned TargetShiftAmount) {
2410 assert(
MI.getOpcode() == TargetOpcode::G_INTTOPTR &&
"Expected a G_INTTOPTR");
2419 assert(
MI.getOpcode() == TargetOpcode::G_INTTOPTR &&
"Expected a G_INTTOPTR");
2422 MI.eraseFromParent();
2426 assert(
MI.getOpcode() == TargetOpcode::G_PTRTOINT &&
"Expected a G_PTRTOINT");
2429 MI.eraseFromParent();
2434 assert(
MI.getOpcode() == TargetOpcode::G_ADD);
2441 PtrReg.second =
false;
2451 PtrReg.second =
true;
2463 const bool DoCommute = PtrReg.second;
2472 MI.eraseFromParent();
2477 auto &PtrAdd = cast<GPtrAdd>(
MI);
2488 NewCst += RHSCst->
sextOrTrunc(DstTy.getSizeInBits());
2498 auto &PtrAdd = cast<GPtrAdd>(
MI);
2502 PtrAdd.eraseFromParent();
2506 assert(
MI.getOpcode() == TargetOpcode::G_ANYEXT &&
"Expected a G_ANYEXT");
2511 SrcReg = OriginalSrcReg;
2518 assert(
MI.getOpcode() == TargetOpcode::G_ZEXT &&
"Expected a G_ZEXT");
2533 assert((
MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2534 MI.getOpcode() == TargetOpcode::G_SEXT ||
2535 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2536 "Expected a G_[ASZ]EXT");
2540 SrcReg = OriginalSrcReg;
2543 unsigned Opc =
MI.getOpcode();
2545 if (Opc == SrcOpc ||
2546 (Opc == TargetOpcode::G_ANYEXT &&
2547 (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2548 (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2557 assert((
MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2558 MI.getOpcode() == TargetOpcode::G_SEXT ||
2559 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2560 "Expected a G_[ASZ]EXT");
2562 Register Reg = std::get<0>(MatchInfo);
2563 unsigned SrcExtOp = std::get<1>(MatchInfo);
2566 if (
MI.getOpcode() == SrcExtOp) {
2568 MI.getOperand(1).setReg(Reg);
2576 if (
MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2577 (
MI.getOpcode() == TargetOpcode::G_SEXT &&
2578 SrcExtOp == TargetOpcode::G_ZEXT)) {
2581 MI.eraseFromParent();
2587 assert(
MI.getOpcode() == TargetOpcode::G_TRUNC &&
"Expected a G_TRUNC");
2591 if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2592 SrcOpc == TargetOpcode::G_ZEXT) {
2601 assert(
MI.getOpcode() == TargetOpcode::G_TRUNC &&
"Expected a G_TRUNC");
2603 unsigned SrcExtOp = MatchInfo.second;
2607 if (SrcTy == DstTy) {
2608 MI.eraseFromParent();
2616 MI.eraseFromParent();
2624 if (ShiftSize > 32 && TruncSize < 32)
2638 assert(
MI.getOpcode() == TargetOpcode::G_TRUNC &&
"Expected a G_TRUNC");
2655 case TargetOpcode::G_SHL: {
2664 case TargetOpcode::G_LSHR:
2665 case TargetOpcode::G_ASHR: {
2672 if (
User.getOpcode() == TargetOpcode::G_STORE)
2676 if (NewShiftTy == SrcTy)
2690 {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
2693 MatchInfo = std::make_pair(SrcMI, NewShiftTy);
2700 LLT NewShiftTy = MatchInfo.second;
2714 if (NewShiftTy == DstTy)
2724 return MO.isReg() &&
2725 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2731 return !MO.isReg() ||
2732 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2737 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2739 return all_of(Mask, [](
int Elt) {
return Elt < 0; });
2743 assert(
MI.getOpcode() == TargetOpcode::G_STORE);
2744 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
MI.getOperand(0).getReg(),
2749 assert(
MI.getOpcode() == TargetOpcode::G_SELECT);
2750 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
MI.getOperand(1).getReg(),
2755 assert((
MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
2756 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
2757 "Expected an insert/extract element op");
2760 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
2773 OpIdx = Cst->isZero() ? 3 : 2;
2818 if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
2845 return MO.isReg() && MO.getReg().isPhysical();
2855 return I1->isIdenticalTo(*I2);
2870 return I1->findRegisterDefOperandIdx(InstAndDef1->Reg,
nullptr) ==
2881 return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
2882 MaybeCst->getSExtValue() ==
C;
2888 std::optional<FPValueAndVReg> MaybeCst;
2892 return MaybeCst->Value.isExactlyValue(
C);
2897 assert(
MI.getNumExplicitDefs() == 1 &&
"Expected one explicit def?");
2899 Register Replacement =
MI.getOperand(OpIdx).getReg();
2901 MI.eraseFromParent();
2907 assert(
MI.getNumExplicitDefs() == 1 &&
"Expected one explicit def?");
2910 MI.eraseFromParent();
2915 unsigned ConstIdx) {
2916 Register ConstReg =
MI.getOperand(ConstIdx).getReg();
2929 assert((
MI.getOpcode() == TargetOpcode::G_FSHL ||
2930 MI.getOpcode() == TargetOpcode::G_FSHR) &&
2931 "This is not a funnel shift operation");
2933 Register ConstReg =
MI.getOperand(3).getReg();
2938 assert((VRegAndVal) &&
"Value is not a constant");
2941 APInt NewConst = VRegAndVal->Value.
urem(
2946 MI.getOpcode(), {MI.getOperand(0)},
2947 {MI.getOperand(1), MI.getOperand(2), NewConstInstr.getReg(0)});
2949 MI.eraseFromParent();
2953 assert(
MI.getOpcode() == TargetOpcode::G_SELECT);
2974 return MO.
isReg() &&
2985 assert(
MI.getNumDefs() == 1 &&
"Expected only one def?");
2987 MI.eraseFromParent();
2991 assert(
MI.getNumDefs() == 1 &&
"Expected only one def?");
2993 MI.eraseFromParent();
2997 assert(
MI.getNumDefs() == 1 &&
"Expected only one def?");
2999 MI.eraseFromParent();
3004 assert(
MI.getNumDefs() == 1 &&
"Expected only one def?");
3006 MI.eraseFromParent();
3010 assert(
MI.getNumDefs() == 1 &&
"Expected only one def?");
3012 MI.eraseFromParent();
3019 Register &NewLHS = std::get<0>(MatchInfo);
3020 Register &NewRHS = std::get<1>(MatchInfo);
3028 NewLHS = MaybeNewLHS;
3037 assert(
MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
3046 TargetOpcode::G_INSERT_VECTOR_ELT)
3052 MatchInfo.
resize(NumElts);
3056 if (IntImm >= NumElts || IntImm < 0)
3058 if (!MatchInfo[IntImm])
3059 MatchInfo[IntImm] = TmpReg;
3063 if (CurrInst->
getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
3065 if (TmpInst->
getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
3074 return TmpInst->
getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
3081 auto GetUndef = [&]() {
3093 MI.eraseFromParent();
3099 std::tie(SubLHS, SubRHS) = MatchInfo;
3101 MI.eraseFromParent();
3112 unsigned LogicOpcode =
MI.getOpcode();
3113 assert(LogicOpcode == TargetOpcode::G_AND ||
3114 LogicOpcode == TargetOpcode::G_OR ||
3115 LogicOpcode == TargetOpcode::G_XOR);
3128 if (!LeftHandInst || !RightHandInst)
3130 unsigned HandOpcode = LeftHandInst->
getOpcode();
3131 if (HandOpcode != RightHandInst->
getOpcode())
3143 if (!XTy.
isValid() || XTy != YTy)
3148 switch (HandOpcode) {
3151 case TargetOpcode::G_ANYEXT:
3152 case TargetOpcode::G_SEXT:
3153 case TargetOpcode::G_ZEXT: {
3157 case TargetOpcode::G_TRUNC: {
3173 case TargetOpcode::G_AND:
3174 case TargetOpcode::G_ASHR:
3175 case TargetOpcode::G_LSHR:
3176 case TargetOpcode::G_SHL: {
3181 ExtraHandOpSrcReg = ZOp.
getReg();
3203 if (ExtraHandOpSrcReg.
isValid())
3215 "Expected at least one instr to build?");
3217 assert(InstrToBuild.Opcode &&
"Expected a valid opcode?");
3218 assert(InstrToBuild.OperandFns.size() &&
"Expected at least one operand?");
3220 for (
auto &OperandFn : InstrToBuild.OperandFns)
3223 MI.eraseFromParent();
3228 assert(
MI.getOpcode() == TargetOpcode::G_ASHR);
3229 int64_t ShlCst, AshrCst;
3235 if (ShlCst != AshrCst)
3238 {TargetOpcode::G_SEXT_INREG, {
MRI.
getType(Src)}}))
3240 MatchInfo = std::make_tuple(Src, ShlCst);
3246 assert(
MI.getOpcode() == TargetOpcode::G_ASHR);
3249 std::tie(Src, ShiftAmt) = MatchInfo;
3252 MI.eraseFromParent();
3258 assert(
MI.getOpcode() == TargetOpcode::G_AND);
3273 B.buildAnd(Dst, R,
B.buildConstant(Ty, C1 & C2));
3276 auto Zero =
B.buildConstant(Ty, 0);
3299 assert(
MI.getOpcode() == TargetOpcode::G_AND);
3323 (LHSBits.
Zero | RHSBits.
One).isAllOnes()) {
3330 (LHSBits.
One | RHSBits.
Zero).isAllOnes()) {
3346 assert(
MI.getOpcode() == TargetOpcode::G_OR);
3364 (LHSBits.
One | RHSBits.
Zero).isAllOnes()) {
3371 (LHSBits.
Zero | RHSBits.
One).isAllOnes()) {
3382 unsigned ExtBits =
MI.getOperand(2).getImm();
3388 int64_t Cst,
bool IsVector,
bool IsFP) {
3390 return (ScalarSizeBits == 1 && Cst == -1) ||
3396 assert(
MI.getOpcode() == TargetOpcode::G_XOR);
3416 for (
unsigned I = 0;
I < RegsToNegate.
size(); ++
I) {
3421 switch (Def->getOpcode()) {
3426 case TargetOpcode::G_ICMP:
3432 case TargetOpcode::G_FCMP:
3438 case TargetOpcode::G_AND:
3439 case TargetOpcode::G_OR:
3445 RegsToNegate.
push_back(Def->getOperand(1).getReg());
3446 RegsToNegate.
push_back(Def->getOperand(2).getReg());
3473 for (
Register Reg : RegsToNegate) {
3478 switch (Def->getOpcode()) {
3481 case TargetOpcode::G_ICMP:
3482 case TargetOpcode::G_FCMP: {
3489 case TargetOpcode::G_AND:
3492 case TargetOpcode::G_OR:
3500 MI.eraseFromParent();
3506 assert(
MI.getOpcode() == TargetOpcode::G_XOR);
3510 Register SharedReg =
MI.getOperand(2).getReg();
3531 return Y == SharedReg;
3538 std::tie(
X,
Y) = MatchInfo;
3542 MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3543 MI.getOperand(2).setReg(
Y);
3548 auto &PtrAdd = cast<GPtrAdd>(
MI);
3549 Register DstReg = PtrAdd.getReg(0);
3558 return ConstVal && *ConstVal == 0;
3567 auto &PtrAdd = cast<GPtrAdd>(
MI);
3569 PtrAdd.eraseFromParent();
3576 Register Pow2Src1 =
MI.getOperand(2).getReg();
3583 MI.eraseFromParent();
3587 unsigned &SelectOpNo) {
3597 if (
Select->getOpcode() != TargetOpcode::G_SELECT ||
3599 OtherOperandReg =
LHS;
3602 if (
Select->getOpcode() != TargetOpcode::G_SELECT ||
3619 unsigned BinOpcode =
MI.getOpcode();
3624 bool CanFoldNonConst =
3625 (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
3630 if (CanFoldNonConst)
3641 const unsigned &SelectOperand) {
3652 unsigned BinOpcode =
MI.getOpcode();
3659 if (SelectOperand == 1) {
3673 MI.eraseFromParent();
3676std::optional<SmallVector<Register, 8>>
3677CombinerHelper::findCandidatesForLoadOrCombine(
const MachineInstr *Root)
const {
3678 assert(Root->
getOpcode() == TargetOpcode::G_OR &&
"Expected G_OR only!");
3707 const unsigned MaxIter =
3709 for (
unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3718 return std::nullopt;
3734 if (RegsToVisit.
empty() || RegsToVisit.
size() % 2 != 0)
3735 return std::nullopt;
3747static std::optional<std::pair<GZExtLoad *, int64_t>>
3751 "Expected Reg to only have one non-debug use?");
3760 if (Shift % MemSizeInBits != 0)
3761 return std::nullopt;
3764 auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad,
MRI);
3766 return std::nullopt;
3768 if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3769 return std::nullopt;
3771 return std::make_pair(Load, Shift / MemSizeInBits);
3774std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3775CombinerHelper::findLoadOffsetsForLoadOrCombine(
3809 for (
auto Reg : RegsToVisit) {
3814 return std::nullopt;
3817 std::tie(Load, DstPos) = *LoadAndPos;
3825 return std::nullopt;
3828 auto &LoadMMO =
Load->getMMO();
3832 return std::nullopt;
3839 LoadPtr =
Load->getOperand(1).getReg();
3845 return std::nullopt;
3852 if (BasePtr != LoadPtr)
3853 return std::nullopt;
3855 if (
Idx < LowestIdx) {
3857 LowestIdxLoad =
Load;
3865 return std::nullopt;
3873 if (!EarliestLoad ||
dominates(*Load, *EarliestLoad))
3874 EarliestLoad =
Load;
3875 if (!LatestLoad ||
dominates(*LatestLoad, *Load))
3882 "Expected to find a load for each register?");
3883 assert(EarliestLoad != LatestLoad && EarliestLoad &&
3884 LatestLoad &&
"Expected at least two loads?");
3893 const unsigned MaxIter = 20;
3899 if (
MI.isLoadFoldBarrier())
3900 return std::nullopt;
3901 if (Iter++ == MaxIter)
3902 return std::nullopt;
3905 return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3910 assert(
MI.getOpcode() == TargetOpcode::G_OR);
3930 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3934 auto RegsToVisit = findCandidatesForLoadOrCombine(&
MI);
3941 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3942 if (NarrowMemSizeInBits % 8 != 0)
3955 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3956 MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3959 std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3966 std::optional<bool> IsBigEndian =
isBigEndian(MemOffset2Idx, LowestIdx);
3969 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3981 const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3982 const unsigned ZeroByteOffset =
3986 auto ZeroOffsetIdx = MemOffset2Idx.
find(ZeroByteOffset);
3987 if (ZeroOffsetIdx == MemOffset2Idx.
end() ||
3988 ZeroOffsetIdx->second != LowestIdx)
4012 MIB.setInstrAndDebugLoc(*LatestLoad);
4014 MIB.buildLoad(LoadDst,
Ptr, *NewMMO);
4016 MIB.buildBSwap(Dst, LoadDst);
4023 auto &
PHI = cast<GPhi>(
MI);
4036 case TargetOpcode::G_ANYEXT:
4038 case TargetOpcode::G_ZEXT:
4039 case TargetOpcode::G_SEXT:
4053 for (
unsigned I = 0;
I <
PHI.getNumIncomingValues(); ++
I) {
4056 case TargetOpcode::G_LOAD:
4057 case TargetOpcode::G_TRUNC:
4058 case TargetOpcode::G_SEXT:
4059 case TargetOpcode::G_ZEXT:
4060 case TargetOpcode::G_ANYEXT:
4061 case TargetOpcode::G_CONSTANT:
4065 if (InSrcs.
size() > 2)
4077 auto &
PHI = cast<GPhi>(
MI);
4086 for (
unsigned I = 0;
I <
PHI.getNumIncomingValues(); ++
I) {
4087 auto SrcReg =
PHI.getIncomingValue(
I);
4089 if (!SrcMIs.
insert(SrcMI))
4095 if (InsertPt !=
MBB->
end() && InsertPt->isPHI())
4101 OldToNewSrcMap[SrcMI] = NewExt;
4110 NewPhi.
addMBB(MO.getMBB());
4114 NewPhi.addUse(NewSrc->getOperand(0).getReg());
4122 assert(
MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
4132 unsigned VecIdx = Cst->Value.getZExtValue();
4137 if (SrcVecMI->
getOpcode() == TargetOpcode::G_TRUNC) {
4141 if (SrcVecMI->
getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
4142 SrcVecMI->
getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
4162 if (ScalarTy != DstTy) {
4165 MI.eraseFromParent();
4174 assert(
MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4197 if (
II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
4202 unsigned Idx = Cst->getZExtValue();
4206 SrcDstPairs.emplace_back(
4207 std::make_pair(
MI.getOperand(
Idx + 1).getReg(), &
II));
4210 return ExtractedElts.
all();
4216 assert(
MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4217 for (
auto &Pair : SrcDstPairs) {
4218 auto *ExtMI = Pair.second;
4220 ExtMI->eraseFromParent();
4222 MI.eraseFromParent();
4228 MI.eraseFromParent();
4238 assert(
MI.getOpcode() == TargetOpcode::G_OR);
4244 Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
4245 unsigned FshOpc = 0;
4256 int64_t CstShlAmt, CstLShrAmt;
4259 CstShlAmt + CstLShrAmt ==
BitWidth) {
4260 FshOpc = TargetOpcode::G_FSHR;
4267 FshOpc = TargetOpcode::G_FSHL;
4273 FshOpc = TargetOpcode::G_FSHR;
4284 B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
4291 unsigned Opc =
MI.getOpcode();
4292 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4297 unsigned RotateOpc =
4298 Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
4303 unsigned Opc =
MI.getOpcode();
4304 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4305 bool IsFSHL = Opc == TargetOpcode::G_FSHL;
4308 : TargetOpcode::G_ROTR));
4309 MI.removeOperand(2);
4315 assert(
MI.getOpcode() == TargetOpcode::G_ROTL ||
4316 MI.getOpcode() == TargetOpcode::G_ROTR);
4320 bool OutOfRange =
false;
4321 auto MatchOutOfRange = [Bitsize, &OutOfRange](
const Constant *
C) {
4322 if (
auto *CI = dyn_cast<ConstantInt>(
C))
4323 OutOfRange |= CI->getValue().uge(Bitsize);
4330 assert(
MI.getOpcode() == TargetOpcode::G_ROTL ||
4331 MI.getOpcode() == TargetOpcode::G_ROTR);
4339 MI.getOperand(2).setReg(Amt);
4344 int64_t &MatchInfo) {
4345 assert(
MI.getOpcode() == TargetOpcode::G_ICMP);
4357 if (KnownRHS.isUnknown())
4360 std::optional<bool> KnownVal;
4361 if (KnownRHS.isZero()) {
4422 assert(
MI.getOpcode() == TargetOpcode::G_ICMP);
4447 if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
4453 unsigned Op = TargetOpcode::COPY;
4454 if (DstSize != LHSSize)
4455 Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
4465 assert(
MI.getOpcode() == TargetOpcode::G_AND);
4475 int64_t AndMaskBits;
4483 if (AndMaskBits & OrMaskBits)
4489 if (
MI.getOperand(1).getReg() == AndMaskReg)
4490 MI.getOperand(2).setReg(AndMaskReg);
4491 MI.getOperand(1).setReg(Src);
4500 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
4507 int64_t Width =
MI.getOperand(2).getImm();
4519 auto Cst1 =
B.buildConstant(ExtractTy, ShiftImm);
4520 auto Cst2 =
B.buildConstant(ExtractTy, Width);
4521 B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4538 int64_t AndImm, LSBImm;
4547 auto MaybeMask =
static_cast<uint64_t>(AndImm);
4548 if (MaybeMask & (MaybeMask + 1))
4557 auto WidthCst =
B.buildConstant(ExtractTy, Width);
4558 auto LSBCst =
B.buildConstant(ExtractTy, LSBImm);
4559 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
4566 const unsigned Opcode =
MI.getOpcode();
4567 assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
4569 const Register Dst =
MI.getOperand(0).getReg();
4571 const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
4572 ? TargetOpcode::G_SBFX
4573 : TargetOpcode::G_UBFX;
4594 if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >=
Size)
4598 if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
4602 const int64_t Pos = ShrAmt - ShlAmt;
4603 const int64_t Width =
Size - ShrAmt;
4606 auto WidthCst =
B.buildConstant(ExtractTy, Width);
4607 auto PosCst =
B.buildConstant(ExtractTy, Pos);
4608 B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
4615 const unsigned Opcode =
MI.getOpcode();
4616 assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);
4618 const Register Dst =
MI.getOperand(0).getReg();
4635 if (ShrAmt < 0 || ShrAmt >=
Size)
4639 if (0 == (SMask >> ShrAmt)) {
4641 B.buildConstant(Dst, 0);
4648 UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
4649 UMask &= maskTrailingOnes<uint64_t>(
Size);
4654 const int64_t Pos = ShrAmt;
4659 if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt ==
Size)
4663 auto WidthCst =
B.buildConstant(ExtractTy, Width);
4664 auto PosCst =
B.buildConstant(ExtractTy, Pos);
4665 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
4670bool CombinerHelper::reassociationCanBreakAddressingModePattern(
4672 auto &PtrAdd = cast<GPtrAdd>(
MI);
4674 Register Src1Reg = PtrAdd.getBaseReg();
4675 auto *Src1Def = getOpcodeDef<GPtrAdd>(Src1Reg,
MRI);
4679 Register Src2Reg = PtrAdd.getOffsetReg();
4691 const APInt &C1APIntVal = *C1;
4692 const APInt &C2APIntVal = *C2;
4693 const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();
4699 unsigned ConvUseOpc = ConvUseMI->
getOpcode();
4700 while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
4701 ConvUseOpc == TargetOpcode::G_PTRTOINT) {
4708 auto *LdStMI = dyn_cast<GLoadStore>(ConvUseMI);
4719 PtrAdd.getMF()->getFunction().getContext());
4720 const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
4721 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4727 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4739 Register Src1Reg =
MI.getOperand(1).getReg();
4740 if (
RHS->getOpcode() != TargetOpcode::G_ADD)
4752 MI.getOperand(1).setReg(NewBase.getReg(0));
4753 MI.getOperand(2).setReg(
RHS->getOperand(2).getReg());
4756 return !reassociationCanBreakAddressingModePattern(
MI);
4766 std::optional<ValueAndVReg> LHSCstOff;
4771 auto *LHSPtrAdd = cast<GPtrAdd>(
LHS);
4776 LHSPtrAdd->moveBefore(&
MI);
4779 auto NewCst =
B.buildConstant(
MRI.
getType(RHSReg), LHSCstOff->Value);
4781 MI.getOperand(2).setReg(NewCst.getReg(0));
4784 LHSPtrAdd->getOperand(2).setReg(RHSReg);
4787 return !reassociationCanBreakAddressingModePattern(
MI);
4795 auto *LHSPtrAdd = dyn_cast<GPtrAdd>(
LHS);
4799 Register Src2Reg =
MI.getOperand(2).getReg();
4800 Register LHSSrc1 = LHSPtrAdd->getBaseReg();
4801 Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
4810 auto NewCst =
B.buildConstant(
MRI.
getType(Src2Reg), *C1 + *C2);
4812 MI.getOperand(1).setReg(LHSSrc1);
4813 MI.getOperand(2).setReg(NewCst.getReg(0));
4816 return !reassociationCanBreakAddressingModePattern(
MI);
4821 auto &PtrAdd = cast<GPtrAdd>(
MI);
4873 auto NewCst =
B.buildInstr(Opc, {OpRHSTy}, {OpLHSRHS, OpRHS});
4874 B.buildInstr(Opc, {DstReg}, {OpLHSLHS, NewCst});
4882 auto NewLHSLHS =
B.buildInstr(Opc, {OpRHSTy}, {OpLHSLHS, OpRHS});
4883 B.buildInstr(Opc, {DstReg}, {NewLHSLHS, OpLHSRHS});
4896 unsigned Opc =
MI.getOpcode();
4913 MatchInfo = *MaybeCst;
4926 MatchInfo = *MaybeCst;
4937 ConstantFP::get(
MI.getMF()->getFunction().getContext(), *MaybeCst);
4943 assert(
MI.getOpcode() == TargetOpcode::G_FMA ||
4944 MI.getOpcode() == TargetOpcode::G_FMAD);
4945 auto [
_, Op1, Op2, Op3] =
MI.getFirst4Regs();
4962 MatchInfo = ConstantFP::get(
MI.getMF()->getFunction().getContext(), Op1F);
4984 assert(
MI.getOpcode() == TargetOpcode::G_AND);
5008 case TargetOpcode::G_ADD:
5009 case TargetOpcode::G_SUB:
5010 case TargetOpcode::G_MUL:
5011 case TargetOpcode::G_AND:
5012 case TargetOpcode::G_OR:
5013 case TargetOpcode::G_XOR:
5021 auto Mask = Cst->Value;
5026 unsigned NarrowWidth = Mask.countr_one();
5032 auto &MF = *
MI.getMF();
5035 auto &
DL = MF.getDataLayout();
5036 if (!TLI.isTruncateFree(WideTy, NarrowTy,
DL, Ctx) ||
5037 !TLI.isZExtFree(NarrowTy, WideTy,
DL, Ctx))
5051 MI.getOperand(1).setReg(Ext.getReg(0));
5058 unsigned Opc =
MI.getOpcode();
5059 assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
5066 unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
5067 : TargetOpcode::G_SADDO;
5069 MI.getOperand(3).setReg(
MI.getOperand(2).getReg());
5077 assert(
MI.getOpcode() == TargetOpcode::G_UMULO ||
5078 MI.getOpcode() == TargetOpcode::G_SMULO);
5087 B.buildConstant(Dst, 0);
5088 B.buildConstant(Carry, 0);
5096 assert(
MI.getOpcode() == TargetOpcode::G_UADDE ||
5097 MI.getOpcode() == TargetOpcode::G_SADDE ||
5098 MI.getOpcode() == TargetOpcode::G_USUBE ||
5099 MI.getOpcode() == TargetOpcode::G_SSUBE);
5104 switch (
MI.getOpcode()) {
5105 case TargetOpcode::G_UADDE:
5106 NewOpcode = TargetOpcode::G_UADDO;
5108 case TargetOpcode::G_SADDE:
5109 NewOpcode = TargetOpcode::G_SADDO;
5111 case TargetOpcode::G_USUBE:
5112 NewOpcode = TargetOpcode::G_USUBO;
5114 case TargetOpcode::G_SSUBE:
5115 NewOpcode = TargetOpcode::G_SSUBO;
5119 MI.setDesc(
B.getTII().get(NewOpcode));
5120 MI.removeOperand(4);
5128 assert(
MI.getOpcode() == TargetOpcode::G_SUB);
5162 B.buildSub(Dst, Zero, ReplaceReg);
5171 assert(
MI.getOpcode() == TargetOpcode::G_UDIV);
5172 auto &UDiv = cast<GenericMachineInstr>(
MI);
5182 unsigned KnownLeadingZeros =
5186 bool UseSRL =
false;
5187 bool UseNPQ =
false;
5193 auto BuildExactUDIVPattern = [&](
const Constant *
C) {
5195 if (IsSplat && !Factors.
empty()) {
5201 auto *CI = cast<ConstantInt>(
C);
5202 APInt Divisor = CI->getValue();
5211 Shifts.
push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
5212 Factors.
push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
5216 auto BuildUDIVPattern = [&](
const Constant *
C) {
5217 auto *CI = cast<ConstantInt>(
C);
5218 const APInt &Divisor = CI->getValue();
5220 bool SelNPQ =
false;
5222 unsigned PreShift = 0, PostShift = 0;
5227 if (!Divisor.
isOne()) {
5233 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()));
5235 Magic = std::move(magics.
Magic);
5238 "We shouldn't generate an undefined shift!");
5240 "We shouldn't generate an undefined shift!");
5244 SelNPQ = magics.
IsAdd;
5248 MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0));
5249 MagicFactors.
push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0));
5251 MIB.buildConstant(ScalarTy,
5256 MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0));
5265 assert(Matched &&
"Expected unary predicate match to succeed");
5269 Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
5270 Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
5273 Factor = Factors[0];
5281 return MIB.buildMul(Ty, Res, Factor);
5287 assert(Matched &&
"Expected unary predicate match to succeed");
5289 Register PreShift, PostShift, MagicFactor, NPQFactor;
5290 auto *RHSDef = getOpcodeDef<GBuildVector>(
RHS,
MRI);
5292 PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0);
5293 MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
5294 NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0);
5295 PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0);
5298 "Non-build_vector operation should have been a scalar");
5299 PreShift = PreShifts[0];
5300 MagicFactor = MagicFactors[0];
5301 PostShift = PostShifts[0];
5305 Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0);
5308 Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0);
5311 Register NPQ = MIB.buildSub(Ty,
LHS, Q).getReg(0);
5316 NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0);
5318 NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0);
5320 Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0);
5323 Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
5324 auto One = MIB.buildConstant(Ty, 1);
5325 auto IsOne = MIB.buildICmp(
5328 return MIB.buildSelect(Ty, IsOne,
LHS, Q);
5332 assert(
MI.getOpcode() == TargetOpcode::G_UDIV);
5337 auto &MF = *
MI.getMF();
5341 auto &
DL = MF.getDataLayout();
5347 if (MF.getFunction().hasMinSize())
5366 {TargetOpcode::G_ICMP,
5382 assert(
MI.getOpcode() == TargetOpcode::G_SDIV &&
"Expected SDIV");
5387 auto &MF = *
MI.getMF();
5391 auto &
DL = MF.getDataLayout();
5397 if (MF.getFunction().hasMinSize())
5416 assert(
MI.getOpcode() == TargetOpcode::G_SDIV &&
"Expected SDIV");
5417 auto &SDiv = cast<GenericMachineInstr>(
MI);
5427 bool UseSRA =
false;
5433 auto BuildSDIVPattern = [&](
const Constant *
C) {
5435 if (IsSplat && !Factors.
empty()) {
5441 auto *CI = cast<ConstantInt>(
C);
5442 APInt Divisor = CI->getValue();
5452 Shifts.
push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
5453 Factors.
push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
5460 assert(Matched &&
"Expected unary predicate match to succeed");
5464 Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
5465 Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
5468 Factor = Factors[0];
5476 return MIB.buildMul(Ty, Res, Factor);
5480 assert((
MI.getOpcode() == TargetOpcode::G_SDIV ||
5481 MI.getOpcode() == TargetOpcode::G_UDIV) &&
5482 "Expected SDIV or UDIV");
5483 auto &Div = cast<GenericMachineInstr>(
MI);
5485 auto MatchPow2 = [&](
const Constant *
C) {
5486 auto *CI = dyn_cast<ConstantInt>(
C);
5487 return CI && (CI->getValue().isPowerOf2() ||
5488 (IsSigned && CI->getValue().isNegatedPowerOf2()));
5494 assert(
MI.getOpcode() == TargetOpcode::G_SDIV &&
"Expected SDIV");
5495 auto &SDiv = cast<GenericMachineInstr>(
MI);
5549 MI.eraseFromParent();
5553 assert(
MI.getOpcode() == TargetOpcode::G_UDIV &&
"Expected UDIV");
5554 auto &UDiv = cast<GenericMachineInstr>(
MI);
5563 MI.eraseFromParent();
5567 assert(
MI.getOpcode() == TargetOpcode::G_UMULH);
5572 auto MatchPow2ExceptOne = [&](
const Constant *
C) {
5573 if (
auto *CI = dyn_cast<ConstantInt>(
C))
5574 return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
5595 MI.eraseFromParent();
5600 unsigned Opc =
MI.getOpcode();
5601 assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
5602 Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5603 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);
5615 Opc = TargetOpcode::G_FSUB;
5620 Opc = TargetOpcode::G_FADD;
5626 else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5627 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
5636 MI.setDesc(
B.getTII().get(Opc));
5637 MI.getOperand(1).setReg(
X);
5638 MI.getOperand(2).setReg(
Y);
5645 assert(
MI.getOpcode() == TargetOpcode::G_FSUB);
5648 MatchInfo =
MI.getOperand(2).getReg();
5658 if (LHSCst->Value.isNegZero())
5662 if (LHSCst->Value.isPosZero())
5678 if (
MI.getOpcode() != TargetOpcode::G_FMUL)
5686 MRI.use_instr_nodbg_end()) >
5688 MRI.use_instr_nodbg_end());
5692 bool &AllowFusionGlobally,
5694 bool CanReassociate) {
5696 auto *MF =
MI.getMF();
5697 const auto &TLI = *MF->getSubtarget().getTargetLowering();
5701 if (CanReassociate &&
5708 bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
5711 if (!HasFMAD && !HasFMA)
5715 Options.UnsafeFPMath || HasFMAD;
5720 Aggressive = TLI.enableAggressiveFMAFusion(DstType);
5726 assert(
MI.getOpcode() == TargetOpcode::G_FADD);
5728 bool AllowFusionGlobally, HasFMAD,
Aggressive;
5736 unsigned PreferredFusedOpcode =
5737 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5751 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
5752 {
LHS.MI->getOperand(1).getReg(),
5753 LHS.MI->getOperand(2).getReg(),
RHS.Reg});
5762 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
5763 {
RHS.MI->getOperand(1).getReg(),
5764 RHS.MI->getOperand(2).getReg(),
LHS.Reg});
5774 assert(
MI.getOpcode() == TargetOpcode::G_FADD);
5776 bool AllowFusionGlobally, HasFMAD,
Aggressive;
5780 const auto &TLI = *
MI.getMF()->getSubtarget().getTargetLowering();
5787 unsigned PreferredFusedOpcode =
5788 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5802 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstType,
5807 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
5808 {FpExtX.getReg(0), FpExtY.getReg(0),
RHS.Reg});
5817 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstType,
5822 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
5823 {FpExtX.getReg(0), FpExtY.getReg(0),
LHS.Reg});
5833 assert(
MI.getOpcode() == TargetOpcode::G_FADD);
5835 bool AllowFusionGlobally, HasFMAD,
Aggressive;
5845 unsigned PreferredFusedOpcode =
5846 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5859 if (
LHS.MI->getOpcode() == PreferredFusedOpcode &&
5861 TargetOpcode::G_FMUL) &&
5868 else if (
RHS.MI->getOpcode() == PreferredFusedOpcode &&
5870 TargetOpcode::G_FMUL) &&
5879 Register X = FMA->getOperand(1).getReg();
5880 Register Y = FMA->getOperand(2).getReg();
5886 B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
5887 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
5898 assert(
MI.getOpcode() == TargetOpcode::G_FADD);
5900 bool AllowFusionGlobally, HasFMAD,
Aggressive;
5907 const auto &TLI = *
MI.getMF()->getSubtarget().getTargetLowering();
5914 unsigned PreferredFusedOpcode =
5915 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5928 Register FpExtU =
B.buildFPExt(DstType, U).getReg(0);
5929 Register FpExtV =
B.buildFPExt(DstType, V).getReg(0);
5931 B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
5933 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
5940 if (
LHS.MI->getOpcode() == PreferredFusedOpcode &&
5944 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstType,
5949 LHS.MI->getOperand(1).getReg(),
5950 LHS.MI->getOperand(2).getReg(),
B);
5961 FMAMI->
getOpcode() == PreferredFusedOpcode) {
5964 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstType,
5969 X =
B.buildFPExt(DstType,
X).getReg(0);
5970 Y =
B.buildFPExt(DstType,
Y).getReg(0);
5981 if (
RHS.MI->getOpcode() == PreferredFusedOpcode &&
5985 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstType,
5990 RHS.MI->getOperand(1).getReg(),
5991 RHS.MI->getOperand(2).getReg(),
B);
6002 FMAMI->
getOpcode() == PreferredFusedOpcode) {
6005 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstType,
6010 X =
B.buildFPExt(DstType,
X).getReg(0);
6011 Y =
B.buildFPExt(DstType,
Y).getReg(0);
6024 assert(
MI.getOpcode() == TargetOpcode::G_FSUB);
6026 bool AllowFusionGlobally, HasFMAD,
Aggressive;
6038 int FirstMulHasFewerUses =
true;
6042 FirstMulHasFewerUses =
false;
6044 unsigned PreferredFusedOpcode =
6045 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6048 if (FirstMulHasFewerUses &&
6053 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
6054 {
LHS.MI->getOperand(1).getReg(),
6055 LHS.MI->getOperand(2).getReg(), NegZ});
6064 B.buildFNeg(DstTy,
RHS.MI->getOperand(1).getReg()).
getReg(0);
6065 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
6066 {NegY,
RHS.MI->getOperand(2).getReg(),
LHS.Reg});
6076 assert(
MI.getOpcode() == TargetOpcode::G_FSUB);
6078 bool AllowFusionGlobally, HasFMAD,
Aggressive;
6086 unsigned PreferredFusedOpcode =
6087 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6098 Register NegZ =
B.buildFNeg(DstTy, RHSReg).getReg(0);
6099 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
6111 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
6123 assert(
MI.getOpcode() == TargetOpcode::G_FSUB);
6125 bool AllowFusionGlobally, HasFMAD,
Aggressive;
6133 unsigned PreferredFusedOpcode =
6134 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6146 Register NegZ =
B.buildFNeg(DstTy, RHSReg).getReg(0);
6147 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
6148 {FpExtX, FpExtY, NegZ});
6160 Register NegY =
B.buildFNeg(DstTy, FpExtY).getReg(0);
6163 B.buildInstr(PreferredFusedOpcode, {
MI.getOperand(0).getReg()},
6164 {NegY, FpExtZ, LHSReg});
6174 assert(
MI.getOpcode() == TargetOpcode::G_FSUB);
6176 bool AllowFusionGlobally, HasFMAD,
Aggressive;
6180 const auto &TLI = *
MI.getMF()->getSubtarget().getTargetLowering();
6185 unsigned PreferredFusedOpcode =
6186 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6190 Register FpExtX =
B.buildFPExt(DstTy,
X).getReg(0);
6191 Register FpExtY =
B.buildFPExt(DstTy,
Y).getReg(0);
6192 B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
6203 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstTy,
6209 B.buildFNeg(
MI.getOperand(0).getReg(), FMAReg);
6219 TLI.isFPExtFoldable(
MI, PreferredFusedOpcode, DstTy,
6232 unsigned &IdxToPropagate) {
6234 switch (
MI.getOpcode()) {
6237 case TargetOpcode::G_FMINNUM:
6238 case TargetOpcode::G_FMAXNUM:
6239 PropagateNaN =
false;
6241 case TargetOpcode::G_FMINIMUM:
6242 case TargetOpcode::G_FMAXIMUM:
6243 PropagateNaN =
true;
6247 auto MatchNaN = [&](
unsigned Idx) {
6252 IdxToPropagate = PropagateNaN ?
Idx : (
Idx == 1 ? 2 : 1);
6256 return MatchNaN(1) || MatchNaN(2);
6260 assert(
MI.getOpcode() == TargetOpcode::G_ADD &&
"Expected a G_ADD");
6270 Reg == MaybeSameReg;
6305 std::optional<ValueAndVReg> ShiftAmount;
6336 std::optional<ValueAndVReg> ShiftAmt;
6343 return ShiftAmt->Value.getZExtValue() == MatchTy.
getSizeInBits() &&
6347unsigned CombinerHelper::getFPMinMaxOpcForSelect(
6349 SelectPatternNaNBehaviour VsNaNRetVal)
const {
6350 assert(VsNaNRetVal != SelectPatternNaNBehaviour::NOT_APPLICABLE &&
6351 "Expected a NaN behaviour?");
6361 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
6362 return TargetOpcode::G_FMAXNUM;
6363 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
6364 return TargetOpcode::G_FMAXIMUM;
6365 if (
isLegal({TargetOpcode::G_FMAXNUM, {DstTy}}))
6366 return TargetOpcode::G_FMAXNUM;
6367 if (
isLegal({TargetOpcode::G_FMAXIMUM, {DstTy}}))
6368 return TargetOpcode::G_FMAXIMUM;
6374 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
6375 return TargetOpcode::G_FMINNUM;
6376 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
6377 return TargetOpcode::G_FMINIMUM;
6378 if (
isLegal({TargetOpcode::G_FMINNUM, {DstTy}}))
6379 return TargetOpcode::G_FMINNUM;
6380 if (!
isLegal({TargetOpcode::G_FMINIMUM, {DstTy}}))
6382 return TargetOpcode::G_FMINIMUM;
6386CombinerHelper::SelectPatternNaNBehaviour
6388 bool IsOrderedComparison)
const {
6392 if (!LHSSafe && !RHSSafe)
6393 return SelectPatternNaNBehaviour::NOT_APPLICABLE;
6394 if (LHSSafe && RHSSafe)
6395 return SelectPatternNaNBehaviour::RETURNS_ANY;
6398 if (IsOrderedComparison)
6399 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_NAN
6400 : SelectPatternNaNBehaviour::RETURNS_OTHER;
6403 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_OTHER
6404 : SelectPatternNaNBehaviour::RETURNS_NAN;
6426 SelectPatternNaNBehaviour ResWithKnownNaNInfo =
6428 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::NOT_APPLICABLE)
6430 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
6433 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_NAN)
6434 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_OTHER;
6435 else if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_OTHER)
6436 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_NAN;
6438 if (TrueVal != CmpLHS || FalseVal != CmpRHS)
6441 unsigned Opc = getFPMinMaxOpcForSelect(Pred, DstTy, ResWithKnownNaNInfo);
6442 if (!Opc || !
isLegal({Opc, {DstTy}}))
6446 if (Opc != TargetOpcode::G_FMAXIMUM && Opc != TargetOpcode::G_FMINIMUM) {
6451 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) {
6453 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero())
6458 B.buildInstr(Opc, {Dst}, {CmpLHS, CmpRHS});
6466 assert(
MI.getOpcode() == TargetOpcode::G_SELECT);
6473 Register TrueVal =
MI.getOperand(2).getReg();
6474 Register FalseVal =
MI.getOperand(3).getReg();
6475 return matchFPSelectToMinMax(Dst,
Cond, TrueVal, FalseVal, MatchInfo);
6480 assert(
MI.getOpcode() == TargetOpcode::G_ICMP);
6493 if (MatchedSub &&
X != OpLHS)
6501 Y =
X == OpLHS ? OpRHS :
X == OpRHS ? OpLHS :
Register();
6505 B.buildICmp(Pred, Dst,
Y, Zero);
6511 Register ShiftReg =
MI.getOperand(2).getReg();
6513 auto IsShiftTooBig = [&](
const Constant *
C) {
6514 auto *CI = dyn_cast<ConstantInt>(
C);
6521 unsigned LHSOpndIdx = 1;
6522 unsigned RHSOpndIdx = 2;
6523 switch (
MI.getOpcode()) {
6524 case TargetOpcode::G_UADDO:
6525 case TargetOpcode::G_SADDO:
6526 case TargetOpcode::G_UMULO:
6527 case TargetOpcode::G_SMULO:
6541 TargetOpcode::G_CONSTANT_FOLD_BARRIER)
6546 TargetOpcode::G_CONSTANT_FOLD_BARRIER &&
6553 std::optional<FPValueAndVReg> ValAndVReg;
6561 unsigned LHSOpndIdx = 1;
6562 unsigned RHSOpndIdx = 2;
6563 switch (
MI.getOpcode()) {
6564 case TargetOpcode::G_UADDO:
6565 case TargetOpcode::G_SADDO:
6566 case TargetOpcode::G_UMULO:
6567 case TargetOpcode::G_SMULO:
6574 Register LHSReg =
MI.getOperand(LHSOpndIdx).getReg();
6575 Register RHSReg =
MI.getOperand(RHSOpndIdx).getReg();
6576 MI.getOperand(LHSOpndIdx).setReg(RHSReg);
6577 MI.getOperand(RHSOpndIdx).setReg(LHSReg);
6581bool CombinerHelper::isOneOrOneSplat(
Register Src,
bool AllowUndefs) {
6584 return isConstantSplatVector(Src, 1, AllowUndefs);
6586 if (AllowUndefs && getOpcodeDef<GImplicitDef>(Src,
MRI) !=
nullptr)
6589 return IConstant && IConstant->Value == 1;
6594bool CombinerHelper::isZeroOrZeroSplat(
Register Src,
bool AllowUndefs) {
6597 return isConstantSplatVector(Src, 0, AllowUndefs);
6599 if (AllowUndefs && getOpcodeDef<GImplicitDef>(Src,
MRI) !=
nullptr)
6602 return IConstant && IConstant->Value == 0;
6609bool CombinerHelper::isConstantSplatVector(
Register Src, int64_t SplatValue,
6616 for (
unsigned I = 0;
I < NumSources; ++
I) {
6619 if (ImplicitDef && AllowUndefs)
6621 if (ImplicitDef && !AllowUndefs)
6623 std::optional<ValueAndVReg> IConstant =
6625 if (IConstant && IConstant->Value == SplatValue)
6635CombinerHelper::getConstantOrConstantSplatVector(
Register Src) {
6638 return IConstant->Value;
6642 return std::nullopt;
6645 std::optional<APInt>
Value = std::nullopt;
6646 for (
unsigned I = 0;
I < NumSources; ++
I) {
6647 std::optional<ValueAndVReg> IConstant =
6650 return std::nullopt;
6654 return std::nullopt;
6660bool CombinerHelper::isConstantOrConstantVectorI(
Register Src)
const {
6670 for (
unsigned I = 0;
I < NumSources; ++
I) {
6671 std::optional<ValueAndVReg> IConstant =
6680bool CombinerHelper::tryFoldSelectOfConstants(
GSelect *
Select,
6698 std::optional<ValueAndVReg> TrueOpt =
6700 std::optional<ValueAndVReg> FalseOpt =
6703 if (!TrueOpt || !FalseOpt)
6706 APInt TrueValue = TrueOpt->Value;
6707 APInt FalseValue = FalseOpt->Value;
6712 B.setInstrAndDebugLoc(*
Select);
6713 B.buildZExtOrTrunc(Dest,
Cond);
6721 B.setInstrAndDebugLoc(*
Select);
6722 B.buildSExtOrTrunc(Dest,
Cond);
6730 B.setInstrAndDebugLoc(*
Select);
6732 B.buildNot(Inner,
Cond);
6733 B.buildZExtOrTrunc(Dest, Inner);
6741 B.setInstrAndDebugLoc(*
Select);
6743 B.buildNot(Inner,
Cond);
6744 B.buildSExtOrTrunc(Dest, Inner);
6750 if (TrueValue - 1 == FalseValue) {
6752 B.setInstrAndDebugLoc(*
Select);
6754 B.buildZExtOrTrunc(Inner,
Cond);
6755 B.buildAdd(Dest, Inner, False);
6761 if (TrueValue + 1 == FalseValue) {
6763 B.setInstrAndDebugLoc(*
Select);
6765 B.buildSExtOrTrunc(Inner,
Cond);
6766 B.buildAdd(Dest, Inner, False);
6774 B.setInstrAndDebugLoc(*
Select);
6776 B.buildZExtOrTrunc(Inner,
Cond);
6779 auto ShAmtC =
B.buildConstant(ShiftTy, TrueValue.
exactLogBase2());
6780 B.buildShl(Dest, Inner, ShAmtC, Flags);
6787 B.setInstrAndDebugLoc(*
Select);
6789 B.buildSExtOrTrunc(Inner,
Cond);
6790 B.buildOr(Dest, Inner, False, Flags);
6798 B.setInstrAndDebugLoc(*
Select);
6800 B.buildNot(Not,
Cond);
6802 B.buildSExtOrTrunc(Inner, Not);
6803 B.buildOr(Dest, Inner, True, Flags);
6812bool CombinerHelper::tryFoldBoolSelectToLogic(
GSelect *
Select,
6829 if (CondTy != TrueTy)
6834 if ((
Cond == True) || isOneOrOneSplat(True,
true)) {
6836 B.setInstrAndDebugLoc(*
Select);
6838 B.buildZExtOrTrunc(Ext,
Cond);
6839 auto FreezeFalse =
B.buildFreeze(TrueTy, False);
6840 B.buildOr(DstReg, Ext, FreezeFalse, Flags);
6847 if ((
Cond == False) || isZeroOrZeroSplat(False,
true)) {
6849 B.setInstrAndDebugLoc(*
Select);
6851 B.buildZExtOrTrunc(Ext,
Cond);
6852 auto FreezeTrue =
B.buildFreeze(TrueTy, True);
6853 B.buildAnd(DstReg, Ext, FreezeTrue);
6859 if (isOneOrOneSplat(False,
true)) {
6861 B.setInstrAndDebugLoc(*
Select);
6864 B.buildNot(Inner,
Cond);
6867 B.buildZExtOrTrunc(Ext, Inner);
6868 auto FreezeTrue =
B.buildFreeze(TrueTy, True);
6869 B.buildOr(DstReg, Ext, FreezeTrue, Flags);
6875 if (isZeroOrZeroSplat(True,
true)) {
6877 B.setInstrAndDebugLoc(*
Select);
6880 B.buildNot(Inner,
Cond);
6883 B.buildZExtOrTrunc(Ext, Inner);
6884 auto FreezeFalse =
B.buildFreeze(TrueTy, False);
6885 B.buildAnd(DstReg, Ext, FreezeFalse);
6916 Register CmpLHS = Cmp->getLHSReg();
6917 Register CmpRHS = Cmp->getRHSReg();
6920 if (True == CmpRHS && False == CmpLHS) {
6928 if (True != CmpLHS || False != CmpRHS)
6968 if (tryFoldSelectOfConstants(
Select, MatchInfo))
6971 if (tryFoldBoolSelectToLogic(
Select, MatchInfo))
6981bool CombinerHelper::tryFoldAndOrOrICmpsUsingRanges(
GLogicalBinOp *Logic,
6983 assert(Logic->
getOpcode() != TargetOpcode::G_XOR &&
"unexpected xor");
6984 bool IsAnd = Logic->
getOpcode() == TargetOpcode::G_AND;
6988 unsigned Flags = Logic->
getFlags();
6991 GICmp *Cmp1 = getOpcodeDef<GICmp>(LHS,
MRI);
6996 GICmp *Cmp2 = getOpcodeDef<GICmp>(RHS,
MRI);
7007 std::optional<ValueAndVReg> MaybeC1 =
7011 C1 = MaybeC1->Value;
7013 std::optional<ValueAndVReg> MaybeC2 =
7017 C2 = MaybeC2->Value;
7038 std::optional<APInt> Offset1;
7039 std::optional<APInt> Offset2;
7041 if (
GAdd *
Add = getOpcodeDef<GAdd>(R1,
MRI)) {
7042 std::optional<ValueAndVReg> MaybeOffset1 =
7045 R1 =
Add->getLHSReg();
7046 Offset1 = MaybeOffset1->Value;
7050 std::optional<ValueAndVReg> MaybeOffset2 =
7053 R2 =
Add->getLHSReg();
7054 Offset2 = MaybeOffset2->Value;
7073 bool CreateMask =
false;
7086 if (!LowerDiff.
isPowerOf2() || LowerDiff != UpperDiff ||
7099 CR->getEquivalentICmp(NewPred, NewC,
Offset);
7109 if (CreateMask &&
Offset != 0) {
7110 auto TildeLowerDiff =
B.buildConstant(CmpOperandTy, ~LowerDiff);
7111 auto And =
B.buildAnd(CmpOperandTy, R1, TildeLowerDiff);
7112 auto OffsetC =
B.buildConstant(CmpOperandTy,
Offset);
7113 auto Add =
B.buildAdd(CmpOperandTy,
And, OffsetC, Flags);
7114 auto NewCon =
B.buildConstant(CmpOperandTy, NewC);
7115 auto ICmp =
B.buildICmp(NewPred, CmpTy,
Add, NewCon);
7116 B.buildZExtOrTrunc(DstReg, ICmp);
7117 }
else if (CreateMask &&
Offset == 0) {
7118 auto TildeLowerDiff =
B.buildConstant(CmpOperandTy, ~LowerDiff);
7119 auto And =
B.buildAnd(CmpOperandTy, R1, TildeLowerDiff);
7120 auto NewCon =
B.buildConstant(CmpOperandTy, NewC);
7121 auto ICmp =
B.buildICmp(NewPred, CmpTy,
And, NewCon);
7122 B.buildZExtOrTrunc(DstReg, ICmp);
7123 }
else if (!CreateMask &&
Offset != 0) {
7124 auto OffsetC =
B.buildConstant(CmpOperandTy,
Offset);
7125 auto Add =
B.buildAdd(CmpOperandTy, R1, OffsetC, Flags);
7126 auto NewCon =
B.buildConstant(CmpOperandTy, NewC);
7127 auto ICmp =
B.buildICmp(NewPred, CmpTy,
Add, NewCon);
7128 B.buildZExtOrTrunc(DstReg, ICmp);
7129 }
else if (!CreateMask &&
Offset == 0) {
7130 auto NewCon =
B.buildConstant(CmpOperandTy, NewC);
7131 auto ICmp =
B.buildICmp(NewPred, CmpTy, R1, NewCon);
7132 B.buildZExtOrTrunc(DstReg, ICmp);
7140bool CombinerHelper::tryFoldLogicOfFCmps(
GLogicalBinOp *Logic,
7146 bool IsAnd = Logic->
getOpcode() == TargetOpcode::G_AND;
7149 GFCmp *Cmp1 = getOpcodeDef<GFCmp>(LHS,
MRI);
7154 GFCmp *Cmp2 = getOpcodeDef<GFCmp>(RHS,
MRI);
7164 {TargetOpcode::G_FCMP, {CmpTy, CmpOperandTy}}) ||
7178 if (LHS0 == RHS1 && LHS1 == RHS0) {
7184 if (LHS0 == RHS0 && LHS1 == RHS1) {
7188 unsigned NewPred = IsAnd ? CmpCodeL & CmpCodeR : CmpCodeL | CmpCodeR;
7195 auto False =
B.buildConstant(CmpTy, 0);
7196 B.buildZExtOrTrunc(DestReg, False);
7203 B.buildZExtOrTrunc(DestReg, True);
7205 auto Cmp =
B.buildFCmp(Pred, CmpTy, LHS0, LHS1, Flags);
7206 B.buildZExtOrTrunc(DestReg, Cmp);
7218 if (tryFoldAndOrOrICmpsUsingRanges(
And, MatchInfo))
7221 if (tryFoldLogicOfFCmps(
And, MatchInfo))
7230 if (tryFoldAndOrOrICmpsUsingRanges(
Or, MatchInfo))
7233 if (tryFoldLogicOfFCmps(
Or, MatchInfo))
7247 bool IsSigned =
Add->isSigned();
7256 B.buildUndef(Carry);
7262 if (isConstantOrConstantVectorI(
LHS) && !isConstantOrConstantVectorI(
RHS)) {
7265 B.buildSAddo(Dst, Carry,
RHS,
LHS);
7271 B.buildUAddo(Dst, Carry,
RHS,
LHS);
7276 std::optional<APInt> MaybeLHS = getConstantOrConstantSplatVector(
LHS);
7277 std::optional<APInt> MaybeRHS = getConstantOrConstantSplatVector(
RHS);
7283 APInt Result = IsSigned ? MaybeLHS->sadd_ov(*MaybeRHS, Overflow)
7284 : MaybeLHS->uadd_ov(*MaybeRHS, Overflow);
7286 B.buildConstant(Dst, Result);
7287 B.buildConstant(Carry, Overflow);
7295 B.buildCopy(Dst,
LHS);
7296 B.buildConstant(Carry, 0);
7308 std::optional<APInt> MaybeAddRHS =
7309 getConstantOrConstantSplatVector(AddLHS->
getRHSReg());
7312 APInt NewC = IsSigned ? MaybeAddRHS->sadd_ov(*MaybeRHS, Overflow)
7313 : MaybeAddRHS->uadd_ov(*MaybeRHS, Overflow);
7317 auto ConstRHS =
B.buildConstant(DstTy, NewC);
7318 B.buildSAddo(Dst, Carry, AddLHS->
getLHSReg(), ConstRHS);
7324 auto ConstRHS =
B.buildConstant(DstTy, NewC);
7325 B.buildUAddo(Dst, Carry, AddLHS->
getLHSReg(), ConstRHS);
7350 B.buildConstant(Carry, 0);
7358 B.buildConstant(Carry, 1);
7373 B.buildConstant(Carry, 0);
7389 B.buildConstant(Carry, 0);
7397 B.buildConstant(Carry, 1);
7414 bool OptForSize =
MI.getMF()->getFunction().hasOptSize();
7419 auto [Dst,
Base] =
MI.getFirst2Regs();
7425 MI.removeFromParent();
7437 std::optional<SrcOp> Res;
7439 while (ExpVal > 0) {
7458 MI.eraseFromParent();
7472 if (DstTy == SrcTy) {
7505 if (DstTy == SrcTy) {
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
amdgpu AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo &MRI)
static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally)
Checks if MI is TargetOpcode::G_FMUL and contractable either due to global flags or MachineInstr flag...
static unsigned getIndexedOpc(unsigned LdStOpc)
static APFloat constantFoldFpUnary(const MachineInstr &MI, const MachineRegisterInfo &MRI, const APFloat &Val)
static std::optional< std::pair< GZExtLoad *, int64_t > > matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits, const MachineRegisterInfo &MRI)
Helper function for findLoadOffsetsForLoadOrCombine.
static Register peekThroughBitcast(Register Reg, const MachineRegisterInfo &MRI)
static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I)
static cl::opt< bool > ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false), cl::desc("Force all indexed operations to be " "legal for the GlobalISel combiner"))
static cl::opt< unsigned > PostIndexUseThreshold("post-index-use-threshold", cl::Hidden, cl::init(32), cl::desc("Number of uses of a base pointer to check before it is no longer " "considered for post-indexing."))
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
static unsigned getExtLoadOpcForExtend(unsigned ExtOpc)
static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits, int64_t Cst, bool IsVector, bool IsFP)
static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy)
static bool canFoldInAddressingMode(GLoadStore *MI, const TargetLowering &TLI, MachineRegisterInfo &MRI)
Return true if 'MI' is a load or a store that may be fold it's address operand into the load / store ...
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I)
static Register buildLogBase2(Register V, MachineIRBuilder &MIB)
Determines the LogBase2 value for a non-null input value using the transform: LogBase2(V) = (EltBits ...
This contains common combine transformations that may be used in a combine pass,or by the target else...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Rewrite Partial Register Uses
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
Interface for Targets to specify which operations they can successfully select and how the others sho...
Implement a low-level type suitable for MachineInstr level instruction selection.
Contains matchers for matching SSA Machine Instructions.
mir Rename Register Operands
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file implements the SmallBitVector class.
This file describes how to lower LLVM code to machine code.
const fltSemantics & getSemantics() const
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt trunc(unsigned width) const
Truncate to new width.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
int32_t exactLogBase2() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
unsigned countl_one() const
Count the number of leading one bits.
APInt multiplicativeInverse() const
bool isMask(unsigned numBits) const
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isOne() const
Determine if this is a value of 1.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
AttributeSet getAttributes(unsigned Index) const
The attributes for the specified index are returned.
bool isEquality() const
Determine if this is an equals/not equals predicate.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
void applyUDivByConst(MachineInstr &MI)
void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal)
bool matchCombineShuffleVector(MachineInstr &MI, SmallVectorImpl< Register > &Ops)
Check if the G_SHUFFLE_VECTOR MI can be replaced by a concat_vectors.
bool matchPtrAddZero(MachineInstr &MI)
}
bool matchAllExplicitUsesAreUndef(MachineInstr &MI)
Return true if all register explicit use operands on MI are defined by a G_IMPLICIT_DEF.
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx)
Delete MI and replace all of its uses with its OpIdx-th operand.
const RegisterBank * getRegBank(Register Reg) const
Get the register bank of Reg.
bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo)
Reassociate pointer calculations with G_ADD involved, to allow better addressing mode usage.
bool matchUDivByConst(MachineInstr &MI)
Combine G_UDIV by constant into a multiply by magic constant.
void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg)
bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI)
Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
bool matchShiftsTooBig(MachineInstr &MI)
Match shifts greater or equal to the bitwidth of the operation.
bool tryCombineCopy(MachineInstr &MI)
If MI is COPY, try to combine it.
bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo)
bool matchUndefStore(MachineInstr &MI)
Return true if a G_STORE instruction MI is storing an undef value.
bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform: (X + Y) == X -> Y == 0 (X - Y) == X -> Y == 0 (X ^ Y) == X -> Y == 0 (X + Y) !...
bool matchRedundantSExtInReg(MachineInstr &MI)
bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine sext of trunc.
bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS, BuildFnTy &MatchInfo)
bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent)
Match FPOWI if it's safe to extend it into a series of multiplications.
bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform: (x + y) - y -> x (x + y) - x -> y x - (y + x) -> 0 - y x - (x + z) -> 0 - z.
bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo)
Do constant FP folding when opportunities are exposed after MIR building.
void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal)
void applyCombineUnmergeZExtToZExt(MachineInstr &MI)
void applyCommuteBinOpOperands(MachineInstr &MI)
bool matchBinOpSameVal(MachineInstr &MI)
Optimize (x op x) -> x.
void applyCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts)
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) (fsub (fneg (fmul,...
bool matchCombineCopy(MachineInstr &MI)
bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx)
Return true if a G_SELECT instruction MI has a constant comparison.
void eraseInst(MachineInstr &MI)
Erase MI.
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, Register ToReg) const
Replace a single register operand with a new register and inform the observer of the changes.
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) (fadd (fmad x,...
void applySimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo)
bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops)
If MI is G_CONCAT_VECTORS, try to combine it.
bool matchAddSubSameReg(MachineInstr &MI, Register &Src)
Transform G_ADD(x, G_SUB(y, x)) to y.
void applyRotateOutOfRange(MachineInstr &MI)
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: (G_UMULO x, 2) -> (G_UADDO x, x) (G_SMULO x, 2) -> (G_SADDO x, x)
bool matchRotateOutOfRange(MachineInstr &MI)
void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst)
void applyCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo)
void applyCombineShuffleVector(MachineInstr &MI, const ArrayRef< Register > Ops)
Replace MI with a concat_vectors with Ops.
const TargetLowering & getTargetLowering() const
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo)
Use a function which takes in a MachineIRBuilder to perform a combine.
void applyPtrAddZero(MachineInstr &MI)
bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo)
void setRegBank(Register Reg, const RegisterBank *RegBank)
Set the register bank of Reg.
bool matchRedundantAnd(MachineInstr &MI, Register &Replacement)
void replaceInstWithConstant(MachineInstr &MI, int64_t C)
Replace an instruction with a G_CONSTANT with value C.
bool matchAshrShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo)
Match ashr (shl x, C), C -> sext_inreg (C)
bool tryCombineExtendingLoads(MachineInstr &MI)
If MI is extend that consumes the result of a load, try to combine it.
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount)
bool matchCombineUnmergeUndef(MachineInstr &MI, std::function< void(MachineIRBuilder &)> &MatchInfo)
Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
void applySDivByConst(MachineInstr &MI)
bool matchUndefSelectCmp(MachineInstr &MI)
Return true if a G_SELECT instruction MI has an undef comparison.
void replaceInstWithUndef(MachineInstr &MI)
Replace an instruction with a G_IMPLICIT_DEF.
bool matchRedundantOr(MachineInstr &MI, Register &Replacement)
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx)
Check if operand OpIdx is undef.
void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo)
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst)
void replaceInstWithFConstant(MachineInstr &MI, double C)
Replace an instruction with a G_FCONSTANT with value C.
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI, BuildFnTy &MatchInfo)
Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2)
Return true if MOP1 and MOP2 are register operands are defined by equivalent instructions.
bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo)
Fold (shift (shift base, x), y) -> (shift base (x+y))
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo)
void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo)
void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond)
bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: (G_*MULO x, 0) -> 0 + no carry out.
void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement)
Delete MI and replace all of its uses with Replacement.
bool matchFunnelShiftToRotate(MachineInstr &MI)
Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
bool matchNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate)
Combine inverting a result of a compare into the opposite cond code.
void applyCombineExtOfExt(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const
Replace the opcode in instruction with a new opcode and inform the observer of the changes.
bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx)
Check if operand OpIdx is known to be a power of 2.
void applyCombineCopy(MachineInstr &MI)
void applyCombineTruncOfExt(MachineInstr &MI, std::pair< Register, unsigned > &MatchInfo)
bool matchAnyExplicitUseIsUndef(MachineInstr &MI)
Return true if any explicit use operand on MI is defined by a G_IMPLICIT_DEF.
bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo)
void applyCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute)
bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
void applyCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops)
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
bool matchSextTruncSextLoad(MachineInstr &MI)
bool matchShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo)
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
bool matchExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo)
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo)
MachineInstr * buildSDivUsingMul(MachineInstr &MI)
Given an G_SDIV MI expressing a signed divide by constant, return an expression that implements it by...
void applySDivByPow2(MachineInstr &MI)
void applyFunnelShiftConstantModulo(MachineInstr &MI)
Replaces the shift amount in MI with ShiftAmt % BW.
bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo)
Do constant folding when opportunities are exposed after MIR building.
bool isPreLegalize() const
bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo)
Match (and (load x), mask) -> zextload x.
bool matchConstantOp(const MachineOperand &MOP, int64_t C)
Return true if MOP is defined by a G_CONSTANT or splat with a value equal to C.
bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fmul x, y), z) -> (fma x, y, -z) (fsub (fmul x, y), z) -> (fmad x,...
bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine ands.
void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg)
void applyNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate)
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo)
bool matchConstantFPOp(const MachineOperand &MOP, double C)
Return true if MOP is defined by a G_FCONSTANT or splat with a value exactly equal to C.
bool matchSimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo)
Return true if MI is a G_ADD which can be simplified to a G_SUB.
bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0)
Optimize memcpy intrinsics et al, e.g.
bool matchSelectSameVal(MachineInstr &MI)
Optimize (cond ? x : x) -> x.
void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst)
Transform fp_instr(cst) to constant result of the fp operation.
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo)
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0, Register Op1, BuildFnTy &MatchInfo)
Try to reassociate to reassociate operands of a commutative binop.
bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const
bool tryEmitMemcpyInline(MachineInstr &MI)
Emit loads and stores that perform the given memcpy.
void applyXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo)
bool matchXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo)
Fold (xor (and x, y), y) -> (and (not x), y) {.
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) (fsub (fpext (fmul x,...
bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info)
bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData)
bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo)
Constant fold G_FMA/G_FMAD.
bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: and (lshr x, cst), mask -> ubfx x, cst, width.
void applyShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo)
void applyExpandFPowI(MachineInstr &MI, int64_t Exponent)
Expands FPOWI into a series of multiplications and a division if the exponent is negative.
bool isLegal(const LegalityQuery &Query) const
bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine selects.
bool matchCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts)
Transform G_UNMERGE Constant -> Constant1, Constant2, ...
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo)
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg)
Transform anyext(trunc(x)) to x.
void applySimplifyURemByPow2(MachineInstr &MI)
Combine G_UREM x, (known power of 2) to an add and bitmasking.
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo)
void applyCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops)
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
MachineRegisterInfo & MRI
void applyUMulHToLShr(MachineInstr &MI)
bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo)
Match expression trees of the form.
bool matchShuffleToExtract(MachineInstr &MI)
bool matchUndefShuffleVectorMask(MachineInstr &MI)
Return true if a G_SHUFFLE_VECTOR instruction MI has an undef mask.
bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const
bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI)
bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed load.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal)
Transform a multiply by a power-of-2 value to a left shift.
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width.
void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo)
SelectOperand is the operand in binary operator MI that is the select to fold.
bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo)
bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd (fmul x, y), z) -> (fma x, y, z) (fadd (fmul x, y), z) -> (fmad x,...
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd x, fneg(y)) -> (fsub x, y) (fadd fneg(x), y) -> (fsub y, x) (fsub x,...
bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo)
Fold away a merge of an unmerge of the corresponding values.
void applyCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo)
bool matchCombineUnmergeZExtToZExt(MachineInstr &MI)
Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0.
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI)
Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx)
Checks if constant at ConstIdx is larger than MI 's bitwidth.
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, GISelKnownBits *KB=nullptr, MachineDominatorTree *MDT=nullptr, const LegalizerInfo *LI=nullptr)
bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI)
Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM when their source operands are iden...
bool matchCombineTruncOfExt(MachineInstr &MI, std::pair< Register, unsigned > &MatchInfo)
Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI)
Returns true if DefMI precedes UseMI or they are the same instruction.
bool matchDivByPow2(MachineInstr &MI, bool IsSigned)
Given an G_SDIV MI expressing a signed divided by a pow2 constant, return expressions that implements...
bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg)
bool matchUMulHToLShr(MachineInstr &MI)
bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI)
Returns true if DefMI dominates UseMI.
MachineInstr * buildUDivUsingMul(MachineInstr &MI)
Given an G_UDIV MI expressing a divide by constant, return an expression that implements it by multip...
bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg)
Transform zext(trunc(x)) to x.
void applyCombineShlOfExtend(MachineInstr &MI, const RegisterImmPair &MatchData)
bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine zext nneg to sext.
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally, bool &HasFMAD, bool &Aggressive, bool CanReassociate=false)
bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine zext of trunc.
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI)
void applyShuffleToExtract(MachineInstr &MI)
MachineDominatorTree * MDT
bool matchSDivByConst(MachineInstr &MI)
void applySextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
bool matchCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands)
Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
void applyExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo)
void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo)
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo)
Transform trunc (shl x, K) to shl (trunc x), K if K < VT.getScalarSizeInBits().
const RegisterBankInfo * RBI
bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo)
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) (fadd (fpext (fmul x,...
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo)
void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI)
const TargetRegisterInfo * TRI
bool tryCombineShuffleVector(MachineInstr &MI)
Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg)
Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo)
GISelChangeObserver & Observer
bool matchCombineExtOfExt(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
Transform [asz]ext([asz]ext(x)) to [asz]ext x.
bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0.
bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo)
Match sext_inreg(load p), imm -> sextload p.
bool matchCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo)
bool matchCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute)
Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y) Transform G_ADD y,...
bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine ors.
void applyFunnelShiftToRotate(MachineInstr &MI)
void applyCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands)
bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond)
If a brcond's true block is not the fallthrough, make it so by inverting the condition and swapping o...
bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: (G_*ADDE x, y, 0) -> (G_*ADDO x, y) (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo)
Combine addos.
void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg)
Transform PtrToInt(IntToPtr(x)) to x.
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize, unsigned &ShiftVal)
Reduce a shift by a constant to an unmerge and a shift on a half sized type.
bool matchCommuteConstantToRHS(MachineInstr &MI)
Match constant LHS ops that should be commuted.
void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo)
void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI)
void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo)
void applyBuildInstructionSteps(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo)
Replace MI with a series of instructions described in MatchInfo.
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo)
Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y),...
MachineIRBuilder & Builder
bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo)
Combine select to integer min/max.
bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo)
Match: shr (and x, n), k -> ubfx x, pos, width.
bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops)
bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo)
Reassociate commutative binary operations like G_ADD.
bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo)
Push a binary operator through a select on constants.
bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo)
Do constant folding when opportunities are exposed after MIR building.
bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx)
Check if operand OpIdx is zero.
bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo)
void applyUDivByPow2(MachineInstr &MI)
Given a G_UDIV MI expressing an unsigned division by a pow2 constant, return expressions that impleme...
bool matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo)
Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
void applyAshShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo)
void applySextTruncSextLoad(MachineInstr &MI)
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo)
bool matchCommuteFPConstantToRHS(MachineInstr &MI)
Match constant LHS FP ops that should be commuted.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValue() const
const APFloat & getValueAPF() const
const APInt & getValue() const
Return the constant as an APInt value reference.
This class represents a range of values.
std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
const APInt & getLower() const
Return the lower value for this range.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
const APInt & getUpper() const
Return the upper value for this range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents overflowing add operations.
Represents an integer addition.
Represents a logical and.
CmpInst::Predicate getCond() const
Register getLHSReg() const
Register getRHSReg() const
Represents any generic load, including sign/zero extending variants.
Register getDstReg() const
Get the definition register of the loaded value.
Register getLHSReg() const
Register getRHSReg() const
Represents a G_BUILD_VECTOR.
Register getSrcReg() const
Abstract class that contains various methods for clients to notify about changes.
virtual void changingInstr(MachineInstr &MI)=0
This instruction is about to be mutated in some way.
void finishedChangingAllUsesOfReg()
All instructions reported as changing by changingAllUsesOfReg() have finished being changed.
virtual void changedInstr(MachineInstr &MI)=0
This instruction was mutated in some way.
virtual void erasingInstr(MachineInstr &MI)=0
An instruction is about to be erased.
void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg)
All the instructions using the given register are being changed.
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
KnownBits getKnownBits(Register R)
APInt getKnownZeroes(Register R)
Simple wrapper observer that takes several observers, and calls each one for each event.
Represents a G_IMPLICIT_DEF.
Represents any type of generic load or store.
Register getPointerReg() const
Get the source register of the pointer value.
Represents a logical binary operation.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
bool isAtomic() const
Returns true if the attached MachineMemOperand has the atomic flag set.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
bool isSimple() const
Returns true if the memory operation is neither atomic nor volatile.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_MERGE_VALUES.
Register getCondReg() const
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isByteSized() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
constexpr LLT changeElementSize(unsigned NewEltSize) const
If this type is a vector, return a vector with the same number of elements but the new element size.
constexpr unsigned getAddressSpace() const
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
This is an important class for using LLVM in a threaded context.
@ Legalized
Instruction has been legalized and the MachineFunction changed.
LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0)
Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index)
Get a pointer to vector element Index located in memory for a vector of type VecTy starting at a base...
bool isLegalOrCustom(const LegalityQuery &Query) const
LegalizeActionStep getAction(const LegalityQuery &Query) const
Determine what action should be taken to legalize the described instruction.
TypeSize getValue() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
bool dominates(const MachineDomTreeNode *A, const MachineDomTreeNode *B) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
LLVMContext & getContext() const
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0)
Build and insert a bitwise not, NegOne = G_CONSTANT -1 Res = G_OR Op0, NegOne.
MachineInstrBuilder buildCTTZ(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_CTTZ Op0, Src0.
MachineInstrBuilder buildAShr(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildURem(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_UREM Op0, Op1.
MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntToPtr(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_INTTOPTR instruction.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildNeg(const DstOp &Dst, const SrcOp &Src0)
Build and insert integer negation Zero = G_CONSTANT 0 Res = G_SUB Zero, Op0.
MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_CTLZ Op0, Src0.
MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FDIV Op0, Op1.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
void setInstrAndDebugLoc(MachineInstr &MI)
Set the insertion point to before MI, and set the debug loc to MI's loc.
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FNEG Op0.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_OR Op0, Op1.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
MachineInstrBuilder buildXor(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_XOR Op0, Op1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
MachineInstrBuilder buildFCanonicalize(const DstOp &Dst, const SrcOp &Src0, std::optional< unsigned > Flags=std::nullopt)
Build and insert Dst = G_FCANONICALIZE Src0.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
unsigned getNumOperands() const
Retuns the total number of operands.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
MachineOperand * findRegisterUseOperand(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false)
Wrapper for findRegisterUseOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
unsigned getAddrSpace() const
const MachinePointerInfo & getPointerInfo() const
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setMBB(MachineBasicBlock *MBB)
void setPredicate(unsigned Predicate)
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
unsigned getPredicate() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
use_instr_iterator use_instr_begin(Register RegNo) const
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
void setRegClassOrRegBank(Register Reg, const RegClassOrRegBank &RCOrRB)
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
use_instr_nodbg_iterator use_instr_nodbg_begin(Register RegNo) const
void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
iterator_range< use_instr_iterator > use_instructions(Register Reg) const
Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
bool constrainRegAttrs(Register Reg, Register ConstrainingReg, unsigned MinNumRegs=0)
Constrain the register class or the register bank of the virtual register Reg (and low-level type) to...
iterator_range< use_iterator > use_operands(Register Reg) const
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
size_type size() const
Determine the number of elements in the SetVector.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
bool all() const
Returns true if all bits are set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const
Given the generic extension instruction ExtMI, returns true if this extension is a likely candidate f...
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual LLVM_READONLY LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const
Return the preferred type to use for a shift opcode, given the shifted amount type is ShiftValueTy.
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const
virtual const TargetLowering * getTargetLowering() const
The instances of the Type class are immutable: once they are created, they are never changed.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Value(Type *Ty, unsigned scid)
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
@ Legal
The operation is expected to be selectable directly by the target, and no transformation is necessary...
operand_type_match m_Reg()
SpecificConstantOrSplatMatch m_SpecificICstOrSplat(int64_t RequestedValue)
Matches a RequestedValue constant or a constant splat of RequestedValue.
BinaryOp_match< LHS, RHS, TargetOpcode::G_BUILD_VECTOR, false > m_GBuildVector(const LHS &L, const RHS &R)
GCstAndRegMatch m_GCst(std::optional< ValueAndVReg > &ValReg)
SpecificConstantMatch m_SpecificICst(int64_t RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
BinaryOp_match< LHS, RHS, TargetOpcode::G_XOR, true > m_GXor(const LHS &L, const RHS &R)
UnaryOp_match< SrcTy, TargetOpcode::G_SEXT > m_GSExt(const SrcTy &Src)
UnaryOp_match< SrcTy, TargetOpcode::G_FPEXT > m_GFPExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
UnaryOp_match< SrcTy, TargetOpcode::G_INTTOPTR > m_GIntToPtr(const SrcTy &Src)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_OR, true > m_GOr(const LHS &L, const RHS &R)
ICstOrSplatMatch< APInt > m_ICstOrSplat(APInt &Cst)
ImplicitDefMatch m_GImplicitDef()
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CheckType m_SpecificType(LLT Ty)
BinaryOp_match< LHS, RHS, TargetOpcode::G_FADD, true > m_GFAdd(const LHS &L, const RHS &R)
UnaryOp_match< SrcTy, TargetOpcode::G_PTRTOINT > m_GPtrToInt(const SrcTy &Src)
BinaryOp_match< LHS, RHS, TargetOpcode::G_FSUB, false > m_GFSub(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ASHR, false > m_GAShr(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_PTR_ADD, false > m_GPtrAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
Or< Preds... > m_any_of(Preds &&... preds)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
UnaryOp_match< SrcTy, TargetOpcode::G_BITCAST > m_GBitcast(const SrcTy &Src)
BinaryOp_match< LHS, RHS, TargetOpcode::G_BUILD_VECTOR_TRUNC, false > m_GBuildVectorTrunc(const LHS &L, const RHS &R)
bind_ty< MachineInstr * > m_MInstr(MachineInstr *&MI)
UnaryOp_match< SrcTy, TargetOpcode::G_FNEG > m_GFNeg(const SrcTy &Src)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP, true > m_c_GICmp(const Pred &P, const LHS &L, const RHS &R)
G_ICMP matcher that also matches commuted compares.
TernaryOp_match< Src0Ty, Src1Ty, Src2Ty, TargetOpcode::G_INSERT_VECTOR_ELT > m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
GFCstOrSplatGFCstMatch m_GFCstOrSplat(std::optional< FPValueAndVReg > &FPValReg)
And< Preds... > m_all_of(Preds &&... preds)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
UnaryOp_match< SrcTy, TargetOpcode::G_ANYEXT > m_GAnyExt(const SrcTy &Src)
UnaryOp_match< SrcTy, TargetOpcode::G_TRUNC > m_GTrunc(const SrcTy &Src)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
Not(const Pred &P) -> Not< Pred >
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by a single def instruction that is Opcode.
static double log2(double V)
const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
EVT getApproximateEVTForLLT(LLT Ty, const DataLayout &DL, LLVMContext &Ctx)
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
std::function< void(MachineIRBuilder &)> BuildFnTy
std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
auto instructionsWithoutDebug(IterT It, IterT End, bool SkipPseudoOp=true)
Construct a range iterator which begins at It and moves forwards until End is reached,...
std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
@ Xor
Bitwise or logical XOR of integers.
DWARFExpression::Operation Op
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
constexpr unsigned BitWidth
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
unsigned getFCmpCode(CmpInst::Predicate CC)
Similar to getICmpCode but for FCmpInst.
std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & IEEEdouble() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
Simple struct used to hold a Register value and the instruction which defines it.
SmallVector< InstructionBuildSteps, 2 > InstrsToBuild
Describes instructions to be built during a combine.
static std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
bool isUnknown() const
Returns true if we don't know any bits.
static std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
LegalizeAction Action
The action to take or the final answer.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
const RegisterBank * Bank
Register LogicNonShiftReg
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a seq...
unsigned PostShift
post-shift amount