#include "llvm/IR/IntrinsicsMips.h"

#define DEBUG_TYPE "mips-isel"

static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
                                   cl::desc("Expand double precision loads and "
                                            "stores to their single precision "
                                            "counterparts"));
  for (const auto &VecTy : VecTys) {

  if (VT == MVT::Untyped)
    return Subtarget.hasDSP() ? &Mips::ACC64DSPRegClass : &Mips::ACC64RegClass;
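  // getRepRegClassFor: untyped values (the HI/LO accumulator pair) are
  // represented in ACC64DSP when the DSP ASE is available, ACC64 otherwise.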
  if (Ty == MVT::v4i32 || Ty == MVT::v2i64) {

  if (Ty != MVT::v8f16) {

  EVT ResTy = Op->getValueType(0);

  switch (Op.getOpcode()) {

  int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();
  if (Log2IfPositive <= 0)
  EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
  unsigned Log2 = Log2IfPositive;
      Log2 == ExtendTySize) {
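  // performANDCombine: a sign-extending element extract masked with 2^n-1 is
  // folded into a zero-extending extract when the mask covers the width of
  // the extended type.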
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,

    N = N->getOperand(0);

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs))

    return N->getOperand(1) == OfNode;
    return N->getOperand(0) == OfNode;

  EVT Ty = N->getValueType(0);
  bool IsLittleEndian = !Subtarget.isLittle();
  bool IsConstantMask = false;

  if (isVSplat(Op0Op0, Mask, IsLittleEndian)) {
    if (isVSplat(Op1Op0, InvMask, IsLittleEndian) &&
        Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
    else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) &&
             Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
      IsConstantMask = true;

    if (isVSplat(Op1Op0, InvMask, IsLittleEndian) &&
        Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
    else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) &&
             Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
      IsConstantMask = true;

  if (IsConstantMask) {
    if (Mask.isAllOnes())
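  // performORCombine: (or (and $a, mask), (and $b, ~mask)) with splatted
  // constant masks is folded to a VSELECT so it can be selected as bsel/bmnz.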
  while (!WorkStack.empty()) {
    if (Val == 0 || Val == 1)
    if ((Val - Floor).ule(Ceil - Val)) {

  if ((C - Floor).ule(Ceil - C)) {

  EVT VT = N->getValueType(0);
        C->getAPIntValue(), VT, DAG, Subtarget))
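  // performMULCombine: multiplications by a constant are expanded by
  // genConstMult into shift/add/sub sequences when
  // shouldTransformMulToShiftsAddsSubs decides that is cheaper than a mul.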
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
      !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
      (SplatBitSize != EltSize) ||
  return DAG.getNode(Opc, DL, Ty, N->getOperand(0),
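  // performDSPShiftCombine: DSP vector shifts are only formed when the shift
  // amount is a constant splat whose width matches the element size.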
  EVT Ty = N->getValueType(0);
  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))

  EVT Ty = N->getValueType(0);
    EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
    if (TotalBits == 32 ||

  if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget.hasDSPR2()))

  EVT Ty = N->getValueType(0);
  if (((Ty != MVT::v2i16) || !Subtarget.hasDSPR2()) && (Ty != MVT::v4i8))
  bool IsV216 = (Ty == MVT::v2i16);
  default:
    return false;

  EVT Ty = N->getValueType(0);
  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
                     N->getOperand(1), N->getOperand(2));

  EVT Ty = N->getValueType(0);
  if (Ty == MVT::v2i16 || Ty == MVT::v4i8) {
                       N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));

  EVT Ty = N->getValueType(0);

  switch (N->getOpcode()) {

             N->printrWithDepth(dbgs(), &DAG); dbgs() << "\n=> \n";
  switch (MI.getOpcode()) {
  case Mips::BPOSGE32_PSEUDO:
    return emitBPOSGE32(MI, BB);
  case Mips::SNZ_B_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
  case Mips::SNZ_H_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
  case Mips::SNZ_W_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
  case Mips::SNZ_D_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
  case Mips::SNZ_V_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
  case Mips::SZ_B_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
  case Mips::SZ_H_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
  case Mips::SZ_W_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
  case Mips::SZ_D_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
  case Mips::SZ_V_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
  case Mips::COPY_FW_PSEUDO:
    return emitCOPY_FW(MI, BB);
  case Mips::COPY_FD_PSEUDO:
    return emitCOPY_FD(MI, BB);
  case Mips::INSERT_FW_PSEUDO:
    return emitINSERT_FW(MI, BB);
  case Mips::INSERT_FD_PSEUDO:
    return emitINSERT_FD(MI, BB);
  case Mips::INSERT_B_VIDX_PSEUDO:
  case Mips::INSERT_B_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 1, false);
  case Mips::INSERT_H_VIDX_PSEUDO:
  case Mips::INSERT_H_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 2, false);
  case Mips::INSERT_W_VIDX_PSEUDO:
  case Mips::INSERT_W_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 4, false);
  case Mips::INSERT_D_VIDX_PSEUDO:
  case Mips::INSERT_D_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 8, false);
  case Mips::INSERT_FW_VIDX_PSEUDO:
  case Mips::INSERT_FW_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 4, true);
  case Mips::INSERT_FD_VIDX_PSEUDO:
  case Mips::INSERT_FD_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 8, true);
  case Mips::FILL_FW_PSEUDO:
    return emitFILL_FW(MI, BB);
  case Mips::FILL_FD_PSEUDO:
    return emitFILL_FD(MI, BB);
  case Mips::FEXP2_W_1_PSEUDO:
    return emitFEXP2_W_1(MI, BB);
  case Mips::FEXP2_D_1_PSEUDO:
    return emitFEXP2_D_1(MI, BB);
    return emitST_F16_PSEUDO(MI, BB);
    return emitLD_F16_PSEUDO(MI, BB);
  case Mips::MSA_FP_EXTEND_W_PSEUDO:
    return emitFPEXTEND_PSEUDO(MI, BB, false);
  case Mips::MSA_FP_ROUND_W_PSEUDO:
    return emitFPROUND_PSEUDO(MI, BB, false);
  case Mips::MSA_FP_EXTEND_D_PSEUDO:
    return emitFPEXTEND_PSEUDO(MI, BB, true);
  case Mips::MSA_FP_ROUND_D_PSEUDO:
    return emitFPROUND_PSEUDO(MI, BB, true);
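  // Each pseudo above is expanded at the MachineInstr level by a dedicated
  // emit* helper later in this file.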
bool MipsSETargetLowering::isEligibleForTailCallOptimization(
    const CCState &CCInfo, unsigned NextStackOffset,

void MipsSETargetLowering::getOpndList(
    SmallVectorImpl<SDValue> &Ops,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass, bool IsPICCall,
    bool GlobalOrExternal, bool InternalLinkage, bool IsCallReloc,
    CallLoweringInfo &CLI, SDValue Callee,

                                  InternalLinkage, IsCallReloc, CLI, Callee,
  EVT PtrVT = Ptr.getValueType();

  EVT PtrVT = Ptr.getValueType();

  MVT Src = Op.getOperand(0).getValueType().getSimpleVT();
  MVT Dest = Op.getValueType().getSimpleVT();
  if (Src == MVT::i64 && Dest == MVT::f64) {
  if (Src == MVT::f64 && Dest == MVT::i64) {

                                          bool HasLo, bool HasHi,
  EVT Ty = Op.getOperand(0).getValueType();
                           Op.getOperand(0), Op.getOperand(1));
  if (!HasLo || !HasHi)
    return HasLo ? Lo : Hi;

  std::tie(InLo, InHi) = DAG.SplitScalar(In, DL, MVT::i32, MVT::i32);
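  // initAccumulator splits a 64-bit value into LO/HI halves for the DSP
  // accumulator; extractLOHI performs the inverse after the intrinsic node.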
  bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;

    SDValue Opnd = Op->getOperand(++OpNo), In64;

  for (EVT Ty : Op->values())
    ResTys.push_back((Ty == MVT::i64) ? MVT::Untyped : Ty);

  EVT ResTy = Op->getValueType(0);

  EVT ResVecTy = Op->getValueType(0);
  EVT ViaVecTy = ResVecTy;
  if (ResVecTy == MVT::v2i64) {
    if (isa<ConstantSDNode>(LaneA))
    ViaVecTy = MVT::v4i32;
  SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
                      LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };
  if (ViaVecTy != ResVecTy) {

                                  bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));
      APInt(Op->getValueType(0).getScalarType().getSizeInBits(),
            IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),

  EVT ViaVecTy = VecTy;
  SDValue SplatValueA = SplatValue;
  SDValue SplatValueB = SplatValue;
  if (VecTy == MVT::v2i64) {
    ViaVecTy = MVT::v4i32;
  SDValue Ops[16] = { SplatValueA, SplatValueB, SplatValueA, SplatValueB,
                      SplatValueA, SplatValueB, SplatValueA, SplatValueB,
                      SplatValueA, SplatValueB, SplatValueA, SplatValueB,
                      SplatValueA, SplatValueB, SplatValueA, SplatValueB };
  if (VecTy != ViaVecTy)

  EVT VecTy = Op->getValueType(0);
  if (VecTy == MVT::v2i64) {
    APInt BitImm = APInt(64, 1) << CImm->getAPIntValue();
                        {BitImmLoOp, BitImmHiOp, BitImmLoOp, BitImmHiOp}));
  if (VecTy == MVT::v2i64)
  return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm);
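  // lowerMSABinaryBitImmIntr: the bit-immediate forms (bclri/bseti/bnegi)
  // splat (1 << imm) into a vector and reuse the generic register-operand node.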
  EVT ResTy = Op->getValueType(0);
  MVT ResEltTy = ResTy == MVT::v2i64 ? MVT::i64 : MVT::i32;

  EVT ResTy = Op->getValueType(0);

  EVT ResTy = Op->getValueType(0);
                 << Op->getConstantOperandAPInt(2);

  unsigned Intrinsic = Op->getConstantOperandVal(0);
  switch (Intrinsic) {
  case Intrinsic::mips_shilo:
  case Intrinsic::mips_dpau_h_qbl:
  case Intrinsic::mips_dpau_h_qbr:
  case Intrinsic::mips_dpsu_h_qbl:
  case Intrinsic::mips_dpsu_h_qbr:
  case Intrinsic::mips_dpa_w_ph:
  case Intrinsic::mips_dps_w_ph:
  case Intrinsic::mips_dpax_w_ph:
  case Intrinsic::mips_dpsx_w_ph:
  case Intrinsic::mips_mulsa_w_ph:
  case Intrinsic::mips_mult:
  case Intrinsic::mips_multu:
  case Intrinsic::mips_madd:
  case Intrinsic::mips_maddu:
  case Intrinsic::mips_msub:
  case Intrinsic::mips_msubu:
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
  case Intrinsic::mips_addvi_b:
  case Intrinsic::mips_addvi_h:
  case Intrinsic::mips_addvi_w:
  case Intrinsic::mips_addvi_d:
  case Intrinsic::mips_and_v:
  case Intrinsic::mips_andi_b:
  case Intrinsic::mips_bclr_b:
  case Intrinsic::mips_bclr_h:
  case Intrinsic::mips_bclr_w:
  case Intrinsic::mips_bclr_d:
  case Intrinsic::mips_bclri_b:
  case Intrinsic::mips_bclri_h:
  case Intrinsic::mips_bclri_w:
  case Intrinsic::mips_bclri_d:
  case Intrinsic::mips_binsli_b:
  case Intrinsic::mips_binsli_h:
  case Intrinsic::mips_binsli_w:
  case Intrinsic::mips_binsli_d: {
    EVT VecTy = Op->getValueType(0);
                                       Op->getConstantOperandVal(3) + 1);
                       Op->getOperand(2), Op->getOperand(1));
  case Intrinsic::mips_binsri_b:
  case Intrinsic::mips_binsri_h:
  case Intrinsic::mips_binsri_w:
  case Intrinsic::mips_binsri_d: {
    EVT VecTy = Op->getValueType(0);
                                       Op->getConstantOperandVal(3) + 1);
                       Op->getOperand(2), Op->getOperand(1));
  case Intrinsic::mips_bmnz_v:
                       Op->getOperand(2), Op->getOperand(1));
  case Intrinsic::mips_bmnzi_b:
  case Intrinsic::mips_bmz_v:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_bmzi_b:
  case Intrinsic::mips_bneg_b:
  case Intrinsic::mips_bneg_h:
  case Intrinsic::mips_bneg_w:
  case Intrinsic::mips_bneg_d: {
    EVT VecTy = Op->getValueType(0);
  case Intrinsic::mips_bnegi_b:
  case Intrinsic::mips_bnegi_h:
  case Intrinsic::mips_bnegi_w:
  case Intrinsic::mips_bnegi_d:
  case Intrinsic::mips_bnz_b:
  case Intrinsic::mips_bnz_h:
  case Intrinsic::mips_bnz_w:
  case Intrinsic::mips_bnz_d:
  case Intrinsic::mips_bnz_v:
  case Intrinsic::mips_bsel_v:
                       Op->getOperand(1), Op->getOperand(3),
  case Intrinsic::mips_bseli_b:
  case Intrinsic::mips_bset_b:
  case Intrinsic::mips_bset_h:
  case Intrinsic::mips_bset_w:
  case Intrinsic::mips_bset_d: {
    EVT VecTy = Op->getValueType(0);
  case Intrinsic::mips_bseti_b:
  case Intrinsic::mips_bseti_h:
  case Intrinsic::mips_bseti_w:
  case Intrinsic::mips_bseti_d:
  case Intrinsic::mips_bz_b:
  case Intrinsic::mips_bz_h:
  case Intrinsic::mips_bz_w:
  case Intrinsic::mips_bz_d:
  case Intrinsic::mips_bz_v:
  case Intrinsic::mips_ceq_b:
  case Intrinsic::mips_ceq_h:
  case Intrinsic::mips_ceq_w:
  case Intrinsic::mips_ceq_d:
  case Intrinsic::mips_ceqi_b:
  case Intrinsic::mips_ceqi_h:
  case Intrinsic::mips_ceqi_w:
  case Intrinsic::mips_ceqi_d:
  case Intrinsic::mips_cle_s_b:
  case Intrinsic::mips_cle_s_h:
  case Intrinsic::mips_cle_s_w:
  case Intrinsic::mips_cle_s_d:
  case Intrinsic::mips_clei_s_b:
  case Intrinsic::mips_clei_s_h:
  case Intrinsic::mips_clei_s_w:
  case Intrinsic::mips_clei_s_d:
  case Intrinsic::mips_cle_u_b:
  case Intrinsic::mips_cle_u_h:
  case Intrinsic::mips_cle_u_w:
  case Intrinsic::mips_cle_u_d:
  case Intrinsic::mips_clei_u_b:
  case Intrinsic::mips_clei_u_h:
  case Intrinsic::mips_clei_u_w:
  case Intrinsic::mips_clei_u_d:
  case Intrinsic::mips_clt_s_b:
  case Intrinsic::mips_clt_s_h:
  case Intrinsic::mips_clt_s_w:
  case Intrinsic::mips_clt_s_d:
  case Intrinsic::mips_clti_s_b:
  case Intrinsic::mips_clti_s_h:
  case Intrinsic::mips_clti_s_w:
  case Intrinsic::mips_clti_s_d:
  case Intrinsic::mips_clt_u_b:
  case Intrinsic::mips_clt_u_h:
  case Intrinsic::mips_clt_u_w:
  case Intrinsic::mips_clt_u_d:
  case Intrinsic::mips_clti_u_b:
  case Intrinsic::mips_clti_u_h:
  case Intrinsic::mips_clti_u_w:
  case Intrinsic::mips_clti_u_d:
  case Intrinsic::mips_copy_s_b:
  case Intrinsic::mips_copy_s_h:
  case Intrinsic::mips_copy_s_w:
  case Intrinsic::mips_copy_s_d:
                       Op->getValueType(0), Op->getOperand(1),
  case Intrinsic::mips_copy_u_b:
  case Intrinsic::mips_copy_u_h:
  case Intrinsic::mips_copy_u_w:
  case Intrinsic::mips_copy_u_d:
                       Op->getValueType(0), Op->getOperand(1),
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
  case Intrinsic::mips_fceq_w:
  case Intrinsic::mips_fceq_d:
  case Intrinsic::mips_fcle_w:
  case Intrinsic::mips_fcle_d:
  case Intrinsic::mips_fclt_w:
  case Intrinsic::mips_fclt_d:
  case Intrinsic::mips_fcne_w:
  case Intrinsic::mips_fcne_d:
  case Intrinsic::mips_fcor_w:
  case Intrinsic::mips_fcor_d:
  case Intrinsic::mips_fcueq_w:
  case Intrinsic::mips_fcueq_d:
  case Intrinsic::mips_fcule_w:
  case Intrinsic::mips_fcule_d:
  case Intrinsic::mips_fcult_w:
  case Intrinsic::mips_fcult_d:
  case Intrinsic::mips_fcun_w:
  case Intrinsic::mips_fcun_d:
  case Intrinsic::mips_fcune_w:
  case Intrinsic::mips_fcune_d:
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
  case Intrinsic::mips_ffint_u_w:
  case Intrinsic::mips_ffint_u_d:
  case Intrinsic::mips_ffint_s_w:
  case Intrinsic::mips_ffint_s_d:
  case Intrinsic::mips_fill_b:
  case Intrinsic::mips_fill_h:
  case Intrinsic::mips_fill_w:
  case Intrinsic::mips_fill_d: {
    EVT ResTy = Op->getValueType(0);
  case Intrinsic::mips_fexp2_w:
  case Intrinsic::mips_fexp2_d: {
    EVT ResTy = Op->getValueType(0);
  case Intrinsic::mips_flog2_w:
  case Intrinsic::mips_flog2_d:
  case Intrinsic::mips_fmadd_w:
  case Intrinsic::mips_fmadd_d:
                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
  case Intrinsic::mips_fmsub_w:
  case Intrinsic::mips_fmsub_d: {
                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
  case Intrinsic::mips_frint_w:
  case Intrinsic::mips_frint_d:
  case Intrinsic::mips_fsqrt_w:
  case Intrinsic::mips_fsqrt_d:
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
  case Intrinsic::mips_ftrunc_u_w:
  case Intrinsic::mips_ftrunc_u_d:
  case Intrinsic::mips_ftrunc_s_w:
  case Intrinsic::mips_ftrunc_s_d:
  case Intrinsic::mips_ilvev_b:
  case Intrinsic::mips_ilvev_h:
  case Intrinsic::mips_ilvev_w:
  case Intrinsic::mips_ilvev_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_ilvl_b:
  case Intrinsic::mips_ilvl_h:
  case Intrinsic::mips_ilvl_w:
  case Intrinsic::mips_ilvl_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_ilvod_b:
  case Intrinsic::mips_ilvod_h:
  case Intrinsic::mips_ilvod_w:
  case Intrinsic::mips_ilvod_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_ilvr_b:
  case Intrinsic::mips_ilvr_h:
  case Intrinsic::mips_ilvr_w:
  case Intrinsic::mips_ilvr_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_insert_b:
  case Intrinsic::mips_insert_h:
  case Intrinsic::mips_insert_w:
  case Intrinsic::mips_insert_d:
                       Op->getOperand(1), Op->getOperand(3), Op->getOperand(2));
  case Intrinsic::mips_insve_b:
  case Intrinsic::mips_insve_h:
  case Intrinsic::mips_insve_w:
  case Intrinsic::mips_insve_d: {
    switch (Intrinsic) {
    case Intrinsic::mips_insve_b: Max = 15; break;
    case Intrinsic::mips_insve_h: Max = 7; break;
    case Intrinsic::mips_insve_w: Max = 3; break;
    case Intrinsic::mips_insve_d: Max = 1; break;
    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
    if (Value < 0 || Value > Max)
                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3),
  case Intrinsic::mips_ldi_b:
  case Intrinsic::mips_ldi_h:
  case Intrinsic::mips_ldi_w:
  case Intrinsic::mips_ldi_d:
  case Intrinsic::mips_lsa:
  case Intrinsic::mips_dlsa: {
    EVT ResTy = Op->getValueType(0);
                       Op->getOperand(2), Op->getOperand(3)));
  case Intrinsic::mips_maddv_b:
  case Intrinsic::mips_maddv_h:
  case Intrinsic::mips_maddv_w:
  case Intrinsic::mips_maddv_d: {
    EVT ResTy = Op->getValueType(0);
                       Op->getOperand(2), Op->getOperand(3)));
  case Intrinsic::mips_max_s_b:
  case Intrinsic::mips_max_s_h:
  case Intrinsic::mips_max_s_w:
  case Intrinsic::mips_max_s_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_max_u_b:
  case Intrinsic::mips_max_u_h:
  case Intrinsic::mips_max_u_w:
  case Intrinsic::mips_max_u_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_maxi_s_b:
  case Intrinsic::mips_maxi_s_h:
  case Intrinsic::mips_maxi_s_w:
  case Intrinsic::mips_maxi_s_d:
  case Intrinsic::mips_maxi_u_b:
  case Intrinsic::mips_maxi_u_h:
  case Intrinsic::mips_maxi_u_w:
  case Intrinsic::mips_maxi_u_d:
  case Intrinsic::mips_min_s_b:
  case Intrinsic::mips_min_s_h:
  case Intrinsic::mips_min_s_w:
  case Intrinsic::mips_min_s_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_min_u_b:
  case Intrinsic::mips_min_u_h:
  case Intrinsic::mips_min_u_w:
  case Intrinsic::mips_min_u_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_mini_s_b:
  case Intrinsic::mips_mini_s_h:
  case Intrinsic::mips_mini_s_w:
  case Intrinsic::mips_mini_s_d:
  case Intrinsic::mips_mini_u_b:
  case Intrinsic::mips_mini_u_h:
  case Intrinsic::mips_mini_u_w:
  case Intrinsic::mips_mini_u_d:
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
  case Intrinsic::mips_msubv_b:
  case Intrinsic::mips_msubv_h:
  case Intrinsic::mips_msubv_w:
  case Intrinsic::mips_msubv_d: {
    EVT ResTy = Op->getValueType(0);
                       Op->getOperand(2), Op->getOperand(3)));
  case Intrinsic::mips_nlzc_b:
  case Intrinsic::mips_nlzc_h:
  case Intrinsic::mips_nlzc_w:
  case Intrinsic::mips_nlzc_d:
  case Intrinsic::mips_nor_v: {
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_nori_b: {
  case Intrinsic::mips_or_v:
  case Intrinsic::mips_ori_b:
  case Intrinsic::mips_pckev_b:
  case Intrinsic::mips_pckev_h:
  case Intrinsic::mips_pckev_w:
  case Intrinsic::mips_pckev_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_pckod_b:
  case Intrinsic::mips_pckod_h:
  case Intrinsic::mips_pckod_w:
  case Intrinsic::mips_pckod_d:
                       Op->getOperand(1), Op->getOperand(2));
  case Intrinsic::mips_pcnt_b:
  case Intrinsic::mips_pcnt_h:
  case Intrinsic::mips_pcnt_w:
  case Intrinsic::mips_pcnt_d:
  case Intrinsic::mips_sat_s_b:
  case Intrinsic::mips_sat_s_h:
  case Intrinsic::mips_sat_s_w:
  case Intrinsic::mips_sat_s_d:
  case Intrinsic::mips_sat_u_b:
  case Intrinsic::mips_sat_u_h:
  case Intrinsic::mips_sat_u_w:
  case Intrinsic::mips_sat_u_d: {
    switch (Intrinsic) {
    case Intrinsic::mips_sat_s_b:
    case Intrinsic::mips_sat_u_b: Max = 7; break;
    case Intrinsic::mips_sat_s_h:
    case Intrinsic::mips_sat_u_h: Max = 15; break;
    case Intrinsic::mips_sat_s_w:
    case Intrinsic::mips_sat_u_w: Max = 31; break;
    case Intrinsic::mips_sat_s_d:
    case Intrinsic::mips_sat_u_d: Max = 63; break;
    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
    if (Value < 0 || Value > Max)
  case Intrinsic::mips_shf_b:
  case Intrinsic::mips_shf_h:
  case Intrinsic::mips_shf_w: {
    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
    if (Value < 0 || Value > 255)
                       Op->getOperand(2), Op->getOperand(1));
  case Intrinsic::mips_sldi_b:
  case Intrinsic::mips_sldi_h:
  case Intrinsic::mips_sldi_w:
  case Intrinsic::mips_sldi_d: {
    switch (Intrinsic) {
    case Intrinsic::mips_sldi_b: Max = 15; break;
    case Intrinsic::mips_sldi_h: Max = 7; break;
    case Intrinsic::mips_sldi_w: Max = 3; break;
    case Intrinsic::mips_sldi_d: Max = 1; break;
    int64_t Value = cast<ConstantSDNode>(Op->getOperand(3))->getSExtValue();
    if (Value < 0 || Value > Max)
  case Intrinsic::mips_sll_b:
  case Intrinsic::mips_sll_h:
  case Intrinsic::mips_sll_w:
  case Intrinsic::mips_sll_d:
  case Intrinsic::mips_slli_b:
  case Intrinsic::mips_slli_h:
  case Intrinsic::mips_slli_w:
  case Intrinsic::mips_slli_d:
  case Intrinsic::mips_splat_b:
  case Intrinsic::mips_splat_h:
  case Intrinsic::mips_splat_w:
  case Intrinsic::mips_splat_d:
  case Intrinsic::mips_splati_b:
  case Intrinsic::mips_splati_h:
  case Intrinsic::mips_splati_w:
  case Intrinsic::mips_splati_d:
  case Intrinsic::mips_sra_b:
  case Intrinsic::mips_sra_h:
  case Intrinsic::mips_sra_w:
  case Intrinsic::mips_sra_d:
  case Intrinsic::mips_srai_b:
  case Intrinsic::mips_srai_h:
  case Intrinsic::mips_srai_w:
  case Intrinsic::mips_srai_d:
  case Intrinsic::mips_srari_b:
  case Intrinsic::mips_srari_h:
  case Intrinsic::mips_srari_w:
  case Intrinsic::mips_srari_d: {
    switch (Intrinsic) {
    case Intrinsic::mips_srari_b: Max = 7; break;
    case Intrinsic::mips_srari_h: Max = 15; break;
    case Intrinsic::mips_srari_w: Max = 31; break;
    case Intrinsic::mips_srari_d: Max = 63; break;
    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
    if (Value < 0 || Value > Max)
  case Intrinsic::mips_srl_b:
  case Intrinsic::mips_srl_h:
  case Intrinsic::mips_srl_w:
  case Intrinsic::mips_srl_d:
  case Intrinsic::mips_srli_b:
  case Intrinsic::mips_srli_h:
  case Intrinsic::mips_srli_w:
  case Intrinsic::mips_srli_d:
  case Intrinsic::mips_srlri_b:
  case Intrinsic::mips_srlri_h:
  case Intrinsic::mips_srlri_w:
  case Intrinsic::mips_srlri_d: {
    switch (Intrinsic) {
    case Intrinsic::mips_srlri_b: Max = 7; break;
    case Intrinsic::mips_srlri_h: Max = 15; break;
    case Intrinsic::mips_srlri_w: Max = 31; break;
    case Intrinsic::mips_srlri_d: Max = 63; break;
    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
    if (Value < 0 || Value > Max)
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
  case Intrinsic::mips_subvi_b:
  case Intrinsic::mips_subvi_h:
  case Intrinsic::mips_subvi_w:
  case Intrinsic::mips_subvi_d:
  case Intrinsic::mips_vshf_b:
  case Intrinsic::mips_vshf_h:
  case Intrinsic::mips_vshf_w:
  case Intrinsic::mips_vshf_d:
                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
  case Intrinsic::mips_xor_v:
  case Intrinsic::mips_xori_b:
  case Intrinsic::thread_pointer: {

  EVT ResTy = Op->getValueType(0);

  unsigned Intr = Op->getConstantOperandVal(1);
  case Intrinsic::mips_extp:
  case Intrinsic::mips_extpdp:
  case Intrinsic::mips_extr_w:
  case Intrinsic::mips_extr_r_w:
  case Intrinsic::mips_extr_rs_w:
  case Intrinsic::mips_extr_s_h:
  case Intrinsic::mips_mthlip:
  case Intrinsic::mips_mulsaq_s_w_ph:
  case Intrinsic::mips_maq_s_w_phl:
  case Intrinsic::mips_maq_s_w_phr:
  case Intrinsic::mips_maq_sa_w_phl:
  case Intrinsic::mips_maq_sa_w_phr:
  case Intrinsic::mips_dpaq_s_w_ph:
  case Intrinsic::mips_dpsq_s_w_ph:
  case Intrinsic::mips_dpaq_sa_l_w:
  case Intrinsic::mips_dpsq_sa_l_w:
  case Intrinsic::mips_dpaqx_s_w_ph:
  case Intrinsic::mips_dpaqx_sa_w_ph:
  case Intrinsic::mips_dpsqx_s_w_ph:
  case Intrinsic::mips_dpsqx_sa_w_ph:
  case Intrinsic::mips_ld_b:
  case Intrinsic::mips_ld_h:
  case Intrinsic::mips_ld_w:
  case Intrinsic::mips_ld_d:

  unsigned Intr = Op->getConstantOperandVal(1);
  case Intrinsic::mips_st_b:
  case Intrinsic::mips_st_h:
  case Intrinsic::mips_st_w:
  case Intrinsic::mips_st_d:

  EVT ResTy = Op->getValueType(0);

  if (isa<ConstantSDNode>(Op))
  if (isa<ConstantFPSDNode>(Op))

  EVT ResTy = Op->getValueType(0);
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
  if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
    switch (SplatBitSize) {
      ViaVecTy = MVT::v16i8;
      ViaVecTy = MVT::v8i16;
      ViaVecTy = MVT::v4i32;
    if (ViaVecTy != ResTy)

  EVT ResTy = Node->getValueType(0);
  for (unsigned i = 0; i < NumElts; ++i) {
                Node->getOperand(i),

  int SHFIndices[4] = { -1, -1, -1, -1 };
  if (Indices.size() < 4)
  for (unsigned i = 0; i < 4; ++i) {
    for (unsigned j = i; j < Indices.size(); j += 4) {
      int Idx = Indices[j];
      if (Idx < 0 || Idx >= 4)
      if (SHFIndices[i] == -1)
        SHFIndices[i] = Idx;
      if (!(Idx == -1 || Idx == SHFIndices[i]))
  for (int i = 3; i >= 0; --i) {
    int Idx = SHFIndices[i];
template <typename ValType>
                                unsigned CheckStride,
                                ValType ExpectedIndex,
                                unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
    ExpectedIndex += ExpectedIndexStride;
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
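// fitsRegularPattern checks every CheckStride'th index between Begin and End:
// each one must be undef (-1) or equal ExpectedIndex, which advances by
// ExpectedIndexStride after every element.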
  int SplatIndex = -1;
  for (const auto &V : Indices) {

  return fitsRegularPattern<int>(Indices.begin(), 1, Indices.end(), SplatIndex,
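  // isVECTOR_SHUFFLE_SPLATI: the shuffle is a splat when every defined index
  // matches the first non-undef index.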
  const auto &Begin = Indices.begin();
  const auto &End = Indices.end();

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 2))
    Wt = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size(), 2))
    Wt = Op->getOperand(1);

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2))
    Ws = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size(), 2))
    Ws = Op->getOperand(1);
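  // lowerVECTOR_SHUFFLE_ILVOD/ILVR/ILVL and the PCKEV/PCKOD lowerings below
  // follow the same shape, differing only in the index pattern each one
  // expects from fitsRegularPattern.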
  const auto &Begin = Indices.begin();
  const auto &End = Indices.end();

  if (fitsRegularPattern<int>(Begin, 2, End, 1, 2))
    Wt = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size() + 1, 2))
    Wt = Op->getOperand(1);

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2))
    Ws = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size() + 1, 2))
    Ws = Op->getOperand(1);

  const auto &Begin = Indices.begin();
  const auto &End = Indices.end();

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 1))
    Wt = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size(), 1))
    Wt = Op->getOperand(1);

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1))
    Ws = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size(), 1))
    Ws = Op->getOperand(1);

  unsigned HalfSize = Indices.size() / 2;
  const auto &Begin = Indices.begin();
  const auto &End = Indices.end();

  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))
    Wt = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size() + HalfSize, 1))
    Wt = Op->getOperand(1);

  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))
    Ws = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size() + HalfSize,
    Ws = Op->getOperand(1);

  const auto &Begin = Indices.begin();
  const auto &Mid = Indices.begin() + Indices.size() / 2;
  const auto &End = Indices.end();

  if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2))
    Wt = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Indices.size(), 2))
    Wt = Op->getOperand(1);

  if (fitsRegularPattern<int>(Mid, 1, End, 0, 2))
    Ws = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Mid, 1, End, Indices.size(), 2))
    Ws = Op->getOperand(1);

  const auto &Begin = Indices.begin();
  const auto &Mid = Indices.begin() + Indices.size() / 2;
  const auto &End = Indices.end();

  if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2))
    Wt = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Indices.size() + 1, 2))
    Wt = Op->getOperand(1);

  if (fitsRegularPattern<int>(Mid, 1, End, 1, 2))
    Ws = Op->getOperand(0);
  else if (fitsRegularPattern<int>(Mid, 1, End, Indices.size() + 1, 2))
    Ws = Op->getOperand(1);
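  // When none of the fixed patterns (SHF, ILV*, PCK*) match, the shuffle
  // falls through to lowerVECTOR_SHUFFLE_VSHF below.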
                                              const bool isSPLATI,
  bool Using1stVec = false;
  bool Using2ndVec = false;

  assert(Indices[0] >= 0 &&
         "shuffle mask starts with an UNDEF, which is not expected");

  for (int i = 0; i < ResTyNumElts; ++i) {
    int Idx = Indices[i];
    if (0 <= Idx && Idx < ResTyNumElts)
    if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)

  int LastValidIndex = 0;
  for (size_t i = 0; i < Indices.size(); i++) {
    int Idx = Indices[i];
      Idx = isSPLATI ? Indices[0] : LastValidIndex;
    LastValidIndex = Idx;

  if (Using1stVec && Using2ndVec) {
    Op0 = Op->getOperand(0);
    Op1 = Op->getOperand(1);
  } else if (Using1stVec)
    Op0 = Op1 = Op->getOperand(0);
  else if (Using2ndVec)
    Op0 = Op1 = Op->getOperand(1);
    llvm_unreachable("shuffle vector mask references neither vector operand?");
  EVT ResTy = Op->getValueType(0);
  for (int i = 0; i < ResTyNumElts; ++i)

  F->insert(It, Sink);
  Sink->transferSuccessorsAndUpdatePHIs(BB);
          MI.getOperand(0).getReg())
  MI.eraseFromParent();

  F->insert(It, Sink);
  Sink->transferSuccessorsAndUpdatePHIs(BB);
          MI.getOperand(0).getReg())
  MI.eraseFromParent();
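  // emitBPOSGE32 and emitMSACBranchPseudo both build a diamond of basic
  // blocks: branch on the DSP/MSA condition, materialize 1 in the true block
  // and 0 in the false block, then join the results with a PHI in Sink.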
  unsigned Lane = MI.getOperand(2).getImm();

    Wt = RegInfo.createVirtualRegister(&Mips::MSA128WEvensRegClass);
                                : &Mips::MSA128WEvensRegClass);
  MI.eraseFromParent();

  unsigned Lane = MI.getOperand(2).getImm() * 2;
  MI.eraseFromParent();

  unsigned Lane = MI.getOperand(2).getImm();
                                : &Mips::MSA128WEvensRegClass);
  MI.eraseFromParent();

  unsigned Lane = MI.getOperand(2).getImm();
  MI.eraseFromParent();

  Register SrcVecReg = MI.getOperand(1).getReg();
  Register LaneReg = MI.getOperand(2).getReg();
  Register SrcValReg = MI.getOperand(3).getReg();

  unsigned EltLog2Size;
  unsigned InsertOp = 0;
  unsigned InsveOp = 0;
  switch (EltSizeInBytes) {
    InsertOp = Mips::INSERT_B;
    InsveOp = Mips::INSVE_B;
    VecRC = &Mips::MSA128BRegClass;
    InsertOp = Mips::INSERT_H;
    InsveOp = Mips::INSVE_H;
    VecRC = &Mips::MSA128HRegClass;
    InsertOp = Mips::INSERT_W;
    InsveOp = Mips::INSVE_W;
    VecRC = &Mips::MSA128WRegClass;
    InsertOp = Mips::INSERT_D;
    InsveOp = Mips::INSVE_D;
    VecRC = &Mips::MSA128DRegClass;

      .addImm(EltSizeInBytes == 8 ? Mips::sub_64 : Mips::sub_lo);

  if (EltSizeInBytes != 1) {
      .addReg(LaneReg, 0, SubRegIdx);
      .addReg(LaneTmp2, 0, SubRegIdx);
  MI.eraseFromParent();
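  // emitINSERT_DF_VIDX handles a variable lane index: the vector is rotated
  // with SLD.B so the target lane reaches position 0, the scalar is inserted
  // there, and the vector is rotated back.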
                                : &Mips::MSA128WEvensRegClass);
                                : &Mips::MSA128WEvensRegClass);
  MI.eraseFromParent();

  MI.eraseFromParent();

      MI.getOperand(1).isReg() ? RegInfo.getRegClass(MI.getOperand(1).getReg())
                               : &Mips::GPR64RegClass);
  const bool UsingMips32 = RC == &Mips::GPR32RegClass;
  BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? Mips::SH : Mips::SH64))
  MI.eraseFromParent();

      MI.getOperand(1).isReg() ? RegInfo.getRegClass(MI.getOperand(1).getReg())
                               : &Mips::GPR64RegClass);
  const bool UsingMips32 = RC == &Mips::GPR32RegClass;
  BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? Mips::LH : Mips::LH64), Rt);
  MI.eraseFromParent();

                                               bool IsFGR64) const {
  Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
      IsFGR64onMips64 ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  unsigned MFC1Opc = IsFGR64onMips64
                         : (IsFGR64onMips32 ? Mips::MFC1_D64 : Mips::MFC1);
  unsigned FILLOpc = IsFGR64onMips64 ? Mips::FILL_D : Mips::FILL_W;
  unsigned WPHI = Wtemp;

  if (IsFGR64onMips32) {
    Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Wtemp3 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
  MI.eraseFromParent();
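  // emitFPROUND_PSEUDO moves the f32/f64 value into an MSA register via
  // MFC1/FILL and narrows it to f16 with FEXDO before copying the result out.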
                                                bool IsFGR64) const {
      IsFGR64onMips64 ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  unsigned MTC1Opc = IsFGR64onMips64
                         : (IsFGR64onMips32 ? Mips::MTC1_D64 : Mips::MTC1);
  Register COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W;
  Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
    WPHI = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
                           ? RegInfo.createVirtualRegister(&Mips::FGR64RegClass)
  if (IsFGR64onMips32) {
  MI.eraseFromParent();
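  // emitFPEXTEND_PSEUDO is the inverse: widen the f16 with FEXUPR, copy the
  // element out with COPY_S and move it into the FGR result via MTC1.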
      .addReg(MI.getOperand(1).getReg());
  MI.eraseFromParent();

      .addReg(MI.getOperand(1).getReg());
  MI.eraseFromParent();