#define DEBUG_TYPE "mips-lower"
    cl::desc("MIPS: Don't trap on integer division by zero."),
    Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
    Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  return NumIntermediates;
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                         N->getOffset(), Flag);
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
  EVT Ty = N->getValueType(0);
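  // Pick the 32- or 64-bit HI/LO accumulator registers to match the result
  // type.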
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
                             N->getOperand(0), N->getOperand(1));
  if (N->hasAnyUseOfValue(0)) {
  if (N->hasAnyUseOfValue(1)) {
630 "Illegal Condition Code");
  if (!LHS.getValueType().isFloatingPoint())
  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
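  // The AND mask must be a shifted run of ones; SMPos/SMSize record the
  // run's starting bit position and length, which become the operands of the
  // extract-style instruction this combine targets.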
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
    if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
    NewOperand = FirstOperand;
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      And1.getOperand(0).getOpcode() == ISD::SHL) {
    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
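    // The inverted AND mask must cover exactly the contiguous field that the
    // shifted value occupies; if it does, the or/and/shl pattern can be
    // emitted as a single INS-style insert.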
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
      if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
  EVT ValTy = N->getOperand(0)->getValueType(0);
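  // Fold (add/sub (mul ...)) into a single multiply-accumulate only when the
  // multiply has no other users; otherwise the multiply must stay anyway.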
  if (!Mult.hasOneUse())
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);
  if (!IsSigned && !IsUnsigned)
  std::tie(BottomHalf, TopHalf) =
  EVT VTs[2] = {MVT::i32, MVT::i32};
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
  EVT ValTy = N->getValueType(0);
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
  unsigned Opc = N->getOpcode();
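  // Bit positions up to 15 can be tested with ANDI's 16-bit zero-extended
  // immediate, so the bit test stays cheap without a shift.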
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);
          N->getOperand(0).getOpcode() == ISD::SRL) ||
          N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  if (N->getOperand(0).getValueType().isVector())
  switch (Op.getOpcode())
                                bool Is64Bit, bool IsMicroMips) {
              TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
  switch (MI.getOpcode()) {
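  // 8- and 16-bit atomics are expanded via a masked ll/sc loop on the
  // containing word (emitAtomicBinaryPartword); 32- and 64-bit atomics map
  // directly onto their post-RA pseudos (emitAtomicBinary).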
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
    return emitLDR_W(MI, BB);
    return emitLDR_D(MI, BB);
    return emitSTR_W(MI, BB);
    return emitSTR_D(MI, BB);
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
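  // Map each word/doubleword atomic pseudo to its post-RA expansion; the
  // min/max variants additionally need a scratch register.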
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
                                     unsigned SrcReg) const {
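  // 32 - 8 * Size is the shift amount that moves a 1- or 2-byte value to the
  // top of a 32-bit register so it can be arithmetic-shifted back down,
  // sign-extending it in place.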
  int64_t ShiftImm = 32 - (Size * 8);
         "Unsupported size for EmitAtomicBinaryPartial.");
  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
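  // Same dispatch for the subword (i8/i16) atomics; the min/max variants
  // again need a scratch register for the comparison.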
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
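  // Mask covering just the byte (0xff) or halfword (0xffff) lane being
  // operated on within the containing word.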
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
  MI.eraseFromParent();
1909 "Unsupported size for EmitAtomicCmpSwapPartial.");
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
                          ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
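  // AlignedAddr = Ptr with its low two bits cleared (via the MaskLSB2
  // register): the address of the word containing the byte/halfword lane.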
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
  MI.eraseFromParent();
                     FCC0, Dest, CondRes);
         "Floating point operand expected.");
  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();
  Args.push_back(Entry);
      .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  unsigned ArgSizeInBytes =
  unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
                            bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  if (HasExtractInsert) {
  if (TyX == MVT::f32)
                            bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  if (HasExtractInsert) {
    if (WidthX > WidthY)
    else if (WidthY > WidthX)
    if (WidthX > WidthY)
    else if (WidthY > WidthX)
                                           bool HasExtractInsert) const {
                       Op.getOperand(0), Const1);
  if (HasExtractInsert)
  if (Op.getValueType() == MVT::f32)
                                           bool HasExtractInsert) const {
  if (HasExtractInsert)
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
        "return address can be determined only for current frame");
  EVT VT = Op.getValueType();
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
        "return address can be determined only for current frame");
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
                                 : Mips::PseudoD_SELECT_I,
                             DL, VTList, Cond, ShiftRightHi,
  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
                     LD->getMemOperand());
  EVT MemVT = LD->getMemoryVT();
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32) || (VT == MVT::i64));
  SDValue Ops[] = { SRL, LWR.getValue(1) };
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
  EVT ValTy = Op->getValueType(0);
  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
  if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
    else if (ArgFlags.isZExt())
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    else if (ArgFlags.isZExt())
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
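  // Under O32, an i32 piece with 8-byte original alignment marks half of a
  // split 64-bit value, which must start in an even register (A0 or A2).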
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
  if (ValVT == MVT::i32 && isVectorFloat) {
    if (Reg == Mips::A2)
2944 }
else if (ValVT == MVT::i32 ||
2945 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2949 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2952 }
else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2956 if (Reg == Mips::A1 || Reg == Mips::A3)
  if (ValVT == MVT::f32) {
    if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
#include "MipsGenCallingConv.inc"
  return CC_Mips_FixedArg;
                              const SDLoc &DL, bool IsTailCall,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  for (auto &R : RegsToPass) {
  for (auto &R : RegsToPass)
  assert(Mask && "Missing call preserved mask for calling convention");
    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {
  switch (MI.getOpcode()) {
  case Mips::JALRPseudo:
  case Mips::JALR64Pseudo:
  case Mips::JALR16_MM:
  case Mips::JALRC16_MMR6:
  case Mips::TAILCALLREG:
  case Mips::TAILCALLREG64:
  case Mips::TAILCALLR6REG:
  case Mips::TAILCALL64R6REG:
  case Mips::TAILCALLREG_MM:
  case Mips::TAILCALLREG_MMR6: {
        Node->getNumOperands() < 1 ||
        Node->getOperand(0).getNumOperands() < 2) {
    const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
            dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
      if (!isa<Function>(G->getGlobal())) {
        LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                          << G->getGlobal()->getName() << "\n");
      Sym = G->getGlobal()->getName();
                   dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
      Sym = ES->getSymbol();
      dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
  bool MemcpyInByVal = ES &&
  unsigned ReservedArgArea =
  CCInfo.AllocateStack(ReservedArgArea, Align(1));
  unsigned StackSize = CCInfo.getStackSize();
  bool InternalLinkage = false;
    IsTailCall = isEligibleForTailCallOptimization(
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
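      // Restrict tail calls to symbols that bind locally, so the call cannot
      // be redirected through a lazy-binding stub at run time.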
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());
           "site marked musttail");
    StackSize = alignTo(StackSize, StackAlignment);
  if (!(IsTailCall || MemcpyInByVal))
  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    SDValue Arg = OutVals[OutIdx];
    bool UseUpperBits = false;
    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
      CCInfo.nextInRegsParam();
    if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
        (ValVT == MVT::f64 && LocVT == MVT::i64) ||
        (ValVT == MVT::i64 && LocVT == MVT::f64))
    else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
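      // An f64 passed in i32 registers is split into Lo/Hi halves that go
      // into consecutive registers of a pair.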
      Register LocRegHigh = ArgLocs[++i].getLocReg();
      RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
      RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      if (Options.SupportsDebugEntryValues)
                                         Chain, Arg, DL, IsTailCall, DAG));
  if (!MemOpChains.empty())
  bool GlobalOrExternal = false, IsCallReloc = false;
  if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (auto *F = dyn_cast<Function>(N->getGlobal())) {
      if (F->hasFnAttribute("long-call"))
        UseLongCalls = true;
      else if (F->hasFnAttribute("short-call"))
        UseLongCalls = false;
    if (InternalLinkage)
    GlobalOrExternal = true;
    const char *Sym = S->getSymbol();
    GlobalOrExternal = true;
  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);
  if (!(MemcpyInByVal)) {
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,

SDValue MipsTargetLowering::LowerCallResult(
      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
                               RVLocs[i].getLocVT(), InGlue);
    unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
SDValue MipsTargetLowering::LowerFormalArguments(
  std::vector<SDValue> OutChains;
  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
        "Functions with the interrupt attribute cannot have arguments!");
  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
                           CCInfo.getInRegsParamsCount() > 0);
  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (Ins[InsIdx].isOrigArg()) {
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();
    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();
      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
          (RegVT == MVT::i64 && ValVT == MVT::f64) ||
          (RegVT == MVT::f64 && ValVT == MVT::i64))
      else if (ABI.IsO32() && RegVT == MVT::i32 && ValVT == MVT::f64) {
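        // Under O32 an f64 argument arrives in two i32 registers; the two
        // halves are recombined into a single f64 value here.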
                               ArgValue, ArgValue2);
                                 LocVT, DL, Chain, FIN,
      OutChains.push_back(ArgValue.getValue(1));
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (ArgLocs[i].needsCustom()) {
    if (Ins[InsIdx].Flags.isSRet()) {
    writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);

  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                       bool IsSigned) const {
  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    bool UseUpperBits = false;
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
    return LowerInterruptReturn(RetOps, DL, DAG);
MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
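  // "ZC" is treated as a memory constraint: a memory operand whose address
  // is suitable for ll/sc-style instructions.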
  if (Constraint == "ZC")
MipsTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {