#define DEBUG_TYPE "mips-lower"
static cl::opt<bool>
    NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
                   cl::desc("MIPS: Don't trap on integer division by zero."),
                   cl::init(false));
static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};
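// The fragment below is from getVectorTypeBreakdownForCallingConv, the hook
// that splits an illegal vector argument or return type into NumIntermediates
// parts of IntermediateVT: here the intermediate type is forced to match the
// register type, and the part count is returned to the caller.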
unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // ...
  IntermediateVT = RegisterVT;
  // ...
  return NumIntermediates;
}
SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}
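// FastISel on MIPS is only supported for position-independent code under the
// O32 ABI, and not in combination with XGOT; anything else falls back to
// SelectionDAG ISel, which is what the check below enforces.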
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
      Subtarget.useXGOT())
    UseFastISel = false;
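// On MIPS, [D]DIV[U] leaves the quotient in LO and the remainder in HI.
// performDivRemCombine rewrites ISD::SDIVREM/UDIVREM into a glued DivRem node
// plus CopyFromReg nodes from LO/HI for whichever results (value 0 = quotient,
// value 1 = remainder) actually have uses, roughly:
//   (sdivrem $a, $b)  ==>  div $a, $b ; mflo (quotient) ; mfhi (remainder)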
  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
  // ...
  SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  // ...
  if (N->hasAnyUseOfValue(0)) {
  // ...
  if (N->hasAnyUseOfValue(1)) {
617 "Illegal Condition Code");
631 if (!
LHS.getValueType().isFloatingPoint())
  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
  // ...
  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
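// performANDCombine: recognize (and (srl $src, pos), mask), where mask is a
// contiguous run of ones, and rewrite it to the MIPS32r2 EXT bitfield-extract
// instruction. SMPos/SMSize below are the decoded position and size of the
// shifted mask. For example (assuming mips32r2):
//   (and (srl $a, 3), 0x1f)  ==>  ext $dst, $a, 3, 5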
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  // ...
  EVT ValTy = N->getValueType(0);
  // ...
  unsigned SMPos, SMSize;
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  // ...
  if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
  // ...
  NewOperand = FirstOperand;
  // ...
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
                     // ...
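// performORCombine: recognize (or (and $d, mask0), (and (shl $s, pos), mask1))
// where mask0 and mask1 select complementary bit ranges, and rewrite it to the
// MIPS32r2 INS bitfield-insert instruction; the hasMips64r2() check admits
// fields reaching past bit 31. For example:
//   (or (and $d, 0xffffffc3), (and (shl $s, 2), 0x3c))  ==>  ins $d, $s, 2, 4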
  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
  // ...
      And1.getOperand(0).getOpcode() == ISD::SHL) {
    // ...
    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
    // ...
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
    // ...
    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
    // ...
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
    // ...
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      // ...
      if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
      // ...
      if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
      // ...
      EVT ValTy = N->getOperand(0)->getValueType(0);
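// performMADD_MSUBCombine: fold (add/sub (mul $a, $b)) feeding a 64-bit value
// into MAdd/MSub accumulator nodes, provided the multiply has a single use
// and its operands are consistently sign- or zero-extended (IsSigned /
// IsUnsigned below).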
  if (!Mult.hasOneUse())
  // ...
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);
  // ...
  if (!IsSigned && !IsUnsigned)
  EVT ValTy = N->getValueType(0);
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  // ...
  unsigned SMPos, SMSize;
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  // ...
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
    // ...
  unsigned Opc = N->getOpcode();
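// Bit tests against a constant position of at most 15 can be folded into
// ANDI's zero-extended 16-bit immediate (1 << 15 still fits), so only those
// positions are reported as cheap here.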
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);
  // ...
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  // ...
  if (N->getOperand(0).getValueType().isVector())
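// LowerOperation dispatches each generic ISD opcode that is marked Custom for
// this target to the corresponding Mips lowering routine.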
  switch (Op.getOpcode())
  {
  // ...
  case ISD::FABS:               return lowerFABS(Op, DAG);
  // ...
  case ISD::SRL_PARTS:          return lowerShiftRightParts(Op, DAG, false);
static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock &MBB,
                                              const TargetInstrInfo &TII,
                                              bool Is64Bit, bool IsMicroMips) {
  // ...
                TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
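// EmitInstrWithCustomInserter: atomic pseudos are expanded here into *_POSTRA
// pseudos that survive until after register allocation, so that no spill or
// copy can be inserted between the ll and sc of the eventual load-linked /
// store-conditional loop. Byte and halfword variants go through the partword
// path (size 1 or 2); word and doubleword variants use the full-width path.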
  switch (MI.getOpcode()) {
  // ...
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  // ...
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  // ...
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  // ...
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);
  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
  case Mips::LDR_W:
    return emitLDR_W(MI, BB);
  case Mips::LDR_D:
    return emitLDR_D(MI, BB);
  case Mips::STR_W:
    return emitSTR_W(MI, BB);
  case Mips::STR_D:
    return emitSTR_D(MI, BB);
  }
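// emitAtomicBinary maps each pre-RA atomic pseudo to its post-RA twin. The
// min/max flavours additionally need a scratch register for the comparison
// inside the ll/sc loop, signalled by NeedsAdditionalReg.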
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown pseudo atomic for replacement!");
  }
  if (NeedsAdditionalReg) {
    // ...
  }
  // ...
  MI.eraseFromParent();
                                                     unsigned SrcReg) const {
  // ...
  int64_t ShiftImm = 32 - (Size * 8);
  // ...
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for EmitAtomicBinaryPartial.");
  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown subword atomic pseudo for expansion!");
  }
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  // ...
  if (NeedsAdditionalReg) {
    // ...
  }
  // ...
  MI.eraseFromParent();
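// Full-word and doubleword compare-and-swap also expands to a post-RA pseudo.
// The incoming values are copied into fresh virtual registers (OldValCopy /
// NewValCopy below) so that the expansion may freely clobber its operands.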
  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");

  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
  // ...
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
  // ...
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
  // ...
  MI.eraseFromParent();
1901 "Unsupported size for EmitAtomicCmpSwapPartial.");
1928 unsigned AtomicOp =
MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1929 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1930 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
1971 int64_t MaskImm = (
Size == 1) ? 255 : 65535;
1972 BuildMI(BB,
DL,
TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
1974 BuildMI(BB,
DL,
TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
2017 MI.eraseFromParent();
                     FCC0, Dest, CondRes);
  // ...
         "Floating point operand expected.");
  EVT Ty = Op.getValueType();
  // ...
  EVT Ty = Op.getValueType();
    Args.push_back(Entry);
    // ...
        .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
    std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  EVT Ty = Op.getValueType();
  // ...
  EVT Ty = Op.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  // ...
  EVT VT = Node->getValueType(0);
  // ...
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  // ...
  unsigned ArgSizeInBytes =
      DAG.getDataLayout().getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
  // ...
  unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
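// fcopysign is lowered bitwise: with the MIPS32r2 ext/ins bitfield
// instructions (HasExtractInsert) the sign bit of Y is inserted directly into
// X; otherwise it is emulated with shifts and masking. The 64-bit helper
// below must also reconcile operands of different widths (WidthX / WidthY).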
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  // ...
  if (HasExtractInsert) {
  // ...

static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  // ...
  if (HasExtractInsert) {
    // ...
    if (WidthX > WidthY)
    // ...
    else if (WidthY > WidthX)
    // ...
  }
  // ...
  if (WidthX > WidthY)
  // ...
  else if (WidthY > WidthX)
SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  // ...
                          Op.getOperand(0), Const1);
  // ...
  if (HasExtractInsert)
  // ...

SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  // ...
  if (HasExtractInsert)
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    // ...
  }
  // ...
  EVT VT = Op.getValueType();
  // ...
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    // ...
  }
  // ...
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
  // ...
                                       : Mips::PseudoD_SELECT_I,
                         DL, VTList, Cond, ShiftRightHi,
  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
  // ...
                     LD->getMemOperand());
  // ...
  EVT MemVT = LD->getMemoryVT();
  // ...
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
  // ...
  EVT VT = Op.getValueType();
  // ...
  SDValue Ops[] = { SRL, LWR.getValue(1) };
  EVT ValTy = Op->getValueType(0);
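// CC_MipsO32 implements the O32 argument convention by hand: the first four
// words of arguments go in A0-A3. f32/f64 values normally use the FP argument
// registers, but land in integer registers when the call is variadic or
// AllocateFloatsInIntReg otherwise demands it; an f64 in integer registers
// needs an even/odd pair, which is why A1 and A3 are skipped as starting
// registers. The FP32/FP64 wrappers at the end differ only in the f64
// register file they offer (D6/D7 versus D12_64/D14_64).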
  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
  // ...
  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
  // ...
  else if (ArgFlags.isZExt())
  // ...
  else if (ArgFlags.isZExt())
  // ...
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
  // ...
  if (ValVT == MVT::i32 && isVectorFloat) {
    // ...
    if (Reg == Mips::A2)
    // ...
  } else if (ValVT == MVT::i32 ||
             (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    // ...
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
    // ...
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    // ...
    if (Reg == Mips::A1 || Reg == Mips::A3)
    // ...
    if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
  // ...

  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };

  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
  // ...
  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };

  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
#include "MipsGenCallingConv.inc"
  // ...
  return CC_Mips_FixedArg;
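// getOpndList assembles the operand list of a call node. Under PIC, unless
// the callee has internal linkage, $gp must hold the GOT address at the call
// site, so it is appended to RegsToPass as an implicit argument.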
                                           const SDLoc &DL, bool IsTailCall,
// ...
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
// ...
  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    // ...
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  }
  // ...
  for (auto &R : RegsToPass) {
  // ...
  for (auto &R : RegsToPass)
  // ...
  assert(Mask && "Missing call preserved mask for calling convention");
  // ...
    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {
  switch (MI.getOpcode()) {
  // ...
  case Mips::JALRPseudo:
  // ...
  case Mips::JALR64Pseudo:
  case Mips::JALR16_MM:
  case Mips::JALRC16_MMR6:
  case Mips::TAILCALLREG:
  case Mips::TAILCALLREG64:
  case Mips::TAILCALLR6REG:
  case Mips::TAILCALL64R6REG:
  case Mips::TAILCALLREG_MM:
  case Mips::TAILCALLREG_MMR6: {
    // ...
        Node->getNumOperands() < 1 ||
        Node->getOperand(0).getNumOperands() < 2) {
    // ...
    const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
    // ...
    if (const GlobalAddressSDNode *G =
            dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
      // ...
      if (!isa<Function>(G->getGlobal())) {
        LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                          << G->getGlobal()->getName() << "\n");
        // ...
      }
      Sym = G->getGlobal()->getName();
    } else if (const ExternalSymbolSDNode *ES =
                   dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
      Sym = ES->getSymbol();
    }
    // ...
    LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
  const ExternalSymbolSDNode *ES =
      dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
  // ...
  bool MemcpyInByVal = ES &&
  // ...
  unsigned ReservedArgArea =
      MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
  CCInfo.AllocateStack(ReservedArgArea, Align(1));
  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  // ...
  bool InternalLinkage = false;
  if (IsTailCall) {
    IsTailCall = isEligibleForTailCallOptimization(
        // ...
    if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());
    }
  }
  if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  // ...
  if (!(IsTailCall || MemcpyInByVal))
  // ...
  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  // ...
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    // ...
    bool UseUpperBits = false;
    // ...
    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      assert(!IsTailCall &&
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
                   FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
                   VA);
      CCInfo.nextInRegsParam();
      // ...
    }
    // ...
      Register LocRegHigh = ArgLocs[++i].getLocReg();
      RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
      RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
      UseUpperBits = true;
      // ...
      UseUpperBits = true;
      // ...
      UseUpperBits = true;
      // ...
    unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
    // ...
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    if (Options.SupportsDebugEntryValues)
    // ...
    MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
                                         Chain, Arg, DL, IsTailCall, DAG));
  }
  // ...
  if (!MemOpChains.empty())
  bool GlobalOrExternal = false, IsCallReloc = false;
  // ...
  if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    // ...
  } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // ...
    if (auto *F = dyn_cast<Function>(N->getGlobal())) {
      if (F->hasFnAttribute("long-call"))
        UseLongCalls = true;
      else if (F->hasFnAttribute("short-call"))
        UseLongCalls = false;
    if (InternalLinkage)
    // ...
    GlobalOrExternal = true;
    // ...
    const char *Sym = S->getSymbol();
    // ...
    GlobalOrExternal = true;
  // ...
  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);
  if (!MemcpyInByVal) {
  // ...
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
                         InVals, CLI);
SDValue MipsTargetLowering::LowerCallResult(
    // ...
  const ExternalSymbolSDNode *ES =
      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
                           ES ? ES->getSymbol() : nullptr);
  // ...
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    // ...
                                     RVLocs[i].getLocVT(), InFlag);
    // ...
    unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
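// LowerFormalArguments copies incoming register arguments into virtual
// registers and materializes stack arguments as loads from fixed frame
// objects; byval aggregates are handled by copyByValRegs and, for variadic
// functions, the remaining argument registers are spilled by writeVarArgRegs
// so that va_arg can walk them in memory.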
SDValue MipsTargetLowering::LowerFormalArguments(
    // ...
  std::vector<SDValue> OutChains;
  // ...
  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
    report_fatal_error(
        "Functions with the interrupt attribute cannot have arguments!");

  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
  // ...
                           CCInfo.getInRegsParamsCount() > 0);

  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();

  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    // ...
    if (Ins[InsIdx].isOrigArg()) {
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();
    }
    // ...
    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();
      // ...
    }
    // ...
                                 ArgValue, ArgValue2);
    // ...
                               LocVT, DL, Chain, FIN,
      // ...
      OutChains.push_back(ArgValue.getValue(1));
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    // ...
    if (ArgLocs[i].needsCustom()) {
    // ...
    if (Ins[InsIdx].Flags.isSRet()) {
  // ...
    writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
  // ...
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
}

bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                       bool IsSigned) const {
  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
  // ...
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    // ...
    bool UseUpperBits = false;