#define DEBUG_TYPE "mips-lower"

static cl::opt<bool>
    NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
                   cl::desc("MIPS: Don't trap on integer division by zero."),
                   cl::init(false));

static const MCPhysReg Mips64DPRegs[8] = {
    Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
    Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64};
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  return NumIntermediates;
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                             N->getOffset(), Flag);
  isMicroMips = Subtarget.inMicroMipsMode();
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
                           N->getOperand(0), N->getOperand(1));
  if (N->hasAnyUseOfValue(0)) {
  if (N->hasAnyUseOfValue(1)) {
543 "Illegal Condition Code");
558 if (!
LHS.getValueType().isFloatingPoint())
579 return DAG.
getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T),
DL,
670 SDValue ValueIfTrue =
N->getOperand(0), ValueIfFalse =
N->getOperand(2);
683 unsigned Opc = (
N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
686 SDValue FCC =
N->getOperand(1), Glue =
N->getOperand(3);
688 ValueIfFalse, FCC, ValueIfTrue, Glue);
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
  if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
      Pos + SMSize > ValTy.getSizeInBits())
  NewOperand = FirstOperand;
  SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
       SecondOperand.getOpcode() == ISD::SHL) ||
       SecondOperand.getOpcode() == ISD::AND)) {
            ? SecondOperand.getOperand(0)
            ? SecondOperand.getOperand(1)
    if (SMPos0 != 0 || SMSize0 != ShlShiftValue)
    EVT ValTy = N->getValueType(0);
    SMPos1 = ShlShiftValue;
    assert(SMPos1 < ValTy.getSizeInBits());
    SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1;
    return DAG.getNode(MipsISD::Ins, DL, ValTy, ShlOperand0,
  if (SecondOperand.getOpcode() == ISD::AND &&
      SecondOperand.getOperand(0).getOpcode() == ISD::SHL) {
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      bool isConstCase = SecondOperand.getOpcode() != ISD::AND;
      if (SecondOperand.getOpcode() == ISD::AND) {
        EVT ValTy = N->getOperand(0)->getValueType(0);
                              SecondOperand, Const1);
          MipsISD::Ins, DL, N->getValueType(0),
          DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
  if (!IsSigned && !IsUnsigned)
  std::tie(BottomHalf, TopHalf) =
      CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);
  unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
                          : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
  SDValue InnerAdd = N->getOperand(1);
  if (Lo.getOpcode() != MipsISD::Lo)
  if ((Lo.getOpcode() != MipsISD::Lo) ||
  EVT ValTy = N->getValueType(0);
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (Pos >= ValTy.getSizeInBits())
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
  return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
  EVT VT = N->getValueType(0);
  int64_t ConstImm = ConstantOperand->getSExtValue();
  unsigned Opc = N->getOpcode();
  case MipsISD::CMovFP_F:
  case MipsISD::CMovFP_T:
    return C->getAPIntValue().ule(15);
           N->getOperand(0).getOpcode() == ISD::SRL) ||
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  if (N->getOperand(0).getValueType().isVector())
  switch (Op.getOpcode())
  case ISD::BRCOND:            return lowerBRCOND(Op, DAG);
    return lowerFSETCC(Op, DAG);
  case ISD::VASTART:           return lowerVASTART(Op, DAG);
  case ISD::VAARG:             return lowerVAARG(Op, DAG);
  case ISD::FABS:              return lowerFABS(Op, DAG);
    return lowerFCANONICALIZE(Op, DAG);
  case ISD::ATOMIC_FENCE:      return lowerATOMIC_FENCE(Op, DAG);
    return lowerSTRICT_FP_TO_INT(Op, DAG);
  case ISD::READCYCLECOUNTER:
    return lowerREADCYCLECOUNTER(Op, DAG);
                                                bool Is64Bit, bool IsMicroMips) {
              TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
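  // MIPS integer divides do not fault on a zero divisor, so unless the
  // -mno-check-zero-division flag (NoZeroDivCheck above) is given,
  // insertDivByZeroTrap appends a trap-if-equal against $zero after the
  // divide, using TEQ (or TEQ_MM on microMIPS).  Roughly, a 32-bit signed
  // division then comes out as (illustrative sketch only; actual registers
  // depend on allocation):
  //   div  $zero, $a0, $a1
  //   teq  $a1, $zero, 7     ; trap if the divisor is zero
  //   mflo $v0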
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
    return emitLDR_W(MI, BB);
    return emitLDR_D(MI, BB);
    return emitSTR_W(MI, BB);
    return emitSTR_D(MI, BB);
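  // None of the ATOMIC_* pseudos dispatched above are expanded into ll/sc
  // loops at this point; emitAtomicBinary / emitAtomicBinaryPartword merely
  // rewrite them into *_POSTRA pseudos with copied operands.  The retry loop
  // itself is only built after register allocation, so that no spill or
  // reload can end up between the ll and the sc and break the atomic
  // sequence.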
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
                                           unsigned SrcReg) const {
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  int64_t ShiftImm = 32 - (Size * 8);
         "Unsupported size for EmitAtomicBinaryPartial.");
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  const TargetRegisterClass *RCp =
  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
      .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  MachineInstrBuilder MIB =
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
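  // Subword atomics are carried out on the naturally aligned word containing
  // the byte or halfword.  In terms of the values computed above, for an
  // operand address A (little-endian shown; big-endian flips the shift):
  //   AlignedAddr = A & ~3
  //   ShiftAmt    = (A & 3) * 8
  //   Mask        = (Size == 1 ? 0xff : 0xffff) << ShiftAmt
  // The *_POSTRA pseudo then runs its ll/sc loop on AlignedAddr and uses Mask
  // to merge the updated subword back without touching the neighboring bytes.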
  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
  Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
  MI.eraseFromParent();
1915 "Unsupported size for EmitAtomicCmpSwapPartial.");
1918 MachineRegisterInfo &RegInfo = MF->
getRegInfo();
1920 const bool ArePtrs64bit =
ABI.ArePtrs64bit();
1921 const TargetRegisterClass *RCp =
1942 unsigned AtomicOp =
MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1943 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1944 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
1985 int64_t MaskImm = (
Size == 1) ? 255 : 65535;
1986 BuildMI(BB,
DL,
TII->
get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
1988 BuildMI(BB,
DL,
TII->
get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
1991 .
addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).
addImm(3);
2031 MI.eraseFromParent();
  unsigned RdhwrOpc, DestReg;
  if (PtrVT == MVT::i64) {
    RdhwrOpc = Mips::RDHWR64;
    RdhwrOpc = Mips::RDHWR;
  if (CondRes.getOpcode() != MipsISD::FPCmp)
  return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
                     FCC0, Dest, CondRes);
  if (Cond.getOpcode() != MipsISD::FPCmp)
         "Floating point operand expected.");
  EVT Ty = Op.getValueType();
  const GlobalValue *GV = N->getGlobal();
           "Windows is the only supported COFF target");
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
                                 N, SDLoc(N), Ty, DAG,
  EVT Ty = Op.getValueType();
  const GlobalValue *GV = GA->getGlobal();
    Args.emplace_back(Argument, PtrTy);
    TargetLowering::CallLoweringInfo CLI(DAG);
        .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
    std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
    SDValue Ret = CallResult.first;
  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
                      MachinePointerInfo(SV));
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
      llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
  unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
                               VAListPtr, MachinePointerInfo(SV));
  unsigned ArgSizeInBytes =
                       MachinePointerInfo(SV));
  if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
    unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
  return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
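  // lowerVAARG pulls the next argument out of the va_list save area.  On
  // big-endian targets an argument smaller than its slot lives in the
  // high-order bytes of that slot, hence the Adjustment of
  // (ArgSlotSizeInBytes - ArgSizeInBytes) added to the pointer before the
  // final load; little-endian targets read from the slot base directly.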
                                 bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
  if (HasExtractInsert) {
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
  if (TyX == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
                                 bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  if (HasExtractInsert) {
    if (WidthX > WidthY)
    else if (WidthY > WidthX)
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
  if (WidthX > WidthY)
  else if (WidthY > WidthX)
  return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
                                    bool HasExtractInsert) const {
          ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
          : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                        Op.getOperand(0), Const1);
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
  if (Op.getValueType() == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
      DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
                                    bool HasExtractInsert) const {
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
  return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
  if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
    return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
  return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
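  // fcopysign and fabs are lowered as integer manipulation of the sign bit:
  // the float is bitcast to an integer (or its high word extracted for f64 on
  // a 32-bit FPU), and when the subtarget provides the MIPS32r2/MIPS64r2
  // ext/ins instructions (HasExtractInsert), MipsISD::Ins overwrites the
  // single sign bit in place; otherwise the same result is produced with
  // shift/and/or sequences before the value is bitcast back.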
  EVT VT = Op.getValueType();
  SDNodeFlags Flags = Op->getFlags();
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");
  EVT VT = Op.getValueType();
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
  return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
  return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
  MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
  MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
  SDVTList VTList = DAG.getVTList(VT, VT);
                             DL, VTList, Cond, ShiftRightHi,
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
                             LD->getMemOperand());
  EVT MemVT = LD->getMemoryVT();
  if (Subtarget.systemSupportsUnalignedAccess())
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32) || (VT == MVT::i64));
  return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
  return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
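  // Unaligned i32/i64 accesses are only rewritten this way when the subtarget
  // cannot perform unaligned loads and stores natively: the access becomes a
  // left/right pair (MipsISD::LWL/LWR and SWL/SWR, or LDL/LDR and SDL/SDR for
  // 64-bit), and the extra byte offset (3 or 7) goes to whichever half covers
  // the most significant bytes, which is why it depends on IsLittle.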
  if (!Subtarget.systemSupportsUnalignedAccess() &&
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
  EVT ValTy = Op->getValueType(0);
                     Loc, Op.getValueType(), SrcVal);
          State.getMachineFunction().getSubtarget());
  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
  if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
    else if (ArgFlags.isZExt())
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    else if (ArgFlags.isZExt())
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
                                State.getFirstUnallocated(F32Regs) != ValNo;
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
  if (ValVT == MVT::i32 && isVectorFloat) {
    Reg = State.AllocateReg(FloatVectorIntRegs);
    if (Reg == Mips::A2)
      State.AllocateReg(Mips::A1);
      State.AllocateReg(Mips::A3);
  } else if (ValVT == MVT::i32 ||
             (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    if (Reg == Mips::A1 || Reg == Mips::A3)
  if (ValVT == MVT::f32) {
    Reg = State.AllocateReg(F64Regs);
    if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
#include "MipsGenCallingConv.inc"
  return CC_Mips_FixedArg;
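  // O32 floating-point argument passing, as implemented by CC_MipsO32 above:
  // only the first two arguments may use FP registers (D6/D7 for the FP32
  // variant, D12_64/D14_64 for FP64), and only while the FP registers have
  // been allocated in lockstep and the call is not variadic.  Otherwise
  // floats travel in A0-A3 like integers, with an f64 taking an aligned
  // even/odd pair, which is what the checks against Mips::A1 and Mips::A3
  // enforce.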
                                           const SDLoc &DL, bool IsTailCall,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  for (auto &R : RegsToPass) {
  for (auto &R : RegsToPass)
  assert(Mask && "Missing call preserved mask for calling convention");
    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {
  Ops.push_back(InGlue);
  switch (MI.getOpcode()) {
  case Mips::JALRPseudo:
  case Mips::JALR64Pseudo:
  case Mips::JALR16_MM:
  case Mips::JALRC16_MMR6:
  case Mips::TAILCALLREG:
  case Mips::TAILCALLREG64:
  case Mips::TAILCALLR6REG:
  case Mips::TAILCALL64R6REG:
  case Mips::TAILCALLREG_MM:
  case Mips::TAILCALLREG_MMR6: {
        Node->getNumOperands() < 1 ||
        Node->getOperand(0).getNumOperands() < 2) {
    const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
        LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                          << G->getGlobal()->getName() << "\n");
      Sym = G->getGlobal()->getName();
      Sym = ES->getSymbol();
    LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
      Chain.getOpcode() == ISD::CALLSEQ_START;
  unsigned ReservedArgArea =
      MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
  CCInfo.AllocateStack(ReservedArgArea, Align(1));
  CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
  unsigned StackSize = CCInfo.getStackSize();
  bool InternalLinkage = false;
    IsTailCall = isEligibleForTailCallOptimization(
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());
           "site marked musttail");
  StackSize = alignTo(StackSize, StackAlignment);
  if (!(IsTailCall || MemcpyInByVal))
  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    SDValue Arg = OutVals[OutIdx];
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
    bool UseUpperBits = false;
    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
                   FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
      CCInfo.nextInRegsParam();
      if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
          (ValVT == MVT::f64 && LocVT == MVT::i64) ||
          (ValVT == MVT::i64 && LocVT == MVT::f64))
        Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
      else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
        Register LocRegHigh = ArgLocs[++i].getLocReg();
        RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
        RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
      Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
                                         Chain, Arg, DL, IsTailCall, DAG));
  if (!MemOpChains.empty())
  EVT Ty = Callee.getValueType();
  bool GlobalOrExternal = false, IsCallReloc = false;
  if (!Subtarget.isABICalls() && !IsPIC) {
  bool UseLongCalls = Subtarget.useLongCalls();
    if (F->hasFnAttribute("long-call"))
      UseLongCalls = true;
    else if (F->hasFnAttribute("short-call"))
      UseLongCalls = false;
      G->getGlobal()->hasDLLImportStorageClass()) {
           "Windows is the only supported COFF target");
    auto PtrInfo = MachinePointerInfo();
    const GlobalValue *Val = G->getGlobal();
    if (InternalLinkage)
    GlobalOrExternal = true;
    const char *Sym = S->getSymbol();
    GlobalOrExternal = true;
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);
  Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
  if (!(MemcpyInByVal)) {
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
SDValue MipsTargetLowering::LowerCallResult(
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
                                     RVLocs[i].getLocVT(), InGlue);
      unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
    Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
SDValue MipsTargetLowering::LowerFormalArguments(
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  std::vector<SDValue> OutChains;
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
        "Functions with the interrupt attribute cannot have arguments!");
  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
                           CCInfo.getInRegsParamsCount() > 0);
  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[InsIdx].isOrigArg()) {
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();
    ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();
      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
          (RegVT == MVT::i64 && ValVT == MVT::f64) ||
          (RegVT == MVT::f64 && ValVT == MVT::i64))
        ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
      else if (ABI.IsO32() && RegVT == MVT::i32 &&
               ValVT == MVT::f64) {
        CCValAssign &NextVA = ArgLocs[++i];
        ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
                               ArgValue, ArgValue2);
                                   LocVT, DL, Chain, FIN,
      OutChains.push_back(ArgValue.getValue(1));
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (ArgLocs[i].needsCustom()) {
    if (Ins[InsIdx].Flags.isSRet()) {
    writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
bool MipsTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
                                                       bool IsSigned) const {
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    bool UseUpperBits = false;
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
    return LowerInterruptReturn(RetOps, DL, DAG);
  return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  if (Constraint == "ZC")
MipsTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
                                                 unsigned long long &Reg) {
  if (C.front() != '{' || C.back() != '}')
    return std::make_pair(false, false);
  I = std::find_if(B, E, isdigit);
  return std::make_pair(true, false);
  return VT.bitsLT(MinVT) ? MinVT : VT;
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
  unsigned long long Reg;
    return std::make_pair(0U, nullptr);
  if ((Prefix == "hi" || Prefix == "lo")) {
      return std::make_pair(0U, nullptr);
    RC = TRI->getRegClass(Prefix == "hi" ?
                          Mips::HI32RegClassID : Mips::LO32RegClassID);
    return std::make_pair(*(RC->begin()), RC);
  } else if (Prefix.starts_with("$msa")) {
      return std::make_pair(0U, nullptr);
        .Case("$msair", Mips::MSAIR)
        .Case("$msacsr", Mips::MSACSR)
        .Case("$msaaccess", Mips::MSAAccess)
        .Case("$msasave", Mips::MSASave)
        .Case("$msamodify", Mips::MSAModify)
        .Case("$msarequest", Mips::MSARequest)
        .Case("$msamap", Mips::MSAMap)
        .Case("$msaunmap", Mips::MSAUnmap)
      return std::make_pair(0U, nullptr);
    RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
    return std::make_pair(Reg, RC);
    return std::make_pair(0U, nullptr);
  if (Prefix == "$f") {
    if (VT == MVT::Other) {
      VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
    if (RC == &Mips::AFGR64RegClass) {
  } else if (Prefix == "$fcc")
    RC = TRI->getRegClass(Mips::FCCRegClassID);
  else if (Prefix == "$w") {
  return std::make_pair(*(RC->begin() + Reg), RC);
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
          (VT == MVT::f32 && Subtarget.useSoftFloat())) {
          return std::make_pair(0U, &Mips::CPU16RegsRegClass);
        return std::make_pair(0U, &Mips::GPR32RegClass);
      if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) ||
           (VT == MVT::f64 && Subtarget.isSingleFloat())) &&
        return std::make_pair(0U, &Mips::GPR32RegClass);
      if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) ||
           (VT == MVT::f64 && Subtarget.isSingleFloat())) &&
        return std::make_pair(0U, &Mips::GPR64RegClass);
      return std::make_pair(0U, nullptr);
      if (VT == MVT::v16i8)
        return std::make_pair(0U, &Mips::MSA128BRegClass);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return std::make_pair(0U, &Mips::MSA128HRegClass);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return std::make_pair(0U, &Mips::MSA128WRegClass);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return std::make_pair(0U, &Mips::MSA128DRegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &Mips::FGR32RegClass);
      else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
          return std::make_pair(0U, &Mips::FGR64RegClass);
        return std::make_pair(0U, &Mips::AFGR64RegClass);
        return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
        return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
      return std::make_pair(0U, nullptr);
      if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
        return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
      return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
      return std::make_pair(0U, nullptr);
  if (!Constraint.empty()) {
    std::pair<unsigned, const TargetRegisterClass *> R;
    R = parseRegForInlineAsmConstraint(Constraint, VT);
void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      std::vector<SDValue> &Ops,
  if (Constraint.size() > 1)
  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
    EVT Type = Op.getValueType();
    int64_t Val = C->getSExtValue();
    EVT Type = Op.getValueType();
    int64_t Val = C->getZExtValue();
    EVT Type = Op.getValueType();
    uint64_t Val = C->getZExtValue();
    EVT Type = Op.getValueType();
    int64_t Val = C->getSExtValue();
    if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
    EVT Type = Op.getValueType();
    int64_t Val = C->getSExtValue();
    if ((Val >= -65535) && (Val <= -1)) {
    EVT Type = Op.getValueType();
    int64_t Val = C->getSExtValue();
    EVT Type = Op.getValueType();
    int64_t Val = C->getSExtValue();
    if ((Val <= 65535) && (Val >= 1)) {
  Ops.push_back(Result);
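  // Each case above range-checks the constant for one of the MIPS inline-asm
  // immediate constraints before pushing it as a target constant: a signed or
  // an unsigned 16-bit value, a signed 32-bit value with the low 16 bits
  // clear (loadable with a single lui), a value in [-65535, -1], or a value
  // in [1, 65535].  Constants that fail the check simply do not produce a
  // Result, leaving the constraint unsatisfied.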
bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
EVT MipsTargetLowering::getOptimalMemOpType(
    const AttributeList &FuncAttributes) const {
bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                      bool ForCodeSize) const {
  if (VT != MVT::f32 && VT != MVT::f64)
  if (Imm.isNegZero())
  return Imm.isZero();
bool MipsTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
bool MipsTargetLowering::isLegalAddImmediate(int64_t Imm) const {
SDValue MipsTargetLowering::getPICJumpTableRelocBase(SDValue Table,
void MipsTargetLowering::copyByValRegs(
    unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
  unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
  unsigned NumRegs = LastReg - FirstReg;
  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
  unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
      (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
  for (unsigned I = 0; I < NumRegs; ++I) {
    unsigned ArgReg = ByValArgRegs[FirstReg + I];
    unsigned VReg = addLiveIn(MF, ArgReg, RC);
    unsigned Offset = I * GPRSizeInBytes;
                                 StorePtr, MachinePointerInfo(FuncArg, Offset));
    OutChains.push_back(Store);
void MipsTargetLowering::passByValArg(
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
  unsigned ByValSizeInBytes = Flags.getByValSize();
  unsigned OffsetInBytes = 0;
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
      std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
  unsigned NumRegs = LastReg - FirstReg;
  bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
    for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
                                    MachinePointerInfo(), Alignment);
      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
    if (ByValSizeInBytes == OffsetInBytes)
    if (LeftoverBytes) {
      for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
           OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
        unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
        if (RemainingSizeInBytes < LoadSizeInBytes)
          Shamt = TotalBytesLoaded * 8;
          Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
        OffsetInBytes += LoadSizeInBytes;
        TotalBytesLoaded += LoadSizeInBytes;
        Alignment = std::min(Alignment, Align(LoadSizeInBytes));
      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, Val));
  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
                        Align(Alignment), false, false,
                        nullptr, std::nullopt, MachinePointerInfo(),
                        MachinePointerInfo());
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  if (ArgRegs.size() == Idx)
        (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += RegSizeInBytes) {
        DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
    OutChains.push_back(Store);
                                     Align Alignment) const {
  assert(Size && "Byval argument's size shouldn't be 0.");
  unsigned FirstReg = 0;
  unsigned NumRegs = 0;
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
           Alignment >= Align(RegSizeInBytes) &&
           "Byval argument's alignment should be a multiple of RegSizeInBytes.");
    FirstReg = State->getFirstUnallocated(IntArgRegs);
    if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
         Size -= RegSizeInBytes, ++I, ++NumRegs)
      State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
  State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
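  // HandleByVal reserves consecutive argument GPRs for the byval object, up
  // to the end of the integer argument register list; an over-aligned object
  // first skips one register so the data starts in an even/odd pair.  The
  // portion that does not fit in registers is passed on the stack and copied
  // by the memcpy that passByValArg emits for the remaining tail bytes.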
                                         unsigned Opc) const {
         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  MI.eraseFromParent();
         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, sinkMBB);
  MI.eraseFromParent();
        .Case("$28", Mips::GP_64)
        .Case("sp", Mips::SP_64)
        .Case("$28", Mips::GP)
        .Case("sp", Mips::SP)
  unsigned Imm = MI.getOperand(2).getImm();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 3))
        .addImm(Imm + (IsLittle ? 3 : 0))
  MI.eraseFromParent();
  const bool IsLittle = Subtarget.isLittle();
  unsigned Imm = MI.getOperand(2).getImm();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
    Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 4));
        .addImm(Imm + (IsLittle ? 4 : 0));
    Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
        .addImm(Imm + (IsLittle ? 0 : 7))
        .addImm(Imm + (IsLittle ? 3 : 4))
        .addImm(Imm + (IsLittle ? 4 : 3))
        .addImm(Imm + (IsLittle ? 7 : 0))
  MI.eraseFromParent();
  const bool IsLittle = Subtarget.isLittle();
  Register StoreVal = MI.getOperand(0).getReg();
  unsigned Imm = MI.getOperand(2).getImm();
    Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 3));
        .addImm(Imm + (IsLittle ? 3 : 0));
  MI.eraseFromParent();
  const bool IsLittle = Subtarget.isLittle();
  Register StoreVal = MI.getOperand(0).getReg();
  unsigned Imm = MI.getOperand(2).getImm();
    Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
    Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 4));
        .addImm(Imm + (IsLittle ? 4 : 0));
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 3));
        .addImm(Imm + (IsLittle ? 3 : 0));
        .addImm(Imm + (IsLittle ? 4 : 7));
        .addImm(Imm + (IsLittle ? 7 : 4));
  MI.eraseFromParent();
unsigned const MachineRegisterInfo * MRI
static SDValue performSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
If the operand is a bitwise AND with a constant RHS, and the shift has a constant RHS and is the only...
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
const TargetInstrInfo & TII
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
cl::opt< bool > EmitJalrReloc
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, const MipsSubtarget &Subtarget)
static bool invertFPCondCodeUser(Mips::CondCode CC)
This function returns true if the floating point conditional branches and conditional moves which use...
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State, ArrayRef< MCPhysReg > F64Regs)
static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG, bool SingleFloat)
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static const MCPhysReg Mips64DPRegs[8]
static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG, bool IsLittle)
static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, SDValue Chain, unsigned Offset)
static unsigned addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
static std::pair< bool, bool > parsePhysicalReg(StringRef C, StringRef &Prefix, unsigned long long &Reg)
This is a helper function to parse a physical register string and split it into non-numeric and numer...
static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, SDValue Chain, SDValue Src, unsigned Offset)
static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op)
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static cl::opt< bool > NoZeroDivCheck("mno-check-zero-division", cl::Hidden, cl::desc("MIPS: Don't trap on integer division by zero."), cl::init(false))
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performSignExtendCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA, EVT ArgVT, const SDLoc &DL, SelectionDAG &DAG)
static Mips::CondCode condCodeToFCC(ISD::CondCode CC)
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, const SDLoc &DL)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
SI optimize exec mask operations pre RA
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
static const MCPhysReg IntRegs[32]
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const MCPhysReg F32Regs[64]
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
LLVM Basic Block Representation.
static BranchProbability getOne()
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
CallingConv::ID getCallingConv() const
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
bool isUpperBitsInLoc() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
uint64_t getZExtValue() const
int64_t getSExtValue() const
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
const char * getSymbol() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
const Argument * const_arg_iterator
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalValue * getGlobal() const
bool hasLocalLinkage() const
bool hasDLLImportStorageClass() const
LLVM_ABI const GlobalObject * getAliaseeObject() const
bool hasInternalLinkage() const
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
static auto fp_fixedlen_vector_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ MOVolatile
The memory access is volatile.
Flags getFlags() const
Return the raw flags of the source value.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
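createVirtualRegister and addLiveIn usually appear back to back when a formal argument arrives in a physical register (MachineFunction::addLiveIn above wraps both steps). A fragment-level sketch assuming MF is the current MachineFunction; the choice of Mips::A0 and the GPR32 class is illustrative:

  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  Register VReg = RegInfo.createVirtualRegister(&Mips::GPR32RegClass);
  RegInfo.addLiveIn(Mips::A0, VReg);   // $a0 is live into the function, copied to VReg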
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
static SpecialCallingConvType getSpecialCallingConvForCallee(const SDNode *Callee, const MipsSubtarget &Subtarget)
Determine the SpecialCallingConvType for the given callee.
MipsFunctionInfo - This class is derived from MachineFunctionInfo and contains private Mips target-specific information for each MachineFunction.
void setVarArgsFrameIndex(int Index)
unsigned getSRetReturnReg() const
int getVarArgsFrameIndex() const
MachinePointerInfo callPtrInfo(MachineFunction &MF, const char *ES)
Create a MachinePointerInfo that has an ExternalSymbolPseudoSourceValue object representing a GOT ent...
Register getGlobalBaseReg(MachineFunction &MF)
void setSRetReturnReg(unsigned Reg)
void setFormalArgInfo(unsigned Size, bool HasByval)
static const uint32_t * getMips16RetHelperMask()
const MipsInstrInfo * getInstrInfo() const override
bool inMips16Mode() const
const MipsRegisterInfo * getRegisterInfo() const override
bool hasExtractInsert() const
Features related to the presence of specific instructions.
bool isSingleFloat() const
const TargetFrameLowering * getFrameLowering() const override
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the register type for a given MVT, ensuring vectors are treated as a series of gpr sized integ...
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
static const MipsTargetLowering * create(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGPRel(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN64) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Break down vectors to the correct number of gpr sized integers.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue getAddrNonPICSym64(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - get the ISD::SETCC result ValueType
SDValue getAddrGlobal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
MipsTargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGlobalLargeGOT(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned HiFlag, unsigned LoFlag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
SDValue getDllimportVariable(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, SDValue Chain, const MachinePointerInfo &PtrInfo) const
bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override
Return true if it is profitable to fold a pair of shifts into a mask.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
CCAssignFn * CCAssignFnForReturn() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
SDValue getDllimportSymbol(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
CCAssignFn * CCAssignFnForCall() const
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the number of registers for a given MVT, ensuring vectors are treated as a series of gpr sized...
SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
virtual void getOpndList(SmallVectorImpl< SDValue > &Ops, std::deque< std::pair< unsigned, SDValue > > &RegsToPass, bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage, bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const
This function fills Ops, which is the list of operands that will later be used when a function call n...
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
SDValue getAddrLocal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN32OrN64) const
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const
const MipsSubtarget & Subtarget
void HandleByVal(CCState *, unsigned &, Align) const override
Target-specific cleanup for formal ByVal parameters.
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const
bool IsConstantInSmallSection(const DataLayout &DL, const Constant *CN, const TargetMachine &TM) const
Return true if this constant should be placed into small data section.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
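The SelectionDAG factory methods compose directly; a minimal sketch assuming DAG, DL, and two i32 operands X and Y are in scope inside a lowering or combine routine:

  // Build (X + Y) & 0xffff; getNode CSEs structurally identical nodes automatically.
  SDValue Sum  = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
  SDValue Mask = DAG.getConstant(0xffff, DL, MVT::i32);
  SDValue Low  = DAG.getNode(ISD::AND, DL, MVT::i32, Sum, Mask);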
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo)
Set CallSiteInfo to be associated with Node.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr bool empty() const
empty - Check if the string is empty.
const char * const_iterator
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
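StringSwitch is the usual tool behind name lookups such as getRegisterByName; a self-contained sketch whose name/number mapping is made up and is not the real Mips table:

  #include "llvm/ADT/StringSwitch.h"

  // Map a textual register name to an illustrative numeric id; 0 means "unknown".
  static unsigned lookupRegByName(llvm::StringRef Name) {
    return llvm::StringSwitch<unsigned>(Name)
        .Case("sp", 29)
        .Case("fp", 30)
        .Case("ra", 31)
        .Default(0);
  }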
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual bool useSoftFloat() const
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
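The TargetLoweringBase setters above are normally invoked from the target's TargetLowering constructor. An illustrative configuration sketch (the opcode, type, and register choices are examples, not the actual Mips settings):

  // Inside a TargetLowering subclass constructor:
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);        // synthesize from SETCC + SELECT
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);             // no single truncating i64->i32 store
  setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Promote);  // widen i1 extending loads
  setTargetDAGCombine({ISD::AND, ISD::OR, ISD::SHL});          // request PerformDAGCombine callbacks
  setMinFunctionAlignment(Align(4));
  setStackPointerRegisterToSaveRestore(Mips::SP);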
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
virtual TargetLoweringObjectFile * getObjFileLowering() const
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned EmitCallGraphSection
Emit section containing call graph metadata.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ BSWAP
Byte Swap and Counting operators.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FADD
Simple binary floating point operators.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ BasicBlock
Various leaf nodes.
@ SHL
Shift and rotation operations.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
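getSetCCInverse is how a combine flips a comparison when it swaps the arms of a select; a one-line sketch assuming CC holds the ISD::CondCode of an existing integer setcc:

  // Invert the condition instead of XOR'ing the boolean result with 1.
  ISD::CondCode InvCC = ISD::getSetCCInverse(CC, MVT::i32);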
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ MO_TLSGD
On a symbol operand, this indicates that the immediate is the offset to the slot in GOT which stores ...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ EarlyClobber
Register definition happens before uses.
Not(const Pred &P) -> Not< Pred >
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
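isShiftedMask_64 backs the ext/ins pattern checks earlier in this file: a mask qualifies only when its set bits form one contiguous run. A sketch; getMaskPosSize is a hypothetical helper built from the standard bit utilities:

  #include "llvm/ADT/bit.h"
  #include "llvm/Support/MathExtras.h"

  // Decompose a contiguous mask such as 0x00ff0000 into position (16) and size (8).
  static bool getMaskPosSize(uint64_t Mask, unsigned &Pos, unsigned &Size) {
    if (!llvm::isShiftedMask_64(Mask))
      return false;                    // zero, or the set bits are not contiguous
    Pos = llvm::countr_zero(Mask);     // index of the lowest set bit
    Size = llvm::popcount(Mask);       // length of the run of ones
    return true;
  }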
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
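isInt and isUInt are the usual immediate-range predicates when choosing between instruction forms; two checks with values picked purely for illustration:

  #include "llvm/Support/MathExtras.h"

  bool FitsSImm16 = llvm::isInt<16>(-0x8000);  // true: the minimum signed 16-bit value
  bool FitsUImm16 = llvm::isUInt<16>(70000);   // false: 70000 needs 17 bits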
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
const MipsTargetLowering * createMips16TargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
Create MipsTargetLowering objects.
@ Or
Bitwise or logical OR of integers.
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
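divideCeil and alignTo cover the usual byval/stack-size bookkeeping; a tiny sketch (the 4-byte word size and 8-byte alignment are illustrative):

  #include "llvm/Support/Alignment.h"
  #include "llvm/Support/MathExtras.h"

  uint64_t Words  = llvm::divideCeil(20, 4);            // 5 words to hold 20 bytes
  uint64_t Padded = llvm::alignTo(20, llvm::Align(8));  // 24 bytes after padding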
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
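The casting utilities are how the combines above recognize constant operands; a fragment-level sketch assuming an SDValue named Operand inside a DAG combine:

  // dyn_cast returns null on mismatch, so the constant case can be peeled off safely;
  // cast<> asserts instead and is reserved for operands already known to match.
  if (auto *CN = dyn_cast<ConstantSDNode>(Operand)) {
    uint64_t Imm = CN->getZExtValue();
    (void)Imm; // ... use the immediate
  }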
const MipsTargetLowering * createMipsSETargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
LLVM_ABI bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
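A back-of-the-envelope version of the gpr-sized breakdown that getVectorTypeBreakdownForCallingConv describes, assuming 32-bit registers purely for illustration:

  #include "llvm/CodeGen/ValueTypes.h"
  #include "llvm/Support/MathExtras.h"

  llvm::EVT VT = llvm::MVT::v4i32;   // a 128-bit fixed-length vector
  unsigned NumRegs = llvm::divideCeil(
      VT.getSizeInBits().getFixedValue(), 32u);   // 4 gpr-sized i32 pieces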
Align getNonZeroOrigAlign() const
SmallVector< ArgRegPair, 1 > ArgRegPairs
Vector of call argument and its forwarding register.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
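getGOT and getFixedStack provide the MachinePointerInfo attached to the loads built by helpers like getAddrGlobal and getAddrLocal; a fragment-level sketch assuming DAG, DL, Ty, Chain, and an already-wrapped address node Addr are in scope:

  // Load the GOT entry for a symbol; the pointer info tells alias analysis and the
  // scheduler that this access hits the GOT rather than ordinary user memory.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue GotEntry = DAG.getLoad(Ty, DL, Chain, Addr,
                                 MachinePointerInfo::getGOT(MF));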
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const