#define DEBUG_TYPE "mips-lower"
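// -mno-check-zero-division: when set, do not emit the trap that normally
// follows an integer division to catch division by zero (see
// insertDivByZeroTrap below).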
    cl::desc("MIPS: Don't trap on integer division by zero."),
    Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
    Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  return NumIntermediates;
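// getTargetNode overloads: wrap global-address, external-symbol,
// block-address, jump-table and constant-pool nodes in their target forms,
// carrying the relocation flag used by the getAddr* helpers.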
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                      N->getOffset(), Flag);
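// createFastISel: fast instruction selection is only used when generating
// PIC code for the O32 ABI without XGOT.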
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
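// performDivRemCombine: a combined divide/remainder leaves its results in the
// LO/HI register pair; copy out only the halves that are actually used.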
  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
                               N->getOperand(0), N->getOperand(1));
  if (N->hasAnyUseOfValue(0)) {
  if (N->hasAnyUseOfValue(1)) {
                   "Illegal Condition Code");
  if (!LHS.getValueType().isFloatingPoint())
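// performCMovFPCombine: when the value-if-false is zero, swap the operands and
// invert the FP condition so the conditional move can read the $zero register.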
  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
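// performANDCombine: recognise (and (srl/shl $src, pos), mask) where the mask
// is a shifted run of ones and emit an EXT (bit-field extract) node.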
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
    NewOperand = FirstOperand;
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
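// performORCombine: match (or (and $src1, mask0), (and (shl $src2, pos), mask1))
// and related forms and emit an INS (bit-field insert) node when the two masks
// are complementary.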
  SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
       SecondOperand.getOpcode() == ISD::SHL) ||
       SecondOperand.getOpcode() == ISD::AND)) {
                          ? SecondOperand.getOperand(0)
    if (!(CN = dyn_cast<ConstantSDNode>(AndMask)) ||
                           ? SecondOperand.getOperand(1)
    if (!(CN = dyn_cast<ConstantSDNode>(ShlShift)))
    if (SMPos0 != 0 || SMSize0 != ShlShiftValue)
    EVT ValTy = N->getValueType(0);
    SMPos1 = ShlShiftValue;
    SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1;
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
    if (SecondOperand.getOpcode() == ISD::AND &&
        SecondOperand.getOperand(0).getOpcode() == ISD::SHL) {
      if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand.getOperand(1))) ||
      if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
      if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
      EVT ValTy = N->getValueType(0);
      if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      if (SecondOperand.getOpcode() == ISD::AND) {
        if (!(CN1 = dyn_cast<ConstantSDNode>(SecondOperand->getOperand(1))))
        if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
      EVT ValTy = N->getOperand(0)->getValueType(0);
                          SecondOperand, Const1);
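// performMADD_MSUBCombine: fold a 64-bit add/sub of a multiply into a
// multiply-add/multiply-sub node that accumulates in the HI/LO pair (not done
// in MIPS16 mode).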
  if (!Mult.hasOneUse())
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);
  if (!IsSigned && !IsUnsigned)
  std::tie(BottomHalf, TopHalf) =
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
  EVT ValTy = N->getValueType(0);
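// performSHLCombine: match (shl (and $src, mask), pos) and emit a CIns
// (insert-and-clear) node on targets that provide it.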
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
  unsigned Opc = N->getOpcode();
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);
           N->getOperand(0).getOpcode() == ISD::SRL) ||
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  if (N->getOperand(0).getValueType().isVector())
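// LowerOperation: dispatch each custom-lowered opcode to its lower* helper.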
  switch (Op.getOpcode())
    return lowerFCANONICALIZE(Op, DAG);
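// insertDivByZeroTrap: unless -mno-check-zero-division was given, emit a TEQ
// against $zero after the division so that a zero divisor traps.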
                                        bool Is64Bit, bool IsMicroMips) {
          TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
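// EmitInstrWithCustomInserter: expand atomic, division and select pseudo
// instructions into real control flow / post-RA pseudos after instruction
// selection.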
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
    return emitLDR_W(MI, BB);
    return emitLDR_D(MI, BB);
    return emitSTR_W(MI, BB);
    return emitSTR_D(MI, BB);
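// emitAtomicBinary: rewrite a word/doubleword atomic read-modify-write pseudo
// into its *_POSTRA form; the min/max variants need one extra scratch register.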
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
                                      unsigned SrcReg) const {
  int64_t ShiftImm = 32 - (Size * 8);
         "Unsupported size for EmitAtomicBinaryPartial.");
  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
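// emitAtomicCmpSwap: copy the expected and replacement values into fresh
// virtual registers (the post-RA expansion may clobber its operands) before
// emitting the *_POSTRA pseudo.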
  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
  MI.eraseFromParent();
         "Unsupported size for EmitAtomicCmpSwapPartial.");
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
                          ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
  MI.eraseFromParent();
                     FCC0, Dest, CondRes);
         "Floating point operand expected.");
  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();
  Args.push_back(Entry);
      .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  unsigned ArgSizeInBytes =
  unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
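// lowerFCOPYSIGN32/64: transfer the sign bit between FP values using integer
// operations; EXT/INS are used when available, shift-and-mask sequences
// otherwise.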
                                 bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  if (HasExtractInsert) {
  if (TyX == MVT::f32)
                                 bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  if (HasExtractInsert) {
    if (WidthX > WidthY)
    else if (WidthY > WidthX)
  if (WidthX > WidthY)
  else if (WidthY > WidthX)
                                   bool HasExtractInsert) const {
                          Op.getOperand(0), Const1);
  if (HasExtractInsert)
  if (Op.getValueType() == MVT::f32)
                                   bool HasExtractInsert) const {
  if (HasExtractInsert)
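// lowerFRAMEADDR/lowerRETURNADDR only support depth 0; the return address is
// read from $ra. lowerEH_RETURN passes the stack offset in $v1 and the handler
// address in $v0 (their 64-bit counterparts on N64).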
  EVT VT = Op.getValueType();
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");
  EVT VT = Op.getValueType();
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
                           DL, VTList, Cond, ShiftRightHi,
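// lowerLOAD/lowerSTORE: unaligned i32/i64 accesses are expanded into LWL/LWR
// (and SWL/SWR) pairs on subtargets without hardware unaligned access support.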
  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
                     LD->getMemOperand());
  EVT MemVT = LD->getMemoryVT();
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32) || (VT == MVT::i64));
  SDValue Ops[] = { SRL, LWR.getValue(1) };
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
  EVT ValTy = Op->getValueType(0);
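// CC_MipsO32: hand-written O32 argument assignment - the first four argument
// words use $a0-$a3, and f32/f64 values go in FPU registers or GPR pairs
// depending on varargs and on the arguments that precede them.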
  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
  if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
    else if (ArgFlags.isZExt())
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    else if (ArgFlags.isZExt())
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
  if (ValVT == MVT::i32 && isVectorFloat) {
    if (Reg == Mips::A2)
  } else if (ValVT == MVT::i32 ||
             (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    if (Reg == Mips::A1 || Reg == Mips::A3)
    if (ValVT == MVT::f32) {
      if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
#include "MipsGenCallingConv.inc"
  return CC_Mips_FixedArg;
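// passArgOnStack/getOpndList: store outgoing stack arguments and build the
// operand list of the call node ($gp for PIC calls, the argument registers,
// and the calling-convention register mask).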
                                        const SDLoc &DL, bool IsTailCall,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  for (auto &R : RegsToPass) {
  for (auto &R : RegsToPass)
  assert(Mask && "Missing call preserved mask for calling convention");
    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {
  switch (MI.getOpcode()) {
    case Mips::JALRPseudo:
    case Mips::JALR64Pseudo:
    case Mips::JALR16_MM:
    case Mips::JALRC16_MMR6:
    case Mips::TAILCALLREG:
    case Mips::TAILCALLREG64:
    case Mips::TAILCALLR6REG:
    case Mips::TAILCALL64R6REG:
    case Mips::TAILCALLREG_MM:
    case Mips::TAILCALLREG_MMR6: {
          Node->getNumOperands() < 1 ||
          Node->getOperand(0).getNumOperands() < 2) {
      const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
              dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
        if (!isa<Function>(G->getGlobal())) {
          LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                            << G->getGlobal()->getName() << "\n");
        Sym = G->getGlobal()->getName();
              dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
        Sym = ES->getSymbol();
      dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
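// LowerCall: analyze the outgoing arguments, decide tail-call eligibility,
// move register arguments and byval aggregates into place, then emit the call
// sequence and read back the results via LowerCallResult.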
  unsigned ReservedArgArea =
  CCInfo.AllocateStack(ReservedArgArea, Align(1));
  unsigned StackSize = CCInfo.getStackSize();
  bool InternalLinkage = false;
    IsTailCall = isEligibleForTailCallOptimization(
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());
                       "site marked musttail");
  StackSize = alignTo(StackSize, StackAlignment);
  if (!(IsTailCall || MemcpyInByVal))
  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    SDValue Arg = OutVals[OutIdx];
    bool UseUpperBits = false;
    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
      CCInfo.nextInRegsParam();
      if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
          (ValVT == MVT::f64 && LocVT == MVT::i64) ||
          (ValVT == MVT::i64 && LocVT == MVT::f64))
      else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
        Register LocRegHigh = ArgLocs[++i].getLocReg();
        RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
        RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
                                         Chain, Arg, DL, IsTailCall, DAG));
  if (!MemOpChains.empty())
  bool GlobalOrExternal = false, IsCallReloc = false;
    if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
      if (auto *F = dyn_cast<Function>(N->getGlobal())) {
        if (F->hasFnAttribute("long-call"))
          UseLongCalls = true;
        else if (F->hasFnAttribute("short-call"))
          UseLongCalls = false;
    if (InternalLinkage)
    GlobalOrExternal = true;
    const char *Sym = S->getSymbol();
    GlobalOrExternal = true;
  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);
  if (!(MemcpyInByVal)) {
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
SDValue MipsTargetLowering::LowerCallResult(
      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
                               RVLocs[i].getLocVT(), InGlue);
      unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
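// LowerFormalArguments: turn incoming register and stack arguments into
// virtual-register copies and frame-index loads for the function body.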
SDValue MipsTargetLowering::LowerFormalArguments(
  std::vector<SDValue> OutChains;
  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
        "Functions with the interrupt attribute cannot have arguments!");
  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
                           CCInfo.getInRegsParamsCount() > 0);
  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (Ins[InsIdx].isOrigArg()) {
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();
    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();
      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
          (RegVT == MVT::i64 && ValVT == MVT::f64) ||
          (RegVT == MVT::f64 && ValVT == MVT::i64))
      else if (ABI.IsO32() && RegVT == MVT::i32 &&
               ValVT == MVT::f64) {
                               ArgValue, ArgValue2);
                             LocVT, DL, Chain, FIN,
      OutChains.push_back(ArgValue.getValue(1));
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (ArgLocs[i].needsCustom()) {
    if (Ins[InsIdx].Flags.isSRet()) {
    writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
bool MipsTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
                                                       bool IsSigned) const {
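// LowerReturn: copy the return values into their assigned registers, return
// the sret pointer in $v0 when present, and finish with MipsISD::Ret (or an
// interrupt-handler return).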
  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    bool UseUpperBits = false;
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
    return LowerInterruptReturn(RetOps, DL, DAG);
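// Inline-assembly constraints: single-letter constraints map to register
// classes ('r'/'d'/'y' GPRs, 'f' FPU/MSA, 'c' $t9, 'l' the LO register), and
// explicit "{...}" register names are resolved by parseRegForInlineAsmConstraint.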
MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  if (Constraint == "ZC")
MipsTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (isa<ConstantInt>(CallOperandVal))
                                 unsigned long long &Reg) {
  if (C.front() != '{' || C.back() != '}')
    return std::make_pair(false, false);
  I = std::find_if(B, E, isdigit);
    return std::make_pair(true, false);
  return VT.bitsLT(MinVT) ? MinVT : VT;
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
  unsigned long long Reg;
    return std::make_pair(0U, nullptr);
  if ((Prefix == "hi" || Prefix == "lo")) {
      return std::make_pair(0U, nullptr);
    RC = TRI->getRegClass(Prefix == "hi" ?
                          Mips::HI32RegClassID : Mips::LO32RegClassID);
    return std::make_pair(*(RC->begin()), RC);
  } else if (Prefix.starts_with("$msa")) {
      return std::make_pair(0U, nullptr);
        .Case("$msair", Mips::MSAIR)
        .Case("$msacsr", Mips::MSACSR)
        .Case("$msaaccess", Mips::MSAAccess)
        .Case("$msasave", Mips::MSASave)
        .Case("$msamodify", Mips::MSAModify)
        .Case("$msarequest", Mips::MSARequest)
        .Case("$msamap", Mips::MSAMap)
        .Case("$msaunmap", Mips::MSAUnmap)
      return std::make_pair(0U, nullptr);
    RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
    return std::make_pair(Reg, RC);
      return std::make_pair(0U, nullptr);
    if (Prefix == "$f") {
      if (VT == MVT::Other)
      if (RC == &Mips::AFGR64RegClass) {
    } else if (Prefix == "$fcc")
      RC = TRI->getRegClass(Mips::FCCRegClassID);
    else if (Prefix == "$w") {
    assert(Reg < RC->getNumRegs());
    return std::make_pair(*(RC->begin() + Reg), RC);
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
          return std::make_pair(0U, &Mips::CPU16RegsRegClass);
        return std::make_pair(0U, &Mips::GPR32RegClass);
        return std::make_pair(0U, &Mips::GPR32RegClass);
        return std::make_pair(0U, &Mips::GPR64RegClass);
      return std::make_pair(0U, nullptr);
      if (VT == MVT::v16i8)
        return std::make_pair(0U, &Mips::MSA128BRegClass);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return std::make_pair(0U, &Mips::MSA128HRegClass);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return std::make_pair(0U, &Mips::MSA128WRegClass);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return std::make_pair(0U, &Mips::MSA128DRegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &Mips::FGR32RegClass);
          return std::make_pair(0U, &Mips::FGR64RegClass);
        return std::make_pair(0U, &Mips::AFGR64RegClass);
        return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
        return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
      return std::make_pair(0U, nullptr);
      if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
        return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
      return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
      return std::make_pair(0U, nullptr);
  if (!Constraint.empty()) {
    std::pair<unsigned, const TargetRegisterClass *> R;
    R = parseRegForInlineAsmConstraint(Constraint, VT);
void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      std::vector<SDValue> &Ops,
  if (Constraint.size() > 1)
  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
      int64_t Val = C->getSExtValue();
      if (isInt<16>(Val)) {
      int64_t Val = C->getZExtValue();
      if (isUInt<16>(Val)) {
      int64_t Val = C->getSExtValue();
      if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)) {
      int64_t Val = C->getSExtValue();
      if ((Val >= -65535) && (Val <= -1)) {
      int64_t Val = C->getSExtValue();
      if ((isInt<15>(Val))) {
      int64_t Val = C->getSExtValue();
      if ((Val <= 65535) && (Val >= 1)) {
    Ops.push_back(Result);
bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
EVT MipsTargetLowering::getOptimalMemOpType(
bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                      bool ForCodeSize) const {
  if (VT != MVT::f32 && VT != MVT::f64)
  if (Imm.isNegZero())
  return Imm.isZero();
unsigned MipsTargetLowering::getJumpTableEncoding() const {
bool MipsTargetLowering::useSoftFloat() const {
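// copyByValRegs/passByValArg: byval aggregates are split between argument
// registers and stack; these helpers copy the register portion into the
// callee's frame object and out of the caller's memory, respectively.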
void MipsTargetLowering::copyByValRegs(
    unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
  unsigned NumRegs = LastReg - FirstReg;
  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
  unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
        (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
  for (unsigned I = 0; I < NumRegs; ++I) {
    unsigned ArgReg = ByValArgRegs[FirstReg + I];
    unsigned VReg = addLiveIn(MF, ArgReg, RC);
    unsigned Offset = I * GPRSizeInBytes;
    OutChains.push_back(Store);
void MipsTargetLowering::passByValArg(
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
  unsigned ByValSizeInBytes = Flags.getByValSize();
  unsigned OffsetInBytes = 0;
      std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
  unsigned NumRegs = LastReg - FirstReg;
    bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
    for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
    if (ByValSizeInBytes == OffsetInBytes)
    if (LeftoverBytes) {
      for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
           OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
        unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
        if (RemainingSizeInBytes < LoadSizeInBytes)
          Shamt = TotalBytesLoaded * 8;
          Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
        OffsetInBytes += LoadSizeInBytes;
        TotalBytesLoaded += LoadSizeInBytes;
        Alignment = std::min(Alignment, Align(LoadSizeInBytes));
      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, Val));
  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
                        Align(Alignment), false, false,
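// writeVarArgRegs: spill the remaining variadic argument registers to the
// va_arg save area so va_arg can walk all arguments in memory.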
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
        (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += RegSizeInBytes) {
    cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
    OutChains.push_back(Store);
                                     Align Alignment) const {
  assert(Size && "Byval argument's size shouldn't be 0.");
  unsigned FirstReg = 0;
  unsigned NumRegs = 0;
           Alignment >= Align(RegSizeInBytes) &&
           "Byval argument's alignment should be a multiple of RegSizeInBytes.");
    if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
         Size -= RegSizeInBytes, ++I, ++NumRegs)
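// emitPseudoSELECT/emitPseudoD_SELECT: expand select pseudos into a diamond of
// basic blocks with a conditional branch when the subtarget lacks
// conditional-move instructions.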
                                  unsigned Opc) const {
         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  MI.eraseFromParent();
         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  MI.eraseFromParent();
        .Case("$28", Mips::GP_64)
        .Case("sp", Mips::SP_64)
        .Case("$28", Mips::GP)
        .Case("sp", Mips::SP)
  unsigned Imm = MI.getOperand(2).getImm();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 3))
        .addImm(Imm + (IsLittle ? 3 : 0))
  MI.eraseFromParent();
  unsigned Imm = MI.getOperand(2).getImm();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
    Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 4));
        .addImm(Imm + (IsLittle ? 4 : 0));
    Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
        .addImm(Imm + (IsLittle ? 0 : 7))
        .addImm(Imm + (IsLittle ? 3 : 4))
        .addImm(Imm + (IsLittle ? 4 : 3))
        .addImm(Imm + (IsLittle ? 7 : 0))
  MI.eraseFromParent();
  Register StoreVal = MI.getOperand(0).getReg();
  unsigned Imm = MI.getOperand(2).getImm();
    Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 3));
        .addImm(Imm + (IsLittle ? 3 : 0));
  MI.eraseFromParent();
  Register StoreVal = MI.getOperand(0).getReg();
  unsigned Imm = MI.getOperand(2).getImm();
    Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
    Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 4));
        .addImm(Imm + (IsLittle ? 4 : 0));
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        .addImm(Imm + (IsLittle ? 0 : 3));
        .addImm(Imm + (IsLittle ? 3 : 0));
        .addImm(Imm + (IsLittle ? 4 : 7));
        .addImm(Imm + (IsLittle ? 7 : 4));
  MI.eraseFromParent();
unsigned const MachineRegisterInfo * MRI
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ATTRIBUTE_UNUSED
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
unsigned const TargetRegisterInfo * TRI
cl::opt< bool > EmitJalrReloc
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) LLVM_ATTRIBUTE_UNUSED
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, ArrayRef< MCPhysReg > F64Regs)
static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, const MipsSubtarget &Subtarget)
static bool invertFPCondCodeUser(Mips::CondCode CC)
This function returns true if the floating point conditional branches and conditional moves which use...
static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG, bool SingleFloat)
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static const MCPhysReg Mips64DPRegs[8]
static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG, bool IsLittle)
static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, SDValue Chain, unsigned Offset)
static unsigned addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
static std::pair< bool, bool > parsePhysicalReg(StringRef C, StringRef &Prefix, unsigned long long &Reg)
This is a helper function to parse a physical register string and split it into non-numeric and numer...
static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, SDValue Chain, SDValue Src, unsigned Offset)
static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
cl::opt< bool > EmitJalrReloc
static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op)
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static cl::opt< bool > NoZeroDivCheck("mno-check-zero-division", cl::Hidden, cl::desc("MIPS: Don't trap on integer division by zero."), cl::init(false))
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA, EVT ArgVT, const SDLoc &DL, SelectionDAG &DAG)
static Mips::CondCode condCodeToFCC(ISD::CondCode CC)
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, const SDLoc &DL)
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI optimize exec mask operations pre RA
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
static const MCPhysReg IntRegs[32]
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static const MCPhysReg F32Regs[64]
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
LLVM Basic Block Representation.
static BranchProbability getOne()
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
CallingConv::ID getCallingConv() const
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
bool isUpperBitsInLoc() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
const char * getSymbol() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalValue * getGlobal() const
bool hasLocalLinkage() const
const GlobalObject * getAliaseeObject() const
bool hasInternalLinkage() const
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
static auto fp_fixedlen_vector_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ MOVolatile
The memory access is volatile.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
ArrayRef< MCPhysReg > GetVarArgRegs() const
The registers to use for the variable argument list.
bool ArePtrs64bit() const
unsigned GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const
Obtain the size of the area allocated by the callee for arguments.
unsigned GetPtrAddiuOp() const
unsigned GetPtrAndOp() const
ArrayRef< MCPhysReg > GetByValArgRegs() const
The registers to use for byval arguments.
unsigned GetNullPtr() const
bool WasOriginalArgVectorFloat(unsigned ValNo) const
static SpecialCallingConvType getSpecialCallingConvForCallee(const SDNode *Callee, const MipsSubtarget &Subtarget)
Determine the SpecialCallingConvType for the given callee.
MipsFunctionInfo - This class is derived from MachineFunction private Mips target-specific informatio...
void setVarArgsFrameIndex(int Index)
unsigned getSRetReturnReg() const
int getVarArgsFrameIndex() const
MachinePointerInfo callPtrInfo(MachineFunction &MF, const char *ES)
Create a MachinePointerInfo that has an ExternalSymbolPseudoSourceValue object representing a GOT ent...
Register getGlobalBaseReg(MachineFunction &MF)
void setSRetReturnReg(unsigned Reg)
void setFormalArgInfo(unsigned Size, bool HasByval)
static const uint32_t * getMips16RetHelperMask()
bool inMicroMipsMode() const
bool useSoftFloat() const
const MipsInstrInfo * getInstrInfo() const override
bool inMips16Mode() const
bool inAbs2008Mode() const
const MipsRegisterInfo * getRegisterInfo() const override
bool systemSupportsUnalignedAccess() const
Does the system support unaligned memory access.
bool hasExtractInsert() const
Features related to the presence of specific instructions.
bool isSingleFloat() const
bool useLongCalls() const
unsigned getGPRSizeInBytes() const
bool inMips16HardFloat() const
const TargetFrameLowering * getFrameLowering() const override
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the register type for a given MVT, ensuring vectors are treated as a series of gpr sized integ...
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
static const MipsTargetLowering * create(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGPRel(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN64) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Break down vectors to the correct number of gpr sized integers.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName - This method returns the name of a target specific
SDValue getAddrNonPICSym64(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - get the ISD::SETCC result ValueType
SDValue getAddrGlobal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
MipsTargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGlobalLargeGOT(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned HiFlag, unsigned LoFlag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
CCAssignFn * CCAssignFnForReturn() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
CCAssignFn * CCAssignFnForCall() const
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the number of registers for a given MVT, ensuring vectors are treated as a series of gpr sized...
SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
virtual void getOpndList(SmallVectorImpl< SDValue > &Ops, std::deque< std::pair< unsigned, SDValue > > &RegsToPass, bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage, bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const
This function fills Ops, which is the list of operands that will later be used when a function call n...
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
SDValue getAddrLocal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN32OrN64) const
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const
const MipsSubtarget & Subtarget
void HandleByVal(CCState *, unsigned &, Align) const override
Target-specific cleanup for formal ByVal parameters.
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const
bool IsConstantInSmallSection(const DataLayout &DL, const Constant *CN, const TargetMachine &TM) const
Return true if this constant should be placed into small data section.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
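As a brief illustration of this helper (a hedged sketch, not taken from this file: it assumes a SelectionDAG DAG, an SDLoc DL, and two i32 SDValues LHS and RHS are already in scope inside a lowering hook), a "less than" comparison can be built directly from an ISD::CondCode:

// Minimal sketch: build LHS < RHS as an i32 SETCC node.
// DAG, DL, LHS and RHS are assumed to be in scope; names are illustrative.
SDValue Cond = DAG.getSetCC(DL, MVT::i32, LHS, RHS, ISD::SETLT);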
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node that starts a new call frame, in which InSize bytes are set up inside ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
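For illustration only, this hedged sketch (assuming a SelectionDAG DAG, an SDLoc DL, and an i32 SDValue Val are in scope; the names are hypothetical) combines getConstant with getNode to form Val + 4:

// Minimal sketch: materialize the constant 4 and add it to Val.
SDValue Four = DAG.getConstant(4, DL, MVT::i32);
SDValue Sum  = DAG.getNode(ISD::ADD, DL, MVT::i32, Val, Four);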
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo)
Set CallSiteInfo to be associated with Node.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
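A small self-contained sketch of the StringSwitch idiom (the helper name and the tag values below are hypothetical, not taken from this file):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Map a constraint letter to a small integer tag, -1 for anything unknown.
static int classifyConstraint(llvm::StringRef C) {
  return llvm::StringSwitch<int>(C)
      .Case("r", 0)
      .Case("f", 1)
      .Case("m", 2)
      .Default(-1);
}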
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
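As a hedged illustration, calls of this kind typically appear in a target's TargetLowering constructor; the particular operations and actions shown here are examples, not a transcription of this file:

// Inside a TargetLowering-derived constructor: request custom lowering for
// i32 SELECT and expand BR_JT rather than emitting it directly.
setOperationAction(ISD::SELECT, MVT::i32, Custom);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);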
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
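An illustrative fragment (again assumed to live in a TargetLowering constructor; the node kinds listed are examples only) that registers several generic opcodes so the combiner calls back into the target's PerformDAGCombine hook:

// Ask the DAG combiner to revisit these generic nodes via PerformDAGCombine.
setTargetDAGCombine({ISD::AND, ISD::OR, ISD::SELECT});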
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
virtual TargetLoweringObjectFile * getObjFileLowering() const
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FADD
Simple binary floating point operators.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SHL
Shift and rotation operations.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
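For example (a one-line sketch; the choice of SETEQ and i32 is purely illustrative), inverting an equality comparison yields inequality:

// The inverse of SETEQ is SETNE; the EVT selects integer vs. FP semantics.
ISD::CondCode Inv = ISD::getSetCCInverse(ISD::SETEQ, MVT::i32);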
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
@ Bitcast
Perform the operation on a different, but equivalently sized type.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ MO_GOT_CALL
MO_GOT_CALL - Represents the offset into the global offset table at which the address of a call site ...
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
@ MO_GOT
MO_GOT - Represents the offset into the global offset table at which the address the relocation entry...
@ MO_JALR
Helper operand used to generate R_MIPS_JALR.
@ MO_GOTTPREL
MO_GOTTPREL - Represents the offset from the thread pointer (Initial.
@ MO_GOT_HI16
MO_GOT_HI16/LO16, MO_CALL_HI16/LO16 - Relocations used for large GOTs.
@ MO_TLSLDM
MO_TLSLDM - Represents the offset into the global offset table at which.
@ MO_TLSGD
MO_TLSGD - Represents the offset into the global offset table at which.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ EarlyClobber
Register definition happens before uses.
Not(const Pred &P) -> Not< Pred >
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
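A self-contained sketch of what qualifies as a shifted mask (the values are chosen for illustration):

#include "llvm/Support/MathExtras.h"

// One contiguous run of ones (bits 4..11) is a shifted mask...
static_assert(llvm::isShiftedMask_64(0x0FF0));
// ...but two separate runs (bits 0..3 and 8..11) are not.
static_assert(!llvm::isShiftedMask_64(0x0F0F));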
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
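A short sketch of the rounding behavior (example values only):

#include "llvm/Support/MathExtras.h"

// Splitting 10 bytes across 4-byte registers needs 3 of them: ceil(10/4) == 3.
static_assert(llvm::divideCeil(10, 4) == 3);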
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
const MipsTargetLowering * createMips16TargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
Create MipsTargetLowering objects.
@ Or
Bitwise or logical OR of integers.
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
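A minimal sketch of this rounding against an 8-byte boundary (the helper name and the boundary are illustrative assumptions):

#include "llvm/Support/Alignment.h"

// Round a byte count up to an 8-byte slot: padToSlot(13) returns 16.
static uint64_t padToSlot(uint64_t Bytes) {
  return llvm::alignTo(Bytes, llvm::Align(8));
}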
DWARFExpression::Operation Op
const MipsTargetLowering * createMipsSETargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
Align getNonZeroOrigAlign() const
SmallVector< ArgRegPair, 1 > ArgRegPairs
Vector of call argument and its forwarding register.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const