#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

static cl::opt<bool> ZeroDivCheck("loongarch-check-zero-division", cl::Hidden,
                                  cl::desc("Trap on integer division by zero."),
                                  cl::init(false));

SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
                                                SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::EH_DWARF_CFA:       return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::GlobalAddress:      return lowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return lowerGlobalTLSAddress(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:  return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:     return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::BlockAddress:       return lowerBlockAddress(Op, DAG);
  case ISD::JumpTable:          return lowerJumpTable(Op, DAG);
  case ISD::SHL_PARTS:          return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:          return lowerShiftRightParts(Op, DAG, /*IsSRA=*/true);
  case ISD::SRL_PARTS:          return lowerShiftRightParts(Op, DAG, /*IsSRA=*/false);
  case ISD::ConstantPool:       return lowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return lowerFP_TO_SINT(Op, DAG);
  case ISD::BITCAST:            return lowerBITCAST(Op, DAG);
  case ISD::UINT_TO_FP:         return lowerUINT_TO_FP(Op, DAG);
  case ISD::SINT_TO_FP:         return lowerSINT_TO_FP(Op, DAG);
  case ISD::VASTART:            return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:          return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return lowerRETURNADDR(Op, DAG);
  case ISD::WRITE_REGISTER:     return lowerWRITE_REGISTER(Op, DAG);
  }
  return SDValue();
}
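// Illustrative sketch (not part of the original listing): LowerOperation is
// only reached for opcode/type pairs that the LoongArchTargetLowering
// constructor registers as Custom. The real constructor registers many more
// operations than shown here; these lines only illustrate the mechanism,
// e.g. (inside the constructor):
//
//   MVT GRLenVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
//   setOperationAction(ISD::GlobalAddress, GRLenVT, Custom);
//   setOperationAction(ISD::BlockAddress, GRLenVT, Custom);
//   setOperationAction(ISD::VASTART, MVT::Other, Custom);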
SDValue LoongArchTargetLowering::lowerWRITE_REGISTER(SDValue Op,
                                                     SelectionDAG &DAG) const {
  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
    DAG.getContext()->emitError(
        "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  }

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
    DAG.getContext()->emitError(
        "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
  }

  return Op;
}

// lowerFRAMEADDR (excerpt)
  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
    DAG.getContext()->emitError("argument to '__builtin_frame_address' must "
                                "be a constant integer");
    return SDValue();
  }
  // ...
  EVT VT = Op.getValueType();
  // ...
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  int GRLenInBytes = Subtarget.getGRLen() / 8;
  // ...
  int Offset = -(GRLenInBytes * 2);
  // ...

// lowerRETURNADDR (excerpt)
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
    DAG.getContext()->emitError(
        "return address can only be determined for the current frame");
    return SDValue();
  }
// lowerVASTART (excerpt)
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  // ...

// lowerUINT_TO_FP (excerpt)
  assert(/* ... && */ !Subtarget.hasBasicD() && "unexpected target features");
  // ...
  auto *C = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))
    return Op;
  // ...
  EVT RetVT = Op.getValueType();
  RTLIB::Libcall LC = RTLIB::getUINTTOFP(OpVT, RetVT);
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, RetVT, Op0, CallOptions, DL, Chain);
  // ...

// lowerSINT_TO_FP (excerpt)
  assert(/* ... && */ !Subtarget.hasBasicD() && "unexpected target features");
  // ...
  EVT RetVT = Op.getValueType();
  RTLIB::Libcall LC = RTLIB::getSINTTOFP(OpVT, RetVT);
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, RetVT, Op0, CallOptions, DL, Chain);
  // ...

// lowerFP_TO_SINT (excerpt)
  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      /* ... */) {
    // ...
  }
template <class NodeTy>
SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                         bool IsLocal) const {
  // ...
}

// lowerBlockAddress / lowerJumpTable / lowerConstantPool / lowerGlobalAddress
  return getAddr(cast<BlockAddressSDNode>(Op), DAG);
  // ...
  return getAddr(cast<JumpTableSDNode>(Op), DAG);
  // ...
  return getAddr(cast<ConstantPoolSDNode>(Op), DAG);
  // ...
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  return getAddr(N, DAG, N->getGlobal()->isDSOLocal());

// getStaticTLSAddr / getDynamicTLSAddr (excerpts)
SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                                  SelectionDAG &DAG,
                                                  unsigned Opc) const {
  // ...
}

SDValue LoongArchTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                                   SelectionDAG &DAG,
                                                   unsigned Opc) const {
  // ...
  Args.push_back(Entry);
  // ...
}

SDValue
LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  // ...
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  assert(N->getOffset() == 0 && "unexpected offset in global node");

  SDValue Addr;
  switch (getTargetMachine().getTLSModel(N->getGlobal())) {
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_GD);
    break;
  case TLSModel::LocalDynamic:
    Addr = getDynamicTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LD);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_IE);
    break;
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE);
    break;
  }
  return Addr;
}
SDValue
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  switch (Op.getConstantOperandVal(0)) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    // ...
  }
  }
}
SDValue
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  // ...
  std::string Name = Op->getOperationName(0);
  const StringRef ErrorMsgOOR = "out of range";

  switch (Op.getConstantOperandVal(1)) {
  // ...
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w: {
    std::string Name = Op->getOperationName(0);
    // ...
  }
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
    if (!isUInt<14>(Imm))
      return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
    if (!isUInt<14>(Imm))
      return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
    if (!isUInt<14>(Imm))
      return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
  case Intrinsic::loongarch_iocsrrd_d: {
    // ...
  }
  // ... (on LA32 the 64-bit-only intrinsics are rejected, e.g.
  //      "llvm.loongarch.crc.w.d.w requires target: loongarch64")
#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getMergeValues(                                                 \
        {DAG.getNode(LoongArchISD::NODE, DL, GRLenVT, Op0, Op.getOperand(2)),  \
         Op.getOperand(0)},                                                    \
        DL);                                                                   \
  }
  // ...
  case Intrinsic::loongarch_cpucfg: {
    // ...
  }
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
    if (!isUInt<8>(Imm)) {
      // ... emit the "out of range" diagnostic ...
    }
    // ...
  }
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF()) {
      DAG.getContext()->emitError(
          "llvm.loongarch.movfcsr2gr expects basic f target feature");
      // ...
    }
    unsigned Imm = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
    if (!isUInt<2>(Imm)) {
      // ...
    }
    // ...
  }
  }
  // ...
  return Op.getOperand(0);
}
SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op2 = Op.getOperand(2);
  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);
  const StringRef ErrorMsgOOR = "out of range";

  switch (IntrinsicEnum) {
  // ...
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit()) {
      DAG.getContext()->emitError(
          "llvm.loongarch.cacop.d requires target: loongarch64");
      return Op.getOperand(0);
    }
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit()) {
      DAG.getContext()->emitError(
          "llvm.loongarch.cacop.w requires target: loongarch32");
      return Op.getOperand(0);
    }
    // ...
    unsigned Imm1 = cast<ConstantSDNode>(Op2)->getZExtValue();
    if (!isUInt<5>(Imm1))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
    int Imm2 = cast<ConstantSDNode>(Op4)->getSExtValue();
    if (!isInt<12>(Imm2))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
  case Intrinsic::loongarch_dbar: {
    unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
    if (!isUInt<15>(Imm))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
  case Intrinsic::loongarch_ibar: {
    unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
    if (!isUInt<15>(Imm))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
  case Intrinsic::loongarch_break: {
    unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
    if (!isUInt<15>(Imm))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF()) {
      DAG.getContext()->emitError(
          "llvm.loongarch.movgr2fcsr expects basic f target feature");
      // ...
    }
    unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
    // ...
  }
  case Intrinsic::loongarch_syscall: {
    unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
    if (!isUInt<15>(Imm))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    // ...
  }
#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    if (Subtarget.is64Bit())                                                   \
      return DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Op0,              \
                         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),      \
                         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3));     \
    else                                                                       \
      return DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Op0, Op2, Op3);   \
  }
  // ...
  case Intrinsic::loongarch_iocsrwr_d: {
    if (!Subtarget.is64Bit()) {
      DAG.getContext()->emitError(
          "llvm.loongarch.iocsrwr.d requires target: loongarch64");
      return Op.getOperand(0);
    }
    // ...
  }
#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    if (!Subtarget.is64Bit()) {                                                \
      DAG.getContext()->emitError(Op->getOperationName(0) +                    \
                                  " requires target: loongarch64");            \
      return Op.getOperand(0);                                                 \
    }                                                                          \
  // ...
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
    if (!Subtarget.is64Bit()) {
      DAG.getContext()->emitError(Op->getOperationName(0) +
                                  " requires target: loongarch64");
      return Op.getOperand(0);
    }
    // ...
  }
  }
}
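// Illustrative sketch (not from the original listing, which elides these
// lines): X-macros such as IOCSRWR_CASE above are instantiated once per
// intrinsic so that each expansion contributes one `case` to the enclosing
// switch; only the intrinsic suffix and the LoongArchISD node differ between
// the byte/halfword/word variants. The exact instantiation list is assumed
// here for illustration:
//
//   IOCSRWR_CASE(iocsrwr_b, IOCSRWR_B);
//   IOCSRWR_CASE(iocsrwr_h, IOCSRWR_H);
//   IOCSRWR_CASE(iocsrwr_w, IOCSRWR_W);
//   #undef IOCSRWR_CASE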
// lowerShiftLeftParts (excerpt)
  EVT VT = Lo.getValueType();
  // ...

// lowerShiftRightParts (excerpt)
  EVT VT = Lo.getValueType();
  // ...
void LoongArchTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
  // ...
    assert(/* ... && */ "Unexpected custom legalisation");
    // ...
    if ((CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))) {
      // ...
    }
    // ...
    assert(/* ... && */ "Unexpected custom legalisation");
    // ...
    EVT OpVT = Src.getValueType();
    // ...
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, VT, Src, CallOptions, DL, Chain);
    // ...
    EVT SrcVT = Src.getValueType();
    // ...
    assert(/* ... && */ "Unexpected custom legalisation");
    // ...
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    // ...
    assert(/* ... && */ "Unexpected custom legalization");
    // ...
    assert(/* ... && */ "Unexpected custom legalization");
    // ...
    assert(/* ... && */ "Unexpected custom legalisation");
    // ...
  case ISD::INTRINSIC_W_CHAIN: {
    EVT VT = N->getValueType(0);
    uint64_t Op1 = N->getConstantOperandVal(1);
    // ...
    if (Op1 == Intrinsic::loongarch_movfcsr2gr) {
      if (!Subtarget.hasBasicF()) {
        DAG.getContext()->emitError(
            "llvm.loongarch.movfcsr2gr expects basic f target feature");
        Results.push_back(DAG.getMergeValues(
            {DAG.getUNDEF(N->getValueType(0)), N->getOperand(0)}, SDLoc(N)));
        Results.push_back(N->getOperand(0));
        return;
      }
      unsigned Imm = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
      if (!isUInt<2>(Imm)) {
        DAG.getContext()->emitError("argument to '" + N->getOperationName(0) +
                                    "' " + "out of range");
        Results.push_back(DAG.getMergeValues(
            {DAG.getUNDEF(N->getValueType(0)), N->getOperand(0)}, SDLoc(N)));
        Results.push_back(N->getOperand(0));
        return;
      }
      // ...
      Results.push_back(N->getOperand(0));
      return;
    }
    // The remaining W_CHAIN intrinsics are legalized case by case.
    std::string Name = N->getOperationName(0);
    switch (Op1) {
    // ...
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)                                      \
  case Intrinsic::loongarch_##NAME: {                                          \
    Results.push_back(DAG.getNode(                                             \
        ISD::TRUNCATE, DL, VT,                                                 \
        DAG.getNode(                                                           \
            LoongArchISD::NODE, DL, MVT::i64,                                  \
            DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),                   \
            DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3)))));   \
    Results.push_back(N->getOperand(0));                                       \
    break;                                                                     \
  }
    // ...
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE)                                       \
  case Intrinsic::loongarch_##NAME: {                                          \
    Results.push_back(                                                         \
        DAG.getNode(ISD::TRUNCATE, DL, VT,                                     \
                    DAG.getNode(LoongArchISD::NODE, DL, MVT::i64, Op2,         \
                                DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,     \
                                            N->getOperand(3)))));              \
    Results.push_back(N->getOperand(0));                                       \
    break;                                                                     \
  }
    // ...
#undef CRC_CASE_EXT_UNARYOP

#define CSR_CASE(ID)                                                           \
  case Intrinsic::loongarch_##ID: {                                            \
    if (!Subtarget.is64Bit()) {                                                \
      DAG.getContext()->emitError(Name + " requires target: loongarch64");     \
      Results.push_back(DAG.getUNDEF(VT));                                     \
      Results.push_back(N->getOperand(0));                                     \
    }                                                                          \
    break;                                                                     \
  }
    // ...
    case Intrinsic::loongarch_csrrd_w: {
      unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
      if (!isUInt<14>(Imm)) {
        // ... emit the "out of range" diagnostic ...
        Results.push_back(N->getOperand(0));
        break;
      }
      // ...
      Results.push_back(N->getOperand(0));
      break;
    }
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
      if (!isUInt<14>(Imm)) {
        // ...
        Results.push_back(N->getOperand(0));
        break;
      }
      // ...
      Results.push_back(N->getOperand(0));
      break;
    }
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = cast<ConstantSDNode>(N->getOperand(4))->getZExtValue();
      if (!isUInt<14>(Imm)) {
        // ...
        Results.push_back(N->getOperand(0));
        break;
      }
      // ...
      Results.push_back(N->getOperand(0));
      break;
    }
#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    Results.push_back(DAG.getNode(                                             \
        ISD::TRUNCATE, DL, N->getValueType(0),                                 \
        DAG.getNode(LoongArchISD::NODE, DL, MVT::i64, Op0,                     \
                    DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2))));        \
    Results.push_back(N->getOperand(0));                                       \
    break;                                                                     \
  }
    // ...
    case Intrinsic::loongarch_cpucfg: {
      // ...
    }
    case Intrinsic::loongarch_lddir_d: {
      if (!Subtarget.is64Bit()) {
        DAG.getContext()->emitError(Name + " requires target: loongarch64");
        // ...
      }
      // ...
    }
    }
    break;
  }
  case ISD::READ_REGISTER: {
    if (Subtarget.is64Bit())
      DAG.getContext()->emitError(
          "On LA64, only 64-bit registers can be read.");
    else
      DAG.getContext()->emitError(
          "On LA32, only 32-bit registers can be read.");
    Results.push_back(DAG.getUNDEF(VT));
    Results.push_back(N->getOperand(0));
    break;
  }
  }
}
static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const LoongArchSubtarget &Subtarget) {
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  // ...
  unsigned SMIdx, SMLen;
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||
      /* ... */)
    return SDValue();
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
    return SDValue();
  // ...
  NewOperand = FirstOperand;
  // ...
  msb = lsb + SMLen - 1;
  // ...
}
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const LoongArchSubtarget &Subtarget) {
  SDValue FirstOperand = N->getOperand(0);
  // ...
  EVT ValTy = N->getValueType(0);
  // ...
  unsigned MaskIdx, MaskLen;
  // ...
  if (/* ... || */
      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      /* ... */)
    return SDValue();
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))
    return SDValue();
  // ...
  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
    // ...
}
static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const LoongArchSubtarget &Subtarget) {
  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  // ...
  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
  // ...
  bool SwapAndRetried = false;
  // ...
  if (ValBits != 32 && ValBits != 64)
    return SDValue();

  // 1st pattern to match BSTRINS:
  if (/* ... */
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      // ...
      (MaskIdx0 + MaskLen0 <= ValBits)) {
    // ... build a LoongArchISD::BSTRINS node ...
  }

  // 2nd pattern:
  if (/* ... */
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      // ...
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {
    // ...
  }

  // 3rd pattern:
  if (/* ... */
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&
      /* ... */) {
    // ... the msb operand is
    //     ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
    //                   : (MaskIdx0 + MaskLen0 - 1) ...
  }

  // 4th pattern:
  if (/* ... */
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      (MaskIdx0 + MaskLen0 <= ValBits)) {
    // ...
  }

  // 5th pattern:
  if (/* ... */
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&
      /* ... */) {
    // ...
  }

  // 6th pattern: a = b | ((c & mask) << shamt), with the mask at bit 0.
  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      // ...
      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      /* ... */) {
    // ...
  }

  // 7th pattern: a = b | ((c << shamt) & mask).
  if (/* ... */
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      /* ... */) {
    // ...
  }

  // If nothing matched, swap the operands and retry the patterns once.
  if (!SwapAndRetried) {
    std::swap(N0, N1);
    SwapAndRetried = true;
    // ...
  }
  // ...
  SwapAndRetried = false;
  // ...
  if (/* ... */
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      /* ... */) {
    // ...
  }
  if (!SwapAndRetried) {
    std::swap(N0, N1);
    SwapAndRetried = true;
    // ...
  }
  return SDValue();
}

SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::AND:
    return performANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DAG, DCI, Subtarget);
  case ISD::SRL:
    return performSRLCombine(N, DAG, DCI, Subtarget);
  // ...
  }
  return SDValue();
}
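// Illustrative example (not part of the original file): the OR combine above
// targets read-modify-write bitfield patterns. Source like the hypothetical
// helper below legalizes to (or (and dst, ~mask), (shl (and src, 0xff), 16)),
// which the combine rewrites into a single LoongArchISD::BSTRINS node, i.e.
// one bstrins.w/bstrins.d instruction. The function name and constants are
// made up for illustration.
static inline unsigned insertByteField(unsigned Dst, unsigned Src) {
  // Insert the low 8 bits of Src into bits [23:16] of Dst.
  return (Dst & ~0x00ff0000u) | ((Src & 0xffu) << 16);
}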
static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock *MBB) {
  // ...
  MF->insert(It, BreakMBB);
  MF->insert(It, SinkMBB);

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // ... emit the compare, the conditional branch and the `break` ...
  BreakMBB->addSuccessor(SinkMBB);
  // ...
  return SinkMBB;
}
MachineBasicBlock *LoongArchTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const DebugLoc &DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  // ...
  case LoongArch::DIV_W:
  case LoongArch::DIV_WU:
  case LoongArch::MOD_W:
  case LoongArch::MOD_WU:
  case LoongArch::DIV_D:
  case LoongArch::DIV_DU:
  case LoongArch::MOD_D:
  case LoongArch::MOD_DU:
    return insertDivByZeroTrap(MI, BB);
  case LoongArch::WRFCSR: {
    BuildMI(*BB, MI, DL, TII->get(LoongArch::MOVGR2FCSR),
            LoongArch::FCSR0 + MI.getOperand(0).getImm())
        .addReg(MI.getOperand(1).getReg());
    MI.eraseFromParent();
    return BB;
  }
  case LoongArch::RDFCSR: {
    MachineInstr *ReadFCSR =
        BuildMI(*BB, MI, DL, TII->get(LoongArch::MOVFCSR2GR),
                MI.getOperand(0).getReg())
            .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
    // ...
    MI.eraseFromParent();
    return BB;
  }
  }
}
// getTargetNodeName (excerpt)
#define NODE_NAME_CASE(node)                                                   \
  case LoongArchISD::node:                                                     \
    return "LoongArchISD::" #node;
  // ...
#undef NODE_NAME_CASE
const MCPhysReg ArgGPRs[] = {LoongArch::R4,  LoongArch::R5, LoongArch::R6,
                             LoongArch::R7,  LoongArch::R8, LoongArch::R9,
                             LoongArch::R10, LoongArch::R11};
const MCPhysReg ArgFPR32s[] = {LoongArch::F0, LoongArch::F1, LoongArch::F2,
                               LoongArch::F3, LoongArch::F4, LoongArch::F5,
                               LoongArch::F6, LoongArch::F7};
const MCPhysReg ArgFPR64s[] = {
    LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
    LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};

// Pass a 2*GRLen argument that has been split into two GRLen values through
// registers or the stack as necessary.
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State,
                                     CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1,
                                     unsigned ValNo2, MVT ValVT2, MVT LocVT2,
                                     ISD::ArgFlagsTy ArgFlags2) {
  unsigned GRLenInBytes = GRLen / 8;
  // ...
}
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
                         unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                         bool IsRet, Type *OrigTy) {
  unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
  assert((GRLen == 32 || GRLen == 64) && "Unspport GRLen");
  // ...

  // Any return value split into more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  bool UseGPRForFloat = true;
  switch (ABI) {
  // ...
    // Use GPRs for a variadic floating-point argument.
    UseGPRForFloat = !IsFixed;
  // ...
    UseGPRForFloat = true;
  // ...
  }

  if (UseGPRForFloat && ValVT == MVT::f32) {
    // ...
  } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
    // ...
  } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
    // ...
  }

  // If this is a variadic argument, ensure a 2*GRLen-aligned, 2*GRLen-sized
  // argument starts at an even-numbered register (see the pairing sketch
  // after this function).
  unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoGRLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' registers if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();
  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle the second half of a split argument.
  if (/* ... && */ PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // ...
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_LoongArchAssign2GRLen(GRLen, State, VA, AF, ValNo, ValVT, LocVT,
                                    ArgFlags);
  }

  // Allocate to a register if possible, or else to a stack slot.
  unsigned StoreSizeBytes = GRLen / 8;
  // ...
  if (ValVT == MVT::f32 && !UseGPRForFloat)
    Reg = State.AllocateReg(ArgFPR32s);
  else if (ValVT == MVT::f64 && !UseGPRForFloat)
    Reg = State.AllocateReg(ArgFPR64s);
  else
    Reg = State.AllocateReg(ArgGPRs);
  // ...

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    // ...
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      // ...
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert((!UseGPRForFloat || LocVT == GRLenVT) &&
         "Expected an GRLenVT at this stage");
  // ...
}
void LoongArchTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
    LoongArchCCAssignFn Fn) const {
  FunctionType *FType = MF.getFunction().getFunctionType();
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT ArgVT = Ins[i].VT;
    Type *ArgTy = nullptr;
    if (IsRet)
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
    // ...
    if (Fn(/* ... */ ArgVT, CCValAssign::Full, Ins[i].Flags, CCInfo,
           /*IsFixed=*/true, IsRet, ArgTy)) {
      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                        /* ... */);
      // ...
    }
  }
}

void LoongArchTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT ArgVT = Outs[i].VT;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
    // ...
    if (Fn(/* ... */ ArgVT, CCValAssign::Full, Outs[i].Flags, CCInfo,
           Outs[i].IsFixed, IsRet, OrigTy)) {
      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                        /* ... */);
      // ...
    }
  }
}
// CC_LoongArch_GHC (excerpt): the GHC calling convention passes everything in
// a fixed set of callee-saved registers.
    static const MCPhysReg GPRList[] = {
        LoongArch::R23, LoongArch::R24, LoongArch::R25, LoongArch::R26,
        LoongArch::R27, LoongArch::R28, LoongArch::R29, LoongArch::R30,
        LoongArch::R31};
    // ...
  if (LocVT == MVT::f32) {
    static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
                                          LoongArch::F26, LoongArch::F27};
    if (unsigned Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }
  if (LocVT == MVT::f64) {
    static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
                                          LoongArch::F30_64, LoongArch::F31_64};
    if (unsigned Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }
  // ...
2215 "GHC calling convention requires the F and D extensions");
2220 unsigned GRLenInBytes = Subtarget.
getGRLen() / 8;
2222 std::vector<SDValue> OutChains;
2231 analyzeInputArgs(MF, CCInfo, Ins,
false,
CC_LoongArch);
2233 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
2245 unsigned ArgIndex = Ins[i].OrigArgIndex;
2246 unsigned ArgPartOffset = Ins[i].PartOffset;
2247 assert(ArgPartOffset == 0);
2248 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2250 unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
2273 int VaArgOffset, VarArgsSaveSize;
2279 VarArgsSaveSize = 0;
2281 VarArgsSaveSize = GRLenInBytes * (ArgRegs.
size() -
Idx);
2282 VaArgOffset = -VarArgsSaveSize;
2288 LoongArchFI->setVarArgsFrameIndex(FI);
2296 VarArgsSaveSize += GRLenInBytes;
2301 for (
unsigned I =
Idx;
I < ArgRegs.
size();
2302 ++
I, VaArgOffset += GRLenInBytes) {
2310 cast<StoreSDNode>(Store.getNode())
2312 ->setValue((
Value *)
nullptr);
2313 OutChains.push_back(Store);
2315 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
2320 if (!OutChains.empty()) {
2321 OutChains.push_back(Chain);
bool LoongArchTargetLowering::isUsedByReturnOnly(SDNode *N,
                                                 SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDNode *Copy = *N->use_begin();
  // ...
  if (Copy->getGluedNode())
    return false;
  // The copy must be used only by return nodes.
  bool HasRet = false;
  for (SDNode *Node : Copy->uses()) {
    // ...
  }
  // ...
  Chain = Copy->getOperand(0);
  return true;
}
// Check whether the call is eligible for tail call optimization.
bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
    const SmallVectorImpl<CCValAssign> &ArgLocs) const {
  auto CalleeCC = CLI.CallConv;
  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();
  auto CallerCC = Caller.getCallingConv();

  // Do not tail call opt if any parameters need to be passed indirectly.
  for (auto &VA : ArgLocs)
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;

  // Do not tail call opt if either caller or callee uses struct return
  // semantics.
  auto IsCallerStructRet = Caller.hasStructRetAttr();
  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (IsCallerStructRet || IsCalleeStructRet)
    return false;

  // Do not tail call opt if any byval argument is passed.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  // The callee has to preserve all registers the caller needs to preserve.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }
  return true;
}
// LowerCall (excerpt)
  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI, CC_LoongArch);

  // Check if it's really possible to do a tail call.
  if (IsTailCall)
    IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
  // ...
  else if (CLI.CB && CLI.CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");
  // ...

  // Create local copies for byval args.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;
    // ...
    Align Alignment = Flags.getNonZeroByValAlign();
    // ...
    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
                          /*IsVolatile=*/false, /*AlwaysInline=*/false,
                          IsTailCall, MachinePointerInfo(),
                          MachinePointerInfo());
    // ...
  }
  // ...

  // Copy argument values to their designated locations.
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    // ...
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      // ...
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      unsigned ArgPartOffset = Outs[i].PartOffset;
      assert(ArgPartOffset == 0);
      // If the original argument was split and passed by reference, store the
      // required parts of it here (and pass just one address).
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
        // ...
        ++i;
      }
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      for (const auto &Part : Parts) {
        SDValue PartValue = Part.first;
        SDValue PartOffset = Part.second;
        // ...
      }
      ArgValue = SpillSlot;
    }
    // ...

    // Use the local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // ...
    } else {
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");
      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, LoongArch::R3, PtrVT);
      // ...
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Glue = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }
  // ...

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  // ...
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  // ...

  // Copy all of the result registers out of their specified physregs.
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_LoongArch);
  for (auto &VA : RVLocs) {
    // ...
  }

// CanLowerReturn (excerpt)
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    // ...
    if (CC_LoongArch(/* ... */ Outs[i].Flags, CCInfo, /*IsFixed=*/true,
                     /*IsRet=*/true, nullptr))
      return false;
  }
  return true;

// LowerReturn (excerpt)
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    // ...
  }
bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                           bool ForCodeSize) const {
  // ...
  return (Imm.isZero() || Imm.isExactlyValue(+1.0));
}

bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  // ...
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  // ...
  if (isa<LoadInst>(I))
    // ...
  if (isa<StoreInst>(I)) {
    unsigned Size = I->getOperand(0)->getType()->getIntegerBitWidth();
    // ...
  }
  // ...
}

// hasAndNot / hasAndNotCompare (one of the two hooks, excerpt)
  return Y.getValueType().isScalarInteger() && !isa<ConstantSDNode>(Y);
bool LoongArchTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                 const CallInst &I,
                                                 MachineFunction &MF,
                                                 unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
  case Intrinsic::loongarch_masked_atomicrmw_add_i32:
  case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
  case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
    // ...
    Info.ptrVal = I.getArgOperand(0);
    // ...
    return true;
  }
}

static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen,
                                                         AtomicRMWInst::BinOp BinOp) {
  if (GRLen == 64) {
    switch (BinOp) {
    // ...
    case AtomicRMWInst::Xchg: return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:  return Intrinsic::loongarch_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:  return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand: return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::UMax: return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin: return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
    case AtomicRMWInst::Max:  return Intrinsic::loongarch_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:  return Intrinsic::loongarch_masked_atomicrmw_min_i64;
    // ...
    }
  }
  if (GRLen == 32) {
    switch (BinOp) {
    // ...
    case AtomicRMWInst::Xchg: return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:  return Intrinsic::loongarch_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:  return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand: return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
    }
  }
  // ...
}

// emitMaskedAtomicCmpXchgIntrinsic (excerpt)
  Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
  // ...
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  // ...
Value *LoongArchTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned GRLen = Subtarget.getGRLen();
  // ...
  if (/* ... signed min/max also pass the field width ... */) {
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(GRLen, GRLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LlwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }
  // ...
}
Register LoongArchTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return LoongArch::R4;
}

Register LoongArchTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return LoongArch::R5;
}
LoongArchTargetLowering::ConstraintType
LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
  // ...
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // ...
    }
  }
  // ...
  if (Constraint == "ZC" || Constraint == "ZB")
    return C_Memory;
  return TargetLowering::getConstraintType(Constraint);
}
unsigned LoongArchTargetLowering::getInlineAsmMemConstraint(
    StringRef ConstraintCode) const {
  // ...
}

std::pair<unsigned, const TargetRegisterClass *>
LoongArchTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // LoongArch register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &LoongArch::GPRRegClass);
    case 'f':
      if (Subtarget.hasBasicF() && VT == MVT::f32)
        return std::make_pair(0U, &LoongArch::FPR32RegClass);
      if (Subtarget.hasBasicD() && VT == MVT::f64)
        return std::make_pair(0U, &LoongArch::FPR64RegClass);
      break;
    default:
      break;
    }
  }

  // ...
  // Handle explicit register names of the form {$r4} / {$f5}: the generated
  // register tables do not contain the '$', so strip it before the generic
  // lookup.
  bool IsFP = Constraint[2] == 'f';
  std::pair<StringRef, StringRef> Temp = Constraint.split('$');
  std::pair<unsigned, const TargetRegisterClass *> R;
  R = TargetLowering::getRegForInlineAsmConstraint(
      TRI, join_items("", Temp.first, Temp.second), VT);
  // Map single-precision register names to their double-precision
  // counterparts when an FPR64 is wanted.
  if (IsFP) {
    unsigned RegNo = R.first;
    if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
      if (/* ... */) {
        unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
        return std::make_pair(DReg, &LoongArch::FPR64RegClass);
      }
    }
  }
  return R;
}
void LoongArchTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'l':
      // Validate & create a 16-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<16>(CVal))
          Ops.push_back(DAG.getTargetConstant(CVal, SDLoc(Op),
                                              Subtarget.getGRLenVT()));
      }
      return;
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(DAG.getTargetConstant(CVal, SDLoc(Op),
                                              Subtarget.getGRLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getGRLenVT()));
      return;
    case 'K':
      // Validate & create a 12-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<12>(CVal))
          Ops.push_back(DAG.getTargetConstant(CVal, SDLoc(Op),
                                              Subtarget.getGRLenVT()));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
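// Illustrative usage (not from the original file): how the immediate
// constraints validated above appear in user code. 'I' expects a signed
// 12-bit immediate, 'K' an unsigned 12-bit immediate, 'J' the constant zero,
// and 'l' a signed 16-bit immediate; out-of-range constants fall through to
// the default handling. The helper below is hypothetical.
static inline int addSmallImmediate(int X) {
  int Res;
  // addi.w takes a signed 12-bit immediate, matching the 'I' constraint.
  asm("addi.w %0, %1, %2" : "=r"(Res) : "r"(X), "I"(42));
  return Res;
}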
#define GET_REGISTER_MATCHER
#include "LoongArchGenAsmMatcher.inc"

// getRegisterByName (excerpt)
  std::string NewRegName = Name.second.str();
  Register Reg = MatchRegisterAltName(NewRegName);
  if (Reg == LoongArch::NoRegister)
    Reg = MatchRegisterName(NewRegName);
  if (Reg == LoongArch::NoRegister)
    report_fatal_error(/* ... invalid register name ... */);
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg))
    report_fatal_error(/* ... register is not reserved ... */);
  return Reg;
bool LoongArchTargetLowering::decomposeMulByConstant(LLVMContext &Context,
                                                     EVT VT, SDValue C) const {
  // ...
  if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
    const APInt &Imm = ConstNode->getAPIntValue();
    if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
        (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
      return true;
  }
  return false;
}
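// Illustrative sketch (not from the original file): the predicate above
// accepts immediates adjacent to a power of two, because such a multiply can
// be rewritten as one shift plus one add/sub. For example, with Imm = 17,
// (Imm - 1) = 16 is a power of two, so x * 17 becomes (x << 4) + x.
static inline long mulBy17(long X) {
  return (X << 4) + X; // equivalent to X * 17 for overflow-free inputs
}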
bool LoongArchTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                    const AddrMode &AM,
                                                    Type *Ty, unsigned AS,
                                                    Instruction *I) const {
  // ...
}

// hasAndNot / hasAndNotCompare (the other and-not hook, excerpt)
  if (Y.getValueType().isVector())
    return false;
  return !isa<ConstantSDNode>(Y);