#include "llvm/IR/IntrinsicsLoongArch.h"
// ...

#define DEBUG_TYPE "loongarch-isel-lowering"
// ...
    cl::desc("Trap on integer division by zero."),
// ...
  if (Subtarget.hasBasicF())
    // ...
  if (Subtarget.hasBasicD())
    // ...

  static const MVT::SimpleValueType LSXVTs[] = {
      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
  static const MVT::SimpleValueType LASXVTs[] = {
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())
    // ...
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
      // ...
  if (Subtarget.hasBasicF()) {
    // ...
  }
  if (!Subtarget.hasBasicD()) {
    // ...
  }
  if (Subtarget.hasBasicD()) {
    // ...
  }

  if (Subtarget.hasExtLSX()) {
    // ...
    for (MVT VT : LSXVTs) {
      // ...
    }
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
      // ...
    }
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
      // ...
    for (MVT VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
      // ...
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
      // ...
    }
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
      // ...
    }
  }

  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
      // ...
    }
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
      // ...
    }
    for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
      // ...
    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64})
      // ...
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
      // ...
    }
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {
      // ...
    }
  }

  if (Subtarget.hasExtLSX())
    // ...
  if (Subtarget.hasLAMCAS())
    // ...
  switch (Op.getOpcode()) {
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::EH_DWARF_CFA:
    return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
    return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:
    return lowerFP_TO_SINT(Op, DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::UINT_TO_FP:
    return lowerUINT_TO_FP(Op, DAG);
  case ISD::SINT_TO_FP:
    return lowerSINT_TO_FP(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::WRITE_REGISTER:
    return lowerWRITE_REGISTER(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::BITREVERSE:
    return lowerBITREVERSE(Op, DAG);
  }
  EVT ResTy = Op->getValueType(0);
  // ...
  for (unsigned int i = 0; i < NewEltNum; i++) {
    // ...
    SDValue RevOp = DAG.getNode((ResTy == MVT::v16i8 || ResTy == MVT::v32i8)
                                    // ...
  }
  // ...
  for (unsigned int i = 0; i < NewEltNum; i++)
    for (int j = OrigEltNum / NewEltNum - 1; j >= 0; j--)
      Mask.push_back(j + (OrigEltNum / NewEltNum) * i);
template <typename ValType>
static bool
fitsRegularPattern(typename SmallVectorImpl<ValType>::const_iterator Begin,
                   unsigned CheckStride,
                   typename SmallVectorImpl<ValType>::const_iterator End,
                   ValType ExpectedIndex, unsigned ExpectedIndexStride) {
  auto &I = Begin;

  while (I != End) {
    if (*I != -1 && *I != ExpectedIndex)
      return false;
    ExpectedIndex += ExpectedIndexStride;

    // Incrementing past End is undefined behaviour so we must increment one
    // step at a time and check for End at each step.
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
      ; // Empty loop body.
  }
  return true;
}
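// Illustrative note (not from the original file): fitsRegularPattern walks
// the mask at CheckStride, expecting ExpectedIndex, ExpectedIndex +
// ExpectedIndexStride, ... and treating -1 (undef) as a wildcard. For a
// v4i32 mask {0, 4, 1, 5}, the even positions satisfy
// fitsRegularPattern<int>(Begin, 2, End, 0, 1) and the odd positions
// satisfy fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 1),
// which is exactly the VILVL shape matched further below.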
  for (const auto &M : Mask) {
    // ...
  }
  if (SplatIndex == -1)
    return DAG.getUNDEF(VT);
  // ...
  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Mask.begin(), 1, Mask.end(), SplatIndex, 0)) {
    APInt Imm(64, SplatIndex);
    // ...
  }
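// Illustrative note (not from the original file): a single-element splat
// such as shufflevector <4 x i32> %v, poison, <1, 1, 1, 1> yields
// SplatIndex == 1, fits the stride-0 pattern above, and can be selected as
// one vreplvei.w with the lane index carried in Imm.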
  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < 4; ++i) {
    for (unsigned j = i; j < Mask.size(); j += 4) {
      int Idx = Mask[j];
      // ...
      if (Idx < 0 || Idx >= 4)
        return SDValue();
      // ...
      if (SubMask[i] == -1)
        SubMask[i] = Idx;
      // ...
      else if (Idx != -1 && Idx != SubMask[i])
        return SDValue();
    }
  }
  // ...
  for (int i = 3; i >= 0; --i) {
    int Idx = SubMask[i];
    // ...
  }
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 2))
    V2 = OriV2;
  else
    return SDValue();
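// Illustrative note (not from the original file): for v4i32 the mask
// <0, 4, 2, 6> takes the even elements of both sources, so it fits the
// two checks above and the whole shuffle collapses to one vpackev.w;
// the companion matcher below does the same for odd elements (vpackod).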
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 1, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + 1, 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + 1, 2))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + HalfSize, 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + HalfSize,
                                   1))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 1))
    V2 = OriV2;
  else
    return SDValue();
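// Illustrative note (not from the original file): interleaving the low
// halves, e.g. shufflevector <8 x i16> %a, %b, <0, 8, 1, 9, 2, 10, 3, 11>,
// fits this base-0, stride-1 shape (vilvl.h); the matcher just above uses
// HalfSize as the base index to recognise vilvh.h the same way.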
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size(), 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Mid, 1, End, 0, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size(), 2))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size() + 1, 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Mid, 1, End, 1, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size() + 1, 2))
    V2 = OriV2;
  else
    return SDValue();
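// Illustrative note (not from the original file): vpickev/vpickod
// concatenate the even (resp. odd) elements of the two sources, so for
// v4i32 the mask <0, 2, 4, 6> is one vpickev.w and <1, 3, 5, 7> is one
// vpickod.w; the Mid iterator marks where the result switches source.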
900 "Vector type is unsupported for lsx!");
902 "Two operands have different types!");
904 "Unexpected mask size for shuffle!");
905 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
  for (const auto &M : Mask) {
    // ...
  }
  if (SplatIndex == -1)
    return DAG.getUNDEF(VT);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Begin, 1, End - HalfSize, SplatIndex, 0) &&
      fitsRegularPattern<int>(Begin + HalfSize, 1, End, SplatIndex + HalfSize,
                              0)) {
    APInt Imm(64, SplatIndex);
    // ...
  }
  // ...
  if (Mask.size() <= 4)
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, HalfSize - LeftSize,
                              1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize + LeftSize, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, HalfSize - LeftSize,
                              1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize + LeftSize,
                              1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, Mask.size(), 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize, 1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, Mask.size(),
                                   1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 0, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size(), 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize, 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 0, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size(), 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize, 2))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 1, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize + 1, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize + 1,
                                   2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 1, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize + 1, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize + 1,
                                   2))
    V2 = OriV2;
  else
    return SDValue();
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();
  // ...
  for (auto it = Begin; it < Mid; it++) {
    // ...
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it <= MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;
      // ...
    }
    // ...
  }
  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");

  for (auto it = Mid; it < End; it++) {
    // ...
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;
      // ...
    }
    // ...
  }
  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  // ...
  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    postMask = HighLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    postMask = LowLaneTy;
  // ...
  if (preMask == HighLaneTy && postMask == LowLaneTy) {
    // ...
  }
  if (preMask == LowLaneTy && postMask == HighLaneTy) {
    // ...
    if (!V2.isUndef()) {
      // ...
    }
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    }
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
    }
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    // ...
    if (!V2.isUndef()) {
      // ...
    }
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    }
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    // ...
    if (!V2.isUndef()) {
      // ...
    }
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
    }
  }
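// Illustrative note (not from the original file): each half of a 256-bit
// mask is classified by the 128-bit lane it reads from (HighLaneTy /
// LowLaneTy). When a half reads the "wrong" lane, the inputs are
// lane-swapped first (an xvpermi.q-style swap) and the mask indices are
// rebased by HalfSize, so the lane-local matchers above can then apply.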
1325 "Vector type is unsupported for lasx!");
1327 "Two operands have different types!");
1329 "Unexpected mask size for shuffle!");
1330 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
1331 assert(Mask.size() >= 4 &&
"Mask size is less than 4.");
  MVT VT = Op.getSimpleValueType();
  // ...
  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);
  // ...
  if (V2IsUndef &&
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    // ...
    for (int &M : NewMask)
      if (M >= NumElements)
        M = -1;
    // ...
  }
  // ...
  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
  assert(llvm::all_of(OrigMask,
                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");
1423 if (isa<ConstantSDNode>(
Op))
1425 if (isa<ConstantFPSDNode>(
Op))
  EVT ResTy = Op->getValueType(0);
  // ...
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  // ...
  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))
    return SDValue();

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                            8) &&
      SplatBitSize <= 64) {
    // ...
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
        SplatBitSize != 64)
      return SDValue();
    // ...
    switch (SplatBitSize) {
    default:
      return SDValue();
    case 8:
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      break;
    case 16:
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      break;
    case 32:
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      break;
    case 64:
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
      break;
    }
    // ...
    if (ViaVecTy != ResTy)
      // ...
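// Illustrative note (not from the original file): a constant splat like
// <4 x i32> <i32 5, i32 5, i32 5, i32 5> is materialized through the
// matching integer type ViaVecTy and typically selects to a single
// immediate-splat instruction (vldi/vrepli.w); the bitcast back to ResTy
// also covers splats of floating-point vectors.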
  EVT ResTy = Node->getValueType(0);
  // ...
  for (unsigned i = 0; i < NumElts; ++i) {
    Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
                         Node->getOperand(i),
                         DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
  }
SDValue
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT VecTy = Op->getOperand(0)->getValueType(0);
  // ...
  if (isa<ConstantSDNode>(Idx) &&
      (EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
       EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))
    return Op;

  return SDValue();
}
SDValue
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                SelectionDAG &DAG) const {
  if (isa<ConstantSDNode>(Op->getOperand(2)))
    return Op;

  return SDValue();
}
  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
    DAG.getContext()->emitError(
        "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  }

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
    DAG.getContext()->emitError(
        "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
  }
1573 if (!isa<ConstantSDNode>(
Op.getOperand(0))) {
1575 "be a constant integer");
1582 EVT VT =
Op.getValueType();
1585 unsigned Depth =
Op.getConstantOperandVal(0);
1586 int GRLenInBytes = Subtarget.
getGRLen() / 8;
1589 int Offset = -(GRLenInBytes * 2);
1604 if (
Op.getConstantOperandVal(0) != 0) {
1606 "return address can only be determined for the current frame");
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1648 !Subtarget.hasBasicD() &&
"unexpected target features");
1653 auto *
C = dyn_cast<ConstantSDNode>(Op0.
getOperand(1));
1654 if (
C &&
C->getZExtValue() < UINT64_C(0xFFFFFFFF))
1664 dyn_cast<VTSDNode>(Op0.
getOperand(1))->getVT().bitsLT(MVT::i32))
1668 EVT RetVT =
Op.getValueType();
1670 MakeLibCallOptions CallOptions;
1671 CallOptions.setTypeListBeforeSoften(OpVT, RetVT,
true);
1674 std::tie(Result, Chain) =
1682 !Subtarget.hasBasicD() &&
"unexpected target features");
1689 dyn_cast<VTSDNode>(Op0.
getOperand(1))->getVT().bitsLE(MVT::i32))
1693 EVT RetVT =
Op.getValueType();
1695 MakeLibCallOptions CallOptions;
1696 CallOptions.setTypeListBeforeSoften(OpVT, RetVT,
true);
1699 std::tie(Result, Chain) =
  if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {
  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
1752 N->getOffset(), Flags);
1760template <
class NodeTy>
1763 bool IsLocal)
const {
1774 assert(Subtarget.
is64Bit() &&
"Large code model requires LA64");
  return getAddr(cast<BlockAddressSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());
  // ...
  return getAddr(cast<JumpTableSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());
  // ...
  return getAddr(cast<ConstantPoolSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  // ...
  if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
    if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())
      // ...
  }
                                                     unsigned Opc, bool UseGOT,
// ...
  Args.push_back(Entry);
SDValue
LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  // ...
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");
  // ...
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  // ...
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
                             Large);
  // ...
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
                             Large);
  // ...
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
                            Large);
  // ...
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
                            // ...
  // ...
  return getTLSDescAddr(N, DAG,
                        Large ? LoongArch::PseudoLA_TLS_DESC_LARGE
                              : LoongArch::PseudoLA_TLS_DESC,
                        Large);
2001template <
unsigned N>
2004 auto *CImm = cast<ConstantSDNode>(
Op->getOperand(ImmOp));
2006 if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
2007 (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
2009 ": argument out of range.");
SDValue
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  // ...
  switch (Op.getConstantOperandVal(0)) {
  default:
    return SDValue();
  case Intrinsic::thread_pointer: {
    // ...
  }
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
    return checkIntrinsicImmArg<1>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
    return checkIntrinsicImmArg<2>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_d:
    return checkIntrinsicImmArg<2>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
    return checkIntrinsicImmArg<3>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_w:
    return checkIntrinsicImmArg<3>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
    return checkIntrinsicImmArg<4>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
    return checkIntrinsicImmArg<4>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
    return checkIntrinsicImmArg<5>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
    return checkIntrinsicImmArg<5>(Op, 2, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
    return checkIntrinsicImmArg<5>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
    return checkIntrinsicImmArg<6>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
    return checkIntrinsicImmArg<6>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
    return checkIntrinsicImmArg<7>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
    return checkIntrinsicImmArg<8>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
    return checkIntrinsicImmArg<8>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
    return checkIntrinsicImmArg<10>(Op, 1, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
    return checkIntrinsicImmArg<13>(Op, 1, DAG, /*IsSigned=*/true);
  }
}
SDValue
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  MVT GRLenVT = Subtarget.getGRLenVT();
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  default:
    return Op;
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
    return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqLA64, DAG);
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
                             {Chain, DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2),
                              DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2), Op.getOperand(3),
                              DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_iocsrrd_d: {
    // ...
  }
#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other},          \
                       {Chain, Op.getOperand(2)});                             \
  }
    // ...
#undef IOCSRRD_CASE
  case Intrinsic::loongarch_cpucfg: {
    return DAG.getNode(LoongArchISD::CPUCFG, DL, {GRLenVT, MVT::Other},
                       {Chain, Op.getOperand(2)});
  }
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<8>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : Op;
  }
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
      return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqF, DAG);
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               // ...
  }
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
    return !isShiftedInt<11, 1>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
    return !isShiftedInt<10, 2>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
    return !isShiftedInt<9, 3>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  }
}
// ...
  return Op.getOperand(0);
}
SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
                                                     SelectionDAG &DAG) const {
  // ...
  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);
  // ...
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (IntrinsicEnum) {
  default:
    return SDValue();
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG);
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA32, DAG);
    // ...
    int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
    if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    return Op;
  }
  case Intrinsic::loongarch_dbar: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               // ...
  }
  case Intrinsic::loongarch_ibar: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               // ...
  }
  case Intrinsic::loongarch_break: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               // ...
  }
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqF, DAG);
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               // ...
  }
  case Intrinsic::loongarch_syscall: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               // ...
  }
#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    return Subtarget.is64Bit()                                                 \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain,        \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),  \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3))  \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2,   \
                             Op3);                                             \
  }
    // ...
#undef IOCSRWR_CASE
  case Intrinsic::loongarch_iocsrwr_d: {
    // ...
  }
#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    return !Subtarget.is64Bit()                                                \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)           \
               : Op;                                                           \
  }
    // ...
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    // ...
  }
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue())
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<5>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<1>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  }
}
  EVT VT = Lo.getValueType();
  // ...
  EVT VT = Lo.getValueType();
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);
    // ...
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    // ...
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
static void emitErrorAndReplaceIntrinsicResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG,
    StringRef ErrorMsg, bool WithChain = true) {
  // ...
  Results.push_back(N->getOperand(0));
}

template <unsigned N>
static void
replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl<SDValue> &Results,
                         SelectionDAG &DAG, const LoongArchSubtarget &Subtarget,
                         unsigned ResOp) {
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  if (!isUInt<N>(Imm)) {
    // ...
  }
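// Illustrative note (not from the original file): lsx.vpickve2gr.b indexes
// one of 16 byte lanes, so its index must fit in 4 bits, hence the
// replaceVPICKVE2GRResults<4> instantiation below; an out-of-range index
// emits the "argument out of range" diagnostic and substitutes a safe
// result so type legalization can continue.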
  switch (N->getConstantOperandVal(0)) {
  default:
    // ...
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
    // ...
    break;
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
    // ...
    break;
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
    // ...
    break;
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:
    // ...
    break;
  }
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
  // ...
    assert(/* ... */ && "Unexpected custom legalisation");
    // ...
    assert(/* ... */ && "Unexpected custom legalisation");
    // ...
        Subtarget.hasDiv32() && VT == MVT::i32
    // ...
    assert(/* ... */ && "Unexpected custom legalisation");
    // ...
    assert(/* ... */ && "Unexpected custom legalisation");
    // ...
    assert(/* ... */ && "Unexpected custom legalisation");
    // ...
    if (Src.getValueType() == MVT::f16)
      // ...
    EVT OpVT = Src.getValueType();
    // ...
    std::tie(Result, Chain) =
        // ...
    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
      // ...
    }
    // ...
    assert(/* ... */ && "Unexpected custom legalisation");
    // ...
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    // ...
    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    // ...
    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
    // ...
    assert(/* ... */ && "Unexpected custom legalisation");
    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
    // ...
    switch (N->getConstantOperandVal(1)) {
    // ...
    case Intrinsic::loongarch_movfcsr2gr: {
      if (!Subtarget.hasBasicF()) {
        // ...
      }
      // ...
      if (!isUInt<2>(Imm)) {
        // ...
      }
      // ...
    }
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)                                      \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),               \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
    break;                                                                     \
  }
    // ...
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE)                                       \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain,                                                                \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
    break;                                                                     \
  }
    // ...
#undef CRC_CASE_EXT_UNARYOP
#define CSR_CASE(ID)                                                           \
  case Intrinsic::loongarch_##ID: {                                            \
    if (!Subtarget.is64Bit())                                                  \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64);   \
    // ...
  }
    // ...
    case Intrinsic::loongarch_csrrd_w: {
      // ...
      if (!isUInt<14>(Imm)) {
        // ...
      }
      // ...
    }
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
      if (!isUInt<14>(Imm)) {
        // ...
      }
      // ...
    }
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);
      if (!isUInt<14>(Imm)) {
        // ...
      }
      // ...
    }
#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue IOCSRRDResults =                                                   \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},            \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back(                                                         \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0)));       \
    Results.push_back(IOCSRRDResults.getValue(1));                             \
    break;                                                                     \
  }
    // ...
    case Intrinsic::loongarch_cpucfg: {
      // ...
    }
    case Intrinsic::loongarch_lddir_d: {
      // ...
    }
    // ...
        "On LA64, only 64-bit registers can be read.");
    // ...
        "On LA32, only 32-bit registers can be read.");
    // ...
    Results.push_back(N->getOperand(0));
    // ...
        OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  // ...
  unsigned SMIdx, SMLen;
  // ...
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||
      // ...
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
    return SDValue();
  // ...
  NewOperand = FirstOperand;
  // ...
  msb = lsb + SMLen - 1;
  // ...
  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)
    // ...
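// Illustrative note (not from the original file): this combine rewrites a
// shift-plus-mask such as (and (srl $r5, 8), 0xfff) into one bit-field
// extract, bstrpick.d $r4, $r5, 19, 8, where msb = lsb + SMLen - 1
// (8 + 12 - 1 for a 12-bit mask).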
  SDValue FirstOperand = N->getOperand(0);
  // ...
  EVT ValTy = N->getValueType(0);
  // ...
  unsigned MaskIdx, MaskLen;
  // ...
      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      // ...
  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))
    return SDValue();
  // ...
  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
    // ...
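// Illustrative note (not from the original file): the dual form
// (srl (and $r5, 0xff00), 8) reads only bits [15:8], so when the shift
// amount lands inside the mask it likewise becomes
// bstrpick.d $r4, $r5, 15, 8.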
  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  // ...
  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
  // ...
  bool SwapAndRetried = false;
  // ...
  if (ValBits != 32 && ValBits != 64)
    return SDValue();
  // ...
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      // ...
      (MaskIdx0 + MaskLen0 <= ValBits)) {
    // ...
  }
  // ...
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      // ...
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {
    // ...
  }
  // ...
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&
      // ...
                          ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                          : (MaskIdx0 + MaskLen0 - 1),
      // ...
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      (MaskIdx0 + MaskLen0 <= ValBits)) {
    // ...
  }
  // ...
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      // ...
      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&
      // ...
        DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                      : (MaskIdx0 + MaskLen0 - 1),
      // ...
  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      // ...
      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
  // ...
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      // ...
  if (!SwapAndRetried) {
    // ...
    SwapAndRetried = true;
    // ...
  }
  SwapAndRetried = false;
  // ...
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      // ...
  if (!SwapAndRetried) {
    // ...
    SwapAndRetried = true;
    // ...
  }
  switch (V.getNode()->getOpcode()) {
  case ISD::LOAD: {
    LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
    // ...
  }
  case ISD::AssertSext: {
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
      // ...
    }
  }
  case ISD::AssertZext: {
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
      // ...
    }
  }
  SDNode *AndNode = N->getOperand(0).getNode();
  // ...
  SDValue CmpInputValue = N->getOperand(1);
  // ...
  CN = dyn_cast<ConstantSDNode>(CmpInputValue);
  // ...
  AndInputValue1 = AndInputValue1.getOperand(0);
  // ...
  if (AndInputValue2 != CmpInputValue)
    return SDValue();
  // ...
                                 TruncInputValue1, TruncInputValue2);
3721template <
unsigned N>
3725 bool IsSigned =
false) {
3727 auto *CImm = cast<ConstantSDNode>(
Node->getOperand(ImmOp));
3729 if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
3730 (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
3732 ": argument out of range.");
3738template <
unsigned N>
3742 EVT ResTy =
Node->getValueType(0);
3743 auto *CImm = cast<ConstantSDNode>(
Node->getOperand(ImmOp));
3746 if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
3747 (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
3749 ": argument out of range.");
3754 IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
  EVT ResTy = Node->getValueType(0);
  // ...
  EVT ResTy = Node->getValueType(0);
3777template <
unsigned N>
3780 EVT ResTy =
Node->getValueType(0);
3781 auto *CImm = cast<ConstantSDNode>(
Node->getOperand(2));
3783 if (!isUInt<N>(CImm->getZExtValue())) {
3785 ": argument out of range.");
3795template <
unsigned N>
3798 EVT ResTy =
Node->getValueType(0);
3799 auto *CImm = cast<ConstantSDNode>(
Node->getOperand(2));
3801 if (!isUInt<N>(CImm->getZExtValue())) {
3803 ": argument out of range.");
3812template <
unsigned N>
3815 EVT ResTy =
Node->getValueType(0);
3816 auto *CImm = cast<ConstantSDNode>(
Node->getOperand(2));
3818 if (!isUInt<N>(CImm->getZExtValue())) {
3820 ": argument out of range.");
3834 switch (
N->getConstantOperandVal(0)) {
3837 case Intrinsic::loongarch_lsx_vadd_b:
3838 case Intrinsic::loongarch_lsx_vadd_h:
3839 case Intrinsic::loongarch_lsx_vadd_w:
3840 case Intrinsic::loongarch_lsx_vadd_d:
3841 case Intrinsic::loongarch_lasx_xvadd_b:
3842 case Intrinsic::loongarch_lasx_xvadd_h:
3843 case Intrinsic::loongarch_lasx_xvadd_w:
3844 case Intrinsic::loongarch_lasx_xvadd_d:
3847 case Intrinsic::loongarch_lsx_vaddi_bu:
3848 case Intrinsic::loongarch_lsx_vaddi_hu:
3849 case Intrinsic::loongarch_lsx_vaddi_wu:
3850 case Intrinsic::loongarch_lsx_vaddi_du:
3851 case Intrinsic::loongarch_lasx_xvaddi_bu:
3852 case Intrinsic::loongarch_lasx_xvaddi_hu:
3853 case Intrinsic::loongarch_lasx_xvaddi_wu:
3854 case Intrinsic::loongarch_lasx_xvaddi_du:
3856 lowerVectorSplatImm<5>(
N, 2, DAG));
3857 case Intrinsic::loongarch_lsx_vsub_b:
3858 case Intrinsic::loongarch_lsx_vsub_h:
3859 case Intrinsic::loongarch_lsx_vsub_w:
3860 case Intrinsic::loongarch_lsx_vsub_d:
3861 case Intrinsic::loongarch_lasx_xvsub_b:
3862 case Intrinsic::loongarch_lasx_xvsub_h:
3863 case Intrinsic::loongarch_lasx_xvsub_w:
3864 case Intrinsic::loongarch_lasx_xvsub_d:
3867 case Intrinsic::loongarch_lsx_vsubi_bu:
3868 case Intrinsic::loongarch_lsx_vsubi_hu:
3869 case Intrinsic::loongarch_lsx_vsubi_wu:
3870 case Intrinsic::loongarch_lsx_vsubi_du:
3871 case Intrinsic::loongarch_lasx_xvsubi_bu:
3872 case Intrinsic::loongarch_lasx_xvsubi_hu:
3873 case Intrinsic::loongarch_lasx_xvsubi_wu:
3874 case Intrinsic::loongarch_lasx_xvsubi_du:
3876 lowerVectorSplatImm<5>(
N, 2, DAG));
3877 case Intrinsic::loongarch_lsx_vneg_b:
3878 case Intrinsic::loongarch_lsx_vneg_h:
3879 case Intrinsic::loongarch_lsx_vneg_w:
3880 case Intrinsic::loongarch_lsx_vneg_d:
3881 case Intrinsic::loongarch_lasx_xvneg_b:
3882 case Intrinsic::loongarch_lasx_xvneg_h:
3883 case Intrinsic::loongarch_lasx_xvneg_w:
3884 case Intrinsic::loongarch_lasx_xvneg_d:
3888 APInt(
N->getValueType(0).getScalarType().getSizeInBits(), 0,
3890 SDLoc(
N),
N->getValueType(0)),
3892 case Intrinsic::loongarch_lsx_vmax_b:
3893 case Intrinsic::loongarch_lsx_vmax_h:
3894 case Intrinsic::loongarch_lsx_vmax_w:
3895 case Intrinsic::loongarch_lsx_vmax_d:
3896 case Intrinsic::loongarch_lasx_xvmax_b:
3897 case Intrinsic::loongarch_lasx_xvmax_h:
3898 case Intrinsic::loongarch_lasx_xvmax_w:
3899 case Intrinsic::loongarch_lasx_xvmax_d:
3902 case Intrinsic::loongarch_lsx_vmax_bu:
3903 case Intrinsic::loongarch_lsx_vmax_hu:
3904 case Intrinsic::loongarch_lsx_vmax_wu:
3905 case Intrinsic::loongarch_lsx_vmax_du:
3906 case Intrinsic::loongarch_lasx_xvmax_bu:
3907 case Intrinsic::loongarch_lasx_xvmax_hu:
3908 case Intrinsic::loongarch_lasx_xvmax_wu:
3909 case Intrinsic::loongarch_lasx_xvmax_du:
3912 case Intrinsic::loongarch_lsx_vmaxi_b:
3913 case Intrinsic::loongarch_lsx_vmaxi_h:
3914 case Intrinsic::loongarch_lsx_vmaxi_w:
3915 case Intrinsic::loongarch_lsx_vmaxi_d:
3916 case Intrinsic::loongarch_lasx_xvmaxi_b:
3917 case Intrinsic::loongarch_lasx_xvmaxi_h:
3918 case Intrinsic::loongarch_lasx_xvmaxi_w:
3919 case Intrinsic::loongarch_lasx_xvmaxi_d:
3921 lowerVectorSplatImm<5>(
N, 2, DAG,
true));
3922 case Intrinsic::loongarch_lsx_vmaxi_bu:
3923 case Intrinsic::loongarch_lsx_vmaxi_hu:
3924 case Intrinsic::loongarch_lsx_vmaxi_wu:
3925 case Intrinsic::loongarch_lsx_vmaxi_du:
3926 case Intrinsic::loongarch_lasx_xvmaxi_bu:
3927 case Intrinsic::loongarch_lasx_xvmaxi_hu:
3928 case Intrinsic::loongarch_lasx_xvmaxi_wu:
3929 case Intrinsic::loongarch_lasx_xvmaxi_du:
3931 lowerVectorSplatImm<5>(
N, 2, DAG));
3932 case Intrinsic::loongarch_lsx_vmin_b:
3933 case Intrinsic::loongarch_lsx_vmin_h:
3934 case Intrinsic::loongarch_lsx_vmin_w:
3935 case Intrinsic::loongarch_lsx_vmin_d:
3936 case Intrinsic::loongarch_lasx_xvmin_b:
3937 case Intrinsic::loongarch_lasx_xvmin_h:
3938 case Intrinsic::loongarch_lasx_xvmin_w:
3939 case Intrinsic::loongarch_lasx_xvmin_d:
3942 case Intrinsic::loongarch_lsx_vmin_bu:
3943 case Intrinsic::loongarch_lsx_vmin_hu:
3944 case Intrinsic::loongarch_lsx_vmin_wu:
3945 case Intrinsic::loongarch_lsx_vmin_du:
3946 case Intrinsic::loongarch_lasx_xvmin_bu:
3947 case Intrinsic::loongarch_lasx_xvmin_hu:
3948 case Intrinsic::loongarch_lasx_xvmin_wu:
3949 case Intrinsic::loongarch_lasx_xvmin_du:
3952 case Intrinsic::loongarch_lsx_vmini_b:
3953 case Intrinsic::loongarch_lsx_vmini_h:
3954 case Intrinsic::loongarch_lsx_vmini_w:
3955 case Intrinsic::loongarch_lsx_vmini_d:
3956 case Intrinsic::loongarch_lasx_xvmini_b:
3957 case Intrinsic::loongarch_lasx_xvmini_h:
3958 case Intrinsic::loongarch_lasx_xvmini_w:
3959 case Intrinsic::loongarch_lasx_xvmini_d:
3961 lowerVectorSplatImm<5>(
N, 2, DAG,
true));
3962 case Intrinsic::loongarch_lsx_vmini_bu:
3963 case Intrinsic::loongarch_lsx_vmini_hu:
3964 case Intrinsic::loongarch_lsx_vmini_wu:
3965 case Intrinsic::loongarch_lsx_vmini_du:
3966 case Intrinsic::loongarch_lasx_xvmini_bu:
3967 case Intrinsic::loongarch_lasx_xvmini_hu:
3968 case Intrinsic::loongarch_lasx_xvmini_wu:
3969 case Intrinsic::loongarch_lasx_xvmini_du:
3971 lowerVectorSplatImm<5>(
N, 2, DAG));
3972 case Intrinsic::loongarch_lsx_vmul_b:
3973 case Intrinsic::loongarch_lsx_vmul_h:
3974 case Intrinsic::loongarch_lsx_vmul_w:
3975 case Intrinsic::loongarch_lsx_vmul_d:
3976 case Intrinsic::loongarch_lasx_xvmul_b:
3977 case Intrinsic::loongarch_lasx_xvmul_h:
3978 case Intrinsic::loongarch_lasx_xvmul_w:
3979 case Intrinsic::loongarch_lasx_xvmul_d:
3982 case Intrinsic::loongarch_lsx_vmadd_b:
3983 case Intrinsic::loongarch_lsx_vmadd_h:
3984 case Intrinsic::loongarch_lsx_vmadd_w:
3985 case Intrinsic::loongarch_lsx_vmadd_d:
3986 case Intrinsic::loongarch_lasx_xvmadd_b:
3987 case Intrinsic::loongarch_lasx_xvmadd_h:
3988 case Intrinsic::loongarch_lasx_xvmadd_w:
3989 case Intrinsic::loongarch_lasx_xvmadd_d: {
3990 EVT ResTy =
N->getValueType(0);
3995 case Intrinsic::loongarch_lsx_vmsub_b:
3996 case Intrinsic::loongarch_lsx_vmsub_h:
3997 case Intrinsic::loongarch_lsx_vmsub_w:
3998 case Intrinsic::loongarch_lsx_vmsub_d:
3999 case Intrinsic::loongarch_lasx_xvmsub_b:
4000 case Intrinsic::loongarch_lasx_xvmsub_h:
4001 case Intrinsic::loongarch_lasx_xvmsub_w:
4002 case Intrinsic::loongarch_lasx_xvmsub_d: {
4003 EVT ResTy =
N->getValueType(0);
4008 case Intrinsic::loongarch_lsx_vdiv_b:
4009 case Intrinsic::loongarch_lsx_vdiv_h:
4010 case Intrinsic::loongarch_lsx_vdiv_w:
4011 case Intrinsic::loongarch_lsx_vdiv_d:
4012 case Intrinsic::loongarch_lasx_xvdiv_b:
4013 case Intrinsic::loongarch_lasx_xvdiv_h:
4014 case Intrinsic::loongarch_lasx_xvdiv_w:
4015 case Intrinsic::loongarch_lasx_xvdiv_d:
4018 case Intrinsic::loongarch_lsx_vdiv_bu:
4019 case Intrinsic::loongarch_lsx_vdiv_hu:
4020 case Intrinsic::loongarch_lsx_vdiv_wu:
4021 case Intrinsic::loongarch_lsx_vdiv_du:
4022 case Intrinsic::loongarch_lasx_xvdiv_bu:
4023 case Intrinsic::loongarch_lasx_xvdiv_hu:
4024 case Intrinsic::loongarch_lasx_xvdiv_wu:
4025 case Intrinsic::loongarch_lasx_xvdiv_du:
4028 case Intrinsic::loongarch_lsx_vmod_b:
4029 case Intrinsic::loongarch_lsx_vmod_h:
4030 case Intrinsic::loongarch_lsx_vmod_w:
4031 case Intrinsic::loongarch_lsx_vmod_d:
4032 case Intrinsic::loongarch_lasx_xvmod_b:
4033 case Intrinsic::loongarch_lasx_xvmod_h:
4034 case Intrinsic::loongarch_lasx_xvmod_w:
4035 case Intrinsic::loongarch_lasx_xvmod_d:
4038 case Intrinsic::loongarch_lsx_vmod_bu:
4039 case Intrinsic::loongarch_lsx_vmod_hu:
4040 case Intrinsic::loongarch_lsx_vmod_wu:
4041 case Intrinsic::loongarch_lsx_vmod_du:
4042 case Intrinsic::loongarch_lasx_xvmod_bu:
4043 case Intrinsic::loongarch_lasx_xvmod_hu:
4044 case Intrinsic::loongarch_lasx_xvmod_wu:
4045 case Intrinsic::loongarch_lasx_xvmod_du:
4048 case Intrinsic::loongarch_lsx_vand_v:
4049 case Intrinsic::loongarch_lasx_xvand_v:
4052 case Intrinsic::loongarch_lsx_vor_v:
4053 case Intrinsic::loongarch_lasx_xvor_v:
4056 case Intrinsic::loongarch_lsx_vxor_v:
4057 case Intrinsic::loongarch_lasx_xvxor_v:
4060 case Intrinsic::loongarch_lsx_vnor_v:
4061 case Intrinsic::loongarch_lasx_xvnor_v: {
4066 case Intrinsic::loongarch_lsx_vandi_b:
4067 case Intrinsic::loongarch_lasx_xvandi_b:
4069 lowerVectorSplatImm<8>(
N, 2, DAG));
4070 case Intrinsic::loongarch_lsx_vori_b:
4071 case Intrinsic::loongarch_lasx_xvori_b:
4073 lowerVectorSplatImm<8>(
N, 2, DAG));
4074 case Intrinsic::loongarch_lsx_vxori_b:
4075 case Intrinsic::loongarch_lasx_xvxori_b:
4077 lowerVectorSplatImm<8>(
N, 2, DAG));
4078 case Intrinsic::loongarch_lsx_vsll_b:
4079 case Intrinsic::loongarch_lsx_vsll_h:
4080 case Intrinsic::loongarch_lsx_vsll_w:
4081 case Intrinsic::loongarch_lsx_vsll_d:
4082 case Intrinsic::loongarch_lasx_xvsll_b:
4083 case Intrinsic::loongarch_lasx_xvsll_h:
4084 case Intrinsic::loongarch_lasx_xvsll_w:
4085 case Intrinsic::loongarch_lasx_xvsll_d:
4088 case Intrinsic::loongarch_lsx_vslli_b:
4089 case Intrinsic::loongarch_lasx_xvslli_b:
4091 lowerVectorSplatImm<3>(
N, 2, DAG));
4092 case Intrinsic::loongarch_lsx_vslli_h:
4093 case Intrinsic::loongarch_lasx_xvslli_h:
4095 lowerVectorSplatImm<4>(
N, 2, DAG));
4096 case Intrinsic::loongarch_lsx_vslli_w:
4097 case Intrinsic::loongarch_lasx_xvslli_w:
4099 lowerVectorSplatImm<5>(
N, 2, DAG));
4100 case Intrinsic::loongarch_lsx_vslli_d:
4101 case Intrinsic::loongarch_lasx_xvslli_d:
4103 lowerVectorSplatImm<6>(
N, 2, DAG));
4104 case Intrinsic::loongarch_lsx_vsrl_b:
4105 case Intrinsic::loongarch_lsx_vsrl_h:
4106 case Intrinsic::loongarch_lsx_vsrl_w:
4107 case Intrinsic::loongarch_lsx_vsrl_d:
4108 case Intrinsic::loongarch_lasx_xvsrl_b:
4109 case Intrinsic::loongarch_lasx_xvsrl_h:
4110 case Intrinsic::loongarch_lasx_xvsrl_w:
4111 case Intrinsic::loongarch_lasx_xvsrl_d:
4114 case Intrinsic::loongarch_lsx_vsrli_b:
4115 case Intrinsic::loongarch_lasx_xvsrli_b:
4117 lowerVectorSplatImm<3>(
N, 2, DAG));
4118 case Intrinsic::loongarch_lsx_vsrli_h:
4119 case Intrinsic::loongarch_lasx_xvsrli_h:
4121 lowerVectorSplatImm<4>(
N, 2, DAG));
4122 case Intrinsic::loongarch_lsx_vsrli_w:
4123 case Intrinsic::loongarch_lasx_xvsrli_w:
4125 lowerVectorSplatImm<5>(
N, 2, DAG));
4126 case Intrinsic::loongarch_lsx_vsrli_d:
4127 case Intrinsic::loongarch_lasx_xvsrli_d:
4129 lowerVectorSplatImm<6>(
N, 2, DAG));
4130 case Intrinsic::loongarch_lsx_vsra_b:
4131 case Intrinsic::loongarch_lsx_vsra_h:
4132 case Intrinsic::loongarch_lsx_vsra_w:
4133 case Intrinsic::loongarch_lsx_vsra_d:
4134 case Intrinsic::loongarch_lasx_xvsra_b:
4135 case Intrinsic::loongarch_lasx_xvsra_h:
4136 case Intrinsic::loongarch_lasx_xvsra_w:
4137 case Intrinsic::loongarch_lasx_xvsra_d:
4140 case Intrinsic::loongarch_lsx_vsrai_b:
4141 case Intrinsic::loongarch_lasx_xvsrai_b:
4143 lowerVectorSplatImm<3>(
N, 2, DAG));
4144 case Intrinsic::loongarch_lsx_vsrai_h:
4145 case Intrinsic::loongarch_lasx_xvsrai_h:
4147 lowerVectorSplatImm<4>(
N, 2, DAG));
4148 case Intrinsic::loongarch_lsx_vsrai_w:
4149 case Intrinsic::loongarch_lasx_xvsrai_w:
4151 lowerVectorSplatImm<5>(
N, 2, DAG));
4152 case Intrinsic::loongarch_lsx_vsrai_d:
4153 case Intrinsic::loongarch_lasx_xvsrai_d:
4155 lowerVectorSplatImm<6>(
N, 2, DAG));
4156 case Intrinsic::loongarch_lsx_vclz_b:
4157 case Intrinsic::loongarch_lsx_vclz_h:
4158 case Intrinsic::loongarch_lsx_vclz_w:
4159 case Intrinsic::loongarch_lsx_vclz_d:
4160 case Intrinsic::loongarch_lasx_xvclz_b:
4161 case Intrinsic::loongarch_lasx_xvclz_h:
4162 case Intrinsic::loongarch_lasx_xvclz_w:
4163 case Intrinsic::loongarch_lasx_xvclz_d:
4165 case Intrinsic::loongarch_lsx_vpcnt_b:
4166 case Intrinsic::loongarch_lsx_vpcnt_h:
4167 case Intrinsic::loongarch_lsx_vpcnt_w:
4168 case Intrinsic::loongarch_lsx_vpcnt_d:
4169 case Intrinsic::loongarch_lasx_xvpcnt_b:
4170 case Intrinsic::loongarch_lasx_xvpcnt_h:
4171 case Intrinsic::loongarch_lasx_xvpcnt_w:
4172 case Intrinsic::loongarch_lasx_xvpcnt_d:
4174 case Intrinsic::loongarch_lsx_vbitclr_b:
4175 case Intrinsic::loongarch_lsx_vbitclr_h:
4176 case Intrinsic::loongarch_lsx_vbitclr_w:
4177 case Intrinsic::loongarch_lsx_vbitclr_d:
4178 case Intrinsic::loongarch_lasx_xvbitclr_b:
4179 case Intrinsic::loongarch_lasx_xvbitclr_h:
4180 case Intrinsic::loongarch_lasx_xvbitclr_w:
4181 case Intrinsic::loongarch_lasx_xvbitclr_d:
4183 case Intrinsic::loongarch_lsx_vbitclri_b:
4184 case Intrinsic::loongarch_lasx_xvbitclri_b:
4185 return lowerVectorBitClearImm<3>(
N, DAG);
4186 case Intrinsic::loongarch_lsx_vbitclri_h:
4187 case Intrinsic::loongarch_lasx_xvbitclri_h:
4188 return lowerVectorBitClearImm<4>(
N, DAG);
4189 case Intrinsic::loongarch_lsx_vbitclri_w:
4190 case Intrinsic::loongarch_lasx_xvbitclri_w:
4191 return lowerVectorBitClearImm<5>(
N, DAG);
4192 case Intrinsic::loongarch_lsx_vbitclri_d:
4193 case Intrinsic::loongarch_lasx_xvbitclri_d:
4194 return lowerVectorBitClearImm<6>(
N, DAG);
4195 case Intrinsic::loongarch_lsx_vbitset_b:
4196 case Intrinsic::loongarch_lsx_vbitset_h:
4197 case Intrinsic::loongarch_lsx_vbitset_w:
4198 case Intrinsic::loongarch_lsx_vbitset_d:
4199 case Intrinsic::loongarch_lasx_xvbitset_b:
4200 case Intrinsic::loongarch_lasx_xvbitset_h:
4201 case Intrinsic::loongarch_lasx_xvbitset_w:
4202 case Intrinsic::loongarch_lasx_xvbitset_d: {
4203 EVT VecTy =
N->getValueType(0);
4209 case Intrinsic::loongarch_lsx_vbitseti_b:
4210 case Intrinsic::loongarch_lasx_xvbitseti_b:
4211 return lowerVectorBitSetImm<3>(
N, DAG);
4212 case Intrinsic::loongarch_lsx_vbitseti_h:
4213 case Intrinsic::loongarch_lasx_xvbitseti_h:
4214 return lowerVectorBitSetImm<4>(
N, DAG);
4215 case Intrinsic::loongarch_lsx_vbitseti_w:
4216 case Intrinsic::loongarch_lasx_xvbitseti_w:
4217 return lowerVectorBitSetImm<5>(
N, DAG);
4218 case Intrinsic::loongarch_lsx_vbitseti_d:
4219 case Intrinsic::loongarch_lasx_xvbitseti_d:
4220 return lowerVectorBitSetImm<6>(
N, DAG);
4221 case Intrinsic::loongarch_lsx_vbitrev_b:
4222 case Intrinsic::loongarch_lsx_vbitrev_h:
4223 case Intrinsic::loongarch_lsx_vbitrev_w:
4224 case Intrinsic::loongarch_lsx_vbitrev_d:
4225 case Intrinsic::loongarch_lasx_xvbitrev_b:
4226 case Intrinsic::loongarch_lasx_xvbitrev_h:
4227 case Intrinsic::loongarch_lasx_xvbitrev_w:
4228 case Intrinsic::loongarch_lasx_xvbitrev_d: {
4229 EVT VecTy =
N->getValueType(0);
4235 case Intrinsic::loongarch_lsx_vbitrevi_b:
4236 case Intrinsic::loongarch_lasx_xvbitrevi_b:
4237 return lowerVectorBitRevImm<3>(
N, DAG);
4238 case Intrinsic::loongarch_lsx_vbitrevi_h:
4239 case Intrinsic::loongarch_lasx_xvbitrevi_h:
4240 return lowerVectorBitRevImm<4>(
N, DAG);
4241 case Intrinsic::loongarch_lsx_vbitrevi_w:
4242 case Intrinsic::loongarch_lasx_xvbitrevi_w:
4243 return lowerVectorBitRevImm<5>(
N, DAG);
4244 case Intrinsic::loongarch_lsx_vbitrevi_d:
4245 case Intrinsic::loongarch_lasx_xvbitrevi_d:
4246 return lowerVectorBitRevImm<6>(
N, DAG);
4247 case Intrinsic::loongarch_lsx_vfadd_s:
4248 case Intrinsic::loongarch_lsx_vfadd_d:
4249 case Intrinsic::loongarch_lasx_xvfadd_s:
4250 case Intrinsic::loongarch_lasx_xvfadd_d:
4253 case Intrinsic::loongarch_lsx_vfsub_s:
4254 case Intrinsic::loongarch_lsx_vfsub_d:
4255 case Intrinsic::loongarch_lasx_xvfsub_s:
4256 case Intrinsic::loongarch_lasx_xvfsub_d:
4259 case Intrinsic::loongarch_lsx_vfmul_s:
4260 case Intrinsic::loongarch_lsx_vfmul_d:
4261 case Intrinsic::loongarch_lasx_xvfmul_s:
4262 case Intrinsic::loongarch_lasx_xvfmul_d:
4265 case Intrinsic::loongarch_lsx_vfdiv_s:
4266 case Intrinsic::loongarch_lsx_vfdiv_d:
4267 case Intrinsic::loongarch_lasx_xvfdiv_s:
4268 case Intrinsic::loongarch_lasx_xvfdiv_d:
4271 case Intrinsic::loongarch_lsx_vfmadd_s:
4272 case Intrinsic::loongarch_lsx_vfmadd_d: