#include "llvm/IR/IntrinsicsLoongArch.h"
#define DEBUG_TYPE "loongarch-isel-lowering"
    cl::desc("Trap on integer division by zero."),
  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())
      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};
  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
  if (Subtarget.hasBasicF()) {
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.hasBasicD()) {
  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {
  if (Subtarget.hasExtLSX())
  switch (Op.getOpcode()) {
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerEH_DWARF_CFA(Op, DAG);
    return lowerGlobalAddress(Op, DAG);
    return lowerGlobalTLSAddress(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_VOID(Op, DAG);
    return lowerBlockAddress(Op, DAG);
    return lowerJumpTable(Op, DAG);
    return lowerShiftLeftParts(Op, DAG);
    return lowerShiftRightParts(Op, DAG, true);
    return lowerShiftRightParts(Op, DAG, false);
    return lowerConstantPool(Op, DAG);
    return lowerFP_TO_SINT(Op, DAG);
    return lowerBITCAST(Op, DAG);
    return lowerUINT_TO_FP(Op, DAG);
    return lowerSINT_TO_FP(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerWRITE_REGISTER(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
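/// Helper used by the shuffle lowerings below: starting at the given iterator
/// and sampling every CheckStride-th mask element, check that the elements
/// follow the arithmetic sequence ExpectedIndex, ExpectedIndex +
/// ExpectedIndexStride, ...; a mask element of -1 matches anything.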
template <typename ValType>
                               unsigned CheckStride,
                               ValType ExpectedIndex, unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
    ExpectedIndex += ExpectedIndexStride;
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
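/// The block below matches a splat shuffle: every defined mask element must
/// equal the single SplatIndex, which is then encoded as a 64-bit immediate
/// for the element-replicate (vreplvei-style) lowering.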
  for (const auto &M : Mask) {
  if (SplatIndex == -1)
  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Mask.begin(), 1, Mask.end(), SplatIndex, 0)) {
    APInt Imm(64, SplatIndex);
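/// The block below tries to express the mask as one 4-element permutation
/// repeated over every group of four elements (SubMask), so the whole shuffle
/// can be encoded as a single 8-bit immediate in the vshuf4i form.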
  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < 4; ++i) {
    for (unsigned j = i; j < Mask.size(); j += 4) {
      if (Idx < 0 || Idx >= 4)
      if (SubMask[i] == -1)
      else if (Idx != -1 && Idx != SubMask[i])
  for (int i = 3; i >= 0; --i) {
    int Idx = SubMask[i];
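/// The matchers that follow recognise the even/odd pack, interleave and pick
/// style masks: each output stream is checked with fitsRegularPattern against
/// a fixed stride, taken from V1 when the indices are below Mask.size() and
/// from V2 when they are at or above it.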
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 2, End, 0, 2))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 2))
  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 2))
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 2, End, 1, 2))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + 1, 2))
  if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + 1, 2))
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + HalfSize, 1))
  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + HalfSize,
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 2, End, 0, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 1))
  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 1))
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2))
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size(), 2))
  if (fitsRegularPattern<int>(Mid, 1, End, 0, 2))
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size(), 2))
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2))
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size() + 1, 2))
  if (fitsRegularPattern<int>(Mid, 1, End, 1, 2))
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size() + 1, 2))
835 "Vector type is unsupported for lsx!");
837 "Two operands have different types!");
839 "Unexpected mask size for shuffle!");
840 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
  for (const auto &M : Mask) {
  if (SplatIndex == -1)
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Begin, 1, End - HalfSize, SplatIndex, 0) &&
      fitsRegularPattern<int>(Begin + HalfSize, 1, End, SplatIndex + HalfSize,
    APInt Imm(64, SplatIndex);
  if (Mask.size() <= 4)
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, HalfSize - LeftSize,
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize + LeftSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))
  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, HalfSize - LeftSize,
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize + LeftSize,
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, Mask.size(), 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))
  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, Mask.size(),
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 0, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize, 2))
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size(), 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize, 2))
  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 0, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize, 2))
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size(), 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize, 2))
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 1, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize + 1, 2))
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize + 1,
  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 1, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize + 1, 2))
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize + 1,
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();
  for (auto it = Begin; it < Mid; it++) {
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it <= MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;
  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");
  for (auto it = Mid; it < End; it++) {
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;
  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  HalfMaskType preMask = None, postMask = None;
  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    preMask = LowLaneTy;
  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    postMask = HighLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    postMask = LowLaneTy;
  if (preMask == HighLaneTy && postMask == LowLaneTy) {
  if (preMask == LowLaneTy && postMask == HighLaneTy) {
    if (!V2.isUndef()) {
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    if (!V2.isUndef()) {
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    if (!V2.isUndef()) {
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
1260 "Vector type is unsupported for lasx!");
1262 "Two operands have different types!");
1264 "Unexpected mask size for shuffle!");
1265 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
1266 assert(Mask.size() >= 4 &&
"Mask size is less than 4.");
  MVT VT = Op.getSimpleValueType();
  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    for (int &M : NewMask)
      if (M >= NumElements)
  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
                [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");
  if (isa<ConstantSDNode>(Op))
  if (isa<ConstantFPSDNode>(Op))
  EVT ResTy = Op->getValueType(0);
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))
  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
      SplatBitSize <= 64) {
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
    switch (SplatBitSize) {
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
    if (ViaVecTy != ResTy)
  EVT ResTy = Node->getValueType(0);
  for (unsigned i = 0; i < NumElts; ++i) {
      Node->getOperand(i),
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  EVT VecTy = Op->getOperand(0)->getValueType(0);
  if (isa<ConstantSDNode>(Idx) &&
      (EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
       EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
  if (isa<ConstantSDNode>(Op->getOperand(2)))
  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
                   "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
                   "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
                            "be a constant integer");
  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;
    int Offset = -(GRLenInBytes * 2);
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can only be determined for the current frame");
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
         !Subtarget.hasBasicD() && "unexpected target features");
  auto *C = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))
      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLT(MVT::i32))
  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =
         !Subtarget.hasBasicD() && "unexpected target features");
      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLE(MVT::i32))
  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =
  if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {
  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
                                     N->getOffset(), Flags);
template <class NodeTy>
                                         bool IsLocal) const {
    assert(Subtarget.is64Bit() && "Large code model requires LA64");
  return getAddr(cast<BlockAddressSDNode>(Op), DAG,
  return getAddr(cast<JumpTableSDNode>(Op), DAG,
  return getAddr(cast<ConstantPoolSDNode>(Op), DAG,
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
    if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())
                                                     unsigned Opc, bool UseGOT,
  Args.push_back(Entry);
LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");
  assert(N->getOffset() == 0 && "unexpected offset in global node");
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_PC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC_PC,
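/// Verify that the constant operand at index ImmOp of the intrinsic fits in
/// an N-bit (signed or unsigned, per IsSigned) immediate field, emitting the
/// "argument out of range" diagnostic when it does not.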
template <unsigned N>
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  switch (Op.getConstantOperandVal(0)) {
1954 case Intrinsic::thread_pointer: {
1958 case Intrinsic::loongarch_lsx_vpickve2gr_d:
1959 case Intrinsic::loongarch_lsx_vpickve2gr_du:
1960 case Intrinsic::loongarch_lsx_vreplvei_d:
1961 case Intrinsic::loongarch_lasx_xvrepl128vei_d:
    return checkIntrinsicImmArg<1>(Op, 2, DAG);
1963 case Intrinsic::loongarch_lsx_vreplvei_w:
1964 case Intrinsic::loongarch_lasx_xvrepl128vei_w:
1965 case Intrinsic::loongarch_lasx_xvpickve2gr_d:
1966 case Intrinsic::loongarch_lasx_xvpickve2gr_du:
1967 case Intrinsic::loongarch_lasx_xvpickve_d:
1968 case Intrinsic::loongarch_lasx_xvpickve_d_f:
    return checkIntrinsicImmArg<2>(Op, 2, DAG);
1970 case Intrinsic::loongarch_lasx_xvinsve0_d:
    return checkIntrinsicImmArg<2>(Op, 3, DAG);
1972 case Intrinsic::loongarch_lsx_vsat_b:
1973 case Intrinsic::loongarch_lsx_vsat_bu:
1974 case Intrinsic::loongarch_lsx_vrotri_b:
1975 case Intrinsic::loongarch_lsx_vsllwil_h_b:
1976 case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
1977 case Intrinsic::loongarch_lsx_vsrlri_b:
1978 case Intrinsic::loongarch_lsx_vsrari_b:
1979 case Intrinsic::loongarch_lsx_vreplvei_h:
1980 case Intrinsic::loongarch_lasx_xvsat_b:
1981 case Intrinsic::loongarch_lasx_xvsat_bu:
1982 case Intrinsic::loongarch_lasx_xvrotri_b:
1983 case Intrinsic::loongarch_lasx_xvsllwil_h_b:
1984 case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
1985 case Intrinsic::loongarch_lasx_xvsrlri_b:
1986 case Intrinsic::loongarch_lasx_xvsrari_b:
1987 case Intrinsic::loongarch_lasx_xvrepl128vei_h:
1988 case Intrinsic::loongarch_lasx_xvpickve_w:
1989 case Intrinsic::loongarch_lasx_xvpickve_w_f:
    return checkIntrinsicImmArg<3>(Op, 2, DAG);
1991 case Intrinsic::loongarch_lasx_xvinsve0_w:
    return checkIntrinsicImmArg<3>(Op, 3, DAG);
1993 case Intrinsic::loongarch_lsx_vsat_h:
1994 case Intrinsic::loongarch_lsx_vsat_hu:
1995 case Intrinsic::loongarch_lsx_vrotri_h:
1996 case Intrinsic::loongarch_lsx_vsllwil_w_h:
1997 case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
1998 case Intrinsic::loongarch_lsx_vsrlri_h:
1999 case Intrinsic::loongarch_lsx_vsrari_h:
2000 case Intrinsic::loongarch_lsx_vreplvei_b:
2001 case Intrinsic::loongarch_lasx_xvsat_h:
2002 case Intrinsic::loongarch_lasx_xvsat_hu:
2003 case Intrinsic::loongarch_lasx_xvrotri_h:
2004 case Intrinsic::loongarch_lasx_xvsllwil_w_h:
2005 case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
2006 case Intrinsic::loongarch_lasx_xvsrlri_h:
2007 case Intrinsic::loongarch_lasx_xvsrari_h:
2008 case Intrinsic::loongarch_lasx_xvrepl128vei_b:
    return checkIntrinsicImmArg<4>(Op, 2, DAG);
2010 case Intrinsic::loongarch_lsx_vsrlni_b_h:
2011 case Intrinsic::loongarch_lsx_vsrani_b_h:
2012 case Intrinsic::loongarch_lsx_vsrlrni_b_h:
2013 case Intrinsic::loongarch_lsx_vsrarni_b_h:
2014 case Intrinsic::loongarch_lsx_vssrlni_b_h:
2015 case Intrinsic::loongarch_lsx_vssrani_b_h:
2016 case Intrinsic::loongarch_lsx_vssrlni_bu_h:
2017 case Intrinsic::loongarch_lsx_vssrani_bu_h:
2018 case Intrinsic::loongarch_lsx_vssrlrni_b_h:
2019 case Intrinsic::loongarch_lsx_vssrarni_b_h:
2020 case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
2021 case Intrinsic::loongarch_lsx_vssrarni_bu_h:
2022 case Intrinsic::loongarch_lasx_xvsrlni_b_h:
2023 case Intrinsic::loongarch_lasx_xvsrani_b_h:
2024 case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
2025 case Intrinsic::loongarch_lasx_xvsrarni_b_h:
2026 case Intrinsic::loongarch_lasx_xvssrlni_b_h:
2027 case Intrinsic::loongarch_lasx_xvssrani_b_h:
2028 case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
2029 case Intrinsic::loongarch_lasx_xvssrani_bu_h:
2030 case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
2031 case Intrinsic::loongarch_lasx_xvssrarni_b_h:
2032 case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
2033 case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
    return checkIntrinsicImmArg<4>(Op, 3, DAG);
2035 case Intrinsic::loongarch_lsx_vsat_w:
2036 case Intrinsic::loongarch_lsx_vsat_wu:
2037 case Intrinsic::loongarch_lsx_vrotri_w:
2038 case Intrinsic::loongarch_lsx_vsllwil_d_w:
2039 case Intrinsic::loongarch_lsx_vsllwil_du_wu:
2040 case Intrinsic::loongarch_lsx_vsrlri_w:
2041 case Intrinsic::loongarch_lsx_vsrari_w:
2042 case Intrinsic::loongarch_lsx_vslei_bu:
2043 case Intrinsic::loongarch_lsx_vslei_hu:
2044 case Intrinsic::loongarch_lsx_vslei_wu:
2045 case Intrinsic::loongarch_lsx_vslei_du:
2046 case Intrinsic::loongarch_lsx_vslti_bu:
2047 case Intrinsic::loongarch_lsx_vslti_hu:
2048 case Intrinsic::loongarch_lsx_vslti_wu:
2049 case Intrinsic::loongarch_lsx_vslti_du:
2050 case Intrinsic::loongarch_lsx_vbsll_v:
2051 case Intrinsic::loongarch_lsx_vbsrl_v:
2052 case Intrinsic::loongarch_lasx_xvsat_w:
2053 case Intrinsic::loongarch_lasx_xvsat_wu:
2054 case Intrinsic::loongarch_lasx_xvrotri_w:
2055 case Intrinsic::loongarch_lasx_xvsllwil_d_w:
2056 case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
2057 case Intrinsic::loongarch_lasx_xvsrlri_w:
2058 case Intrinsic::loongarch_lasx_xvsrari_w:
2059 case Intrinsic::loongarch_lasx_xvslei_bu:
2060 case Intrinsic::loongarch_lasx_xvslei_hu:
2061 case Intrinsic::loongarch_lasx_xvslei_wu:
2062 case Intrinsic::loongarch_lasx_xvslei_du:
2063 case Intrinsic::loongarch_lasx_xvslti_bu:
2064 case Intrinsic::loongarch_lasx_xvslti_hu:
2065 case Intrinsic::loongarch_lasx_xvslti_wu:
2066 case Intrinsic::loongarch_lasx_xvslti_du:
2067 case Intrinsic::loongarch_lasx_xvbsll_v:
2068 case Intrinsic::loongarch_lasx_xvbsrl_v:
    return checkIntrinsicImmArg<5>(Op, 2, DAG);
2070 case Intrinsic::loongarch_lsx_vseqi_b:
2071 case Intrinsic::loongarch_lsx_vseqi_h:
2072 case Intrinsic::loongarch_lsx_vseqi_w:
2073 case Intrinsic::loongarch_lsx_vseqi_d:
2074 case Intrinsic::loongarch_lsx_vslei_b:
2075 case Intrinsic::loongarch_lsx_vslei_h:
2076 case Intrinsic::loongarch_lsx_vslei_w:
2077 case Intrinsic::loongarch_lsx_vslei_d:
2078 case Intrinsic::loongarch_lsx_vslti_b:
2079 case Intrinsic::loongarch_lsx_vslti_h:
2080 case Intrinsic::loongarch_lsx_vslti_w:
2081 case Intrinsic::loongarch_lsx_vslti_d:
2082 case Intrinsic::loongarch_lasx_xvseqi_b:
2083 case Intrinsic::loongarch_lasx_xvseqi_h:
2084 case Intrinsic::loongarch_lasx_xvseqi_w:
2085 case Intrinsic::loongarch_lasx_xvseqi_d:
2086 case Intrinsic::loongarch_lasx_xvslei_b:
2087 case Intrinsic::loongarch_lasx_xvslei_h:
2088 case Intrinsic::loongarch_lasx_xvslei_w:
2089 case Intrinsic::loongarch_lasx_xvslei_d:
2090 case Intrinsic::loongarch_lasx_xvslti_b:
2091 case Intrinsic::loongarch_lasx_xvslti_h:
2092 case Intrinsic::loongarch_lasx_xvslti_w:
2093 case Intrinsic::loongarch_lasx_xvslti_d:
    return checkIntrinsicImmArg<5>(Op, 2, DAG, true);
2095 case Intrinsic::loongarch_lsx_vsrlni_h_w:
2096 case Intrinsic::loongarch_lsx_vsrani_h_w:
2097 case Intrinsic::loongarch_lsx_vsrlrni_h_w:
2098 case Intrinsic::loongarch_lsx_vsrarni_h_w:
2099 case Intrinsic::loongarch_lsx_vssrlni_h_w:
2100 case Intrinsic::loongarch_lsx_vssrani_h_w:
2101 case Intrinsic::loongarch_lsx_vssrlni_hu_w:
2102 case Intrinsic::loongarch_lsx_vssrani_hu_w:
2103 case Intrinsic::loongarch_lsx_vssrlrni_h_w:
2104 case Intrinsic::loongarch_lsx_vssrarni_h_w:
2105 case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
2106 case Intrinsic::loongarch_lsx_vssrarni_hu_w:
2107 case Intrinsic::loongarch_lsx_vfrstpi_b:
2108 case Intrinsic::loongarch_lsx_vfrstpi_h:
2109 case Intrinsic::loongarch_lasx_xvsrlni_h_w:
2110 case Intrinsic::loongarch_lasx_xvsrani_h_w:
2111 case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
2112 case Intrinsic::loongarch_lasx_xvsrarni_h_w:
2113 case Intrinsic::loongarch_lasx_xvssrlni_h_w:
2114 case Intrinsic::loongarch_lasx_xvssrani_h_w:
2115 case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
2116 case Intrinsic::loongarch_lasx_xvssrani_hu_w:
2117 case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
2118 case Intrinsic::loongarch_lasx_xvssrarni_h_w:
2119 case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
2120 case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
2121 case Intrinsic::loongarch_lasx_xvfrstpi_b:
2122 case Intrinsic::loongarch_lasx_xvfrstpi_h:
    return checkIntrinsicImmArg<5>(Op, 3, DAG);
2124 case Intrinsic::loongarch_lsx_vsat_d:
2125 case Intrinsic::loongarch_lsx_vsat_du:
2126 case Intrinsic::loongarch_lsx_vrotri_d:
2127 case Intrinsic::loongarch_lsx_vsrlri_d:
2128 case Intrinsic::loongarch_lsx_vsrari_d:
2129 case Intrinsic::loongarch_lasx_xvsat_d:
2130 case Intrinsic::loongarch_lasx_xvsat_du:
2131 case Intrinsic::loongarch_lasx_xvrotri_d:
2132 case Intrinsic::loongarch_lasx_xvsrlri_d:
2133 case Intrinsic::loongarch_lasx_xvsrari_d:
    return checkIntrinsicImmArg<6>(Op, 2, DAG);
2135 case Intrinsic::loongarch_lsx_vsrlni_w_d:
2136 case Intrinsic::loongarch_lsx_vsrani_w_d:
2137 case Intrinsic::loongarch_lsx_vsrlrni_w_d:
2138 case Intrinsic::loongarch_lsx_vsrarni_w_d:
2139 case Intrinsic::loongarch_lsx_vssrlni_w_d:
2140 case Intrinsic::loongarch_lsx_vssrani_w_d:
2141 case Intrinsic::loongarch_lsx_vssrlni_wu_d:
2142 case Intrinsic::loongarch_lsx_vssrani_wu_d:
2143 case Intrinsic::loongarch_lsx_vssrlrni_w_d:
2144 case Intrinsic::loongarch_lsx_vssrarni_w_d:
2145 case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
2146 case Intrinsic::loongarch_lsx_vssrarni_wu_d:
2147 case Intrinsic::loongarch_lasx_xvsrlni_w_d:
2148 case Intrinsic::loongarch_lasx_xvsrani_w_d:
2149 case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
2150 case Intrinsic::loongarch_lasx_xvsrarni_w_d:
2151 case Intrinsic::loongarch_lasx_xvssrlni_w_d:
2152 case Intrinsic::loongarch_lasx_xvssrani_w_d:
2153 case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
2154 case Intrinsic::loongarch_lasx_xvssrani_wu_d:
2155 case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
2156 case Intrinsic::loongarch_lasx_xvssrarni_w_d:
2157 case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
2158 case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
    return checkIntrinsicImmArg<6>(Op, 3, DAG);
2160 case Intrinsic::loongarch_lsx_vsrlni_d_q:
2161 case Intrinsic::loongarch_lsx_vsrani_d_q:
2162 case Intrinsic::loongarch_lsx_vsrlrni_d_q:
2163 case Intrinsic::loongarch_lsx_vsrarni_d_q:
2164 case Intrinsic::loongarch_lsx_vssrlni_d_q:
2165 case Intrinsic::loongarch_lsx_vssrani_d_q:
2166 case Intrinsic::loongarch_lsx_vssrlni_du_q:
2167 case Intrinsic::loongarch_lsx_vssrani_du_q:
2168 case Intrinsic::loongarch_lsx_vssrlrni_d_q:
2169 case Intrinsic::loongarch_lsx_vssrarni_d_q:
2170 case Intrinsic::loongarch_lsx_vssrlrni_du_q:
2171 case Intrinsic::loongarch_lsx_vssrarni_du_q:
2172 case Intrinsic::loongarch_lasx_xvsrlni_d_q:
2173 case Intrinsic::loongarch_lasx_xvsrani_d_q:
2174 case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
2175 case Intrinsic::loongarch_lasx_xvsrarni_d_q:
2176 case Intrinsic::loongarch_lasx_xvssrlni_d_q:
2177 case Intrinsic::loongarch_lasx_xvssrani_d_q:
2178 case Intrinsic::loongarch_lasx_xvssrlni_du_q:
2179 case Intrinsic::loongarch_lasx_xvssrani_du_q:
2180 case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
2181 case Intrinsic::loongarch_lasx_xvssrarni_d_q:
2182 case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
2183 case Intrinsic::loongarch_lasx_xvssrarni_du_q:
    return checkIntrinsicImmArg<7>(Op, 3, DAG);
2185 case Intrinsic::loongarch_lsx_vnori_b:
2186 case Intrinsic::loongarch_lsx_vshuf4i_b:
2187 case Intrinsic::loongarch_lsx_vshuf4i_h:
2188 case Intrinsic::loongarch_lsx_vshuf4i_w:
2189 case Intrinsic::loongarch_lasx_xvnori_b:
2190 case Intrinsic::loongarch_lasx_xvshuf4i_b:
2191 case Intrinsic::loongarch_lasx_xvshuf4i_h:
2192 case Intrinsic::loongarch_lasx_xvshuf4i_w:
2193 case Intrinsic::loongarch_lasx_xvpermi_d:
    return checkIntrinsicImmArg<8>(Op, 2, DAG);
2195 case Intrinsic::loongarch_lsx_vshuf4i_d:
2196 case Intrinsic::loongarch_lsx_vpermi_w:
2197 case Intrinsic::loongarch_lsx_vbitseli_b:
2198 case Intrinsic::loongarch_lsx_vextrins_b:
2199 case Intrinsic::loongarch_lsx_vextrins_h:
2200 case Intrinsic::loongarch_lsx_vextrins_w:
2201 case Intrinsic::loongarch_lsx_vextrins_d:
2202 case Intrinsic::loongarch_lasx_xvshuf4i_d:
2203 case Intrinsic::loongarch_lasx_xvpermi_w:
2204 case Intrinsic::loongarch_lasx_xvpermi_q:
2205 case Intrinsic::loongarch_lasx_xvbitseli_b:
2206 case Intrinsic::loongarch_lasx_xvextrins_b:
2207 case Intrinsic::loongarch_lasx_xvextrins_h:
2208 case Intrinsic::loongarch_lasx_xvextrins_w:
2209 case Intrinsic::loongarch_lasx_xvextrins_d:
    return checkIntrinsicImmArg<8>(Op, 3, DAG);
2211 case Intrinsic::loongarch_lsx_vrepli_b:
2212 case Intrinsic::loongarch_lsx_vrepli_h:
2213 case Intrinsic::loongarch_lsx_vrepli_w:
2214 case Intrinsic::loongarch_lsx_vrepli_d:
2215 case Intrinsic::loongarch_lasx_xvrepli_b:
2216 case Intrinsic::loongarch_lasx_xvrepli_h:
2217 case Intrinsic::loongarch_lasx_xvrepli_w:
2218 case Intrinsic::loongarch_lasx_xvrepli_d:
    return checkIntrinsicImmArg<10>(Op, 1, DAG, true);
2220 case Intrinsic::loongarch_lsx_vldi:
2221 case Intrinsic::loongarch_lasx_xvldi:
    return checkIntrinsicImmArg<13>(Op, 1, DAG, true);
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  EVT VT = Op.getValueType();
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
  switch (Op.getConstantOperandVal(1)) {
2250 case Intrinsic::loongarch_crc_w_b_w:
2251 case Intrinsic::loongarch_crc_w_h_w:
2252 case Intrinsic::loongarch_crc_w_w_w:
2253 case Intrinsic::loongarch_crc_w_d_w:
2254 case Intrinsic::loongarch_crcc_w_b_w:
2255 case Intrinsic::loongarch_crcc_w_h_w:
2256 case Intrinsic::loongarch_crcc_w_w_w:
2257 case Intrinsic::loongarch_crcc_w_d_w:
2259 case Intrinsic::loongarch_csrrd_w:
2260 case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<14>(Imm)
2267 case Intrinsic::loongarch_csrwr_w:
2268 case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<14>(Imm)
                         {Chain, Op.getOperand(2),
2276 case Intrinsic::loongarch_csrxchg_w:
2277 case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
    return !isUInt<14>(Imm)
                         {Chain, Op.getOperand(2), Op.getOperand(3),
2285 case Intrinsic::loongarch_iocsrrd_d: {
2290#define IOCSRRD_CASE(NAME, NODE) \
2291 case Intrinsic::loongarch_##NAME: { \
2292 return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other}, \
2293 {Chain, Op.getOperand(2)}); \
2299 case Intrinsic::loongarch_cpucfg: {
                       {Chain, Op.getOperand(2)});
2303 case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
2305 return !isUInt<8>(Imm)
2309 case Intrinsic::loongarch_movfcsr2gr: {
2310 if (!Subtarget.hasBasicF())
    unsigned Imm = Op.getConstantOperandVal(2);
2313 return !isUInt<2>(Imm)
2318 case Intrinsic::loongarch_lsx_vld:
2319 case Intrinsic::loongarch_lsx_vldrepl_b:
2320 case Intrinsic::loongarch_lasx_xvld:
2321 case Intrinsic::loongarch_lasx_xvldrepl_b:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
2325 case Intrinsic::loongarch_lsx_vldrepl_h:
2326 case Intrinsic::loongarch_lasx_xvldrepl_h:
    return !isShiftedInt<11, 1>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                     Op, "argument out of range or not a multiple of 2", DAG)
2332 case Intrinsic::loongarch_lsx_vldrepl_w:
2333 case Intrinsic::loongarch_lasx_xvldrepl_w:
    return !isShiftedInt<10, 2>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                     Op, "argument out of range or not a multiple of 4", DAG)
2339 case Intrinsic::loongarch_lsx_vldrepl_d:
2340 case Intrinsic::loongarch_lasx_xvldrepl_d:
    return !isShiftedInt<9, 3>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                     Op, "argument out of range or not a multiple of 8", DAG)
2355 return Op.getOperand(0);
  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
2370 switch (IntrinsicEnum) {
2374 case Intrinsic::loongarch_cacop_d:
2375 case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
    int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
    if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
2387 case Intrinsic::loongarch_dbar: {
2389 return !isUInt<15>(Imm)
2394 case Intrinsic::loongarch_ibar: {
2396 return !isUInt<15>(Imm)
2401 case Intrinsic::loongarch_break: {
2403 return !isUInt<15>(Imm)
2408 case Intrinsic::loongarch_movgr2fcsr: {
2409 if (!Subtarget.hasBasicF())
2412 return !isUInt<2>(Imm)
2419 case Intrinsic::loongarch_syscall: {
2421 return !isUInt<15>(Imm)
2426#define IOCSRWR_CASE(NAME, NODE) \
2427 case Intrinsic::loongarch_##NAME: { \
2428 SDValue Op3 = Op.getOperand(3); \
2429 return Subtarget.is64Bit() \
2430 ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, \
2431 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
2432 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3)) \
2433 : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2, \
2440 case Intrinsic::loongarch_iocsrwr_d: {
2448#define ASRT_LE_GT_CASE(NAME) \
2449 case Intrinsic::loongarch_##NAME: { \
2450 return !Subtarget.is64Bit() \
2451 ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG) \
2456#undef ASRT_LE_GT_CASE
2457 case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
2464 case Intrinsic::loongarch_lsx_vst:
2465 case Intrinsic::loongarch_lasx_xvst:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue())
2469 case Intrinsic::loongarch_lasx_xvstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<5>(Op.getConstantOperandVal(5)))
2474 case Intrinsic::loongarch_lsx_vstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
2479 case Intrinsic::loongarch_lasx_xvstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 2", DAG)
2486 case Intrinsic::loongarch_lsx_vstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 2", DAG)
2493 case Intrinsic::loongarch_lasx_xvstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 4", DAG)
2500 case Intrinsic::loongarch_lsx_vstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 4", DAG)
2507 case Intrinsic::loongarch_lasx_xvstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 8", DAG)
2514 case Intrinsic::loongarch_lsx_vstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<1>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 8", DAG)
  EVT VT = Lo.getValueType();
  EVT VT = Lo.getValueType();
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
                                                StringRef ErrorMsg,
                                                bool WithChain = true) {
    Results.push_back(N->getOperand(0));
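/// Helper used when replacing vpickve2gr intrinsic results: the lane-index
/// immediate is checked against an N-bit unsigned range and the
/// "argument out of range" diagnostic is emitted when it does not fit.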
template <unsigned N>
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  if (!isUInt<N>(Imm)) {
  switch (N->getConstantOperandVal(0)) {
2746 case Intrinsic::loongarch_lsx_vpickve2gr_b:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
2750 case Intrinsic::loongarch_lsx_vpickve2gr_h:
2751 case Intrinsic::loongarch_lasx_xvpickve2gr_w:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
2755 case Intrinsic::loongarch_lsx_vpickve2gr_w:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
2759 case Intrinsic::loongarch_lsx_vpickve2gr_bu:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
2763 case Intrinsic::loongarch_lsx_vpickve2gr_hu:
2764 case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
2768 case Intrinsic::loongarch_lsx_vpickve2gr_wu:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
2772 case Intrinsic::loongarch_lsx_bz_b:
2773 case Intrinsic::loongarch_lsx_bz_h:
2774 case Intrinsic::loongarch_lsx_bz_w:
2775 case Intrinsic::loongarch_lsx_bz_d:
2776 case Intrinsic::loongarch_lasx_xbz_b:
2777 case Intrinsic::loongarch_lasx_xbz_h:
2778 case Intrinsic::loongarch_lasx_xbz_w:
2779 case Intrinsic::loongarch_lasx_xbz_d:
2783 case Intrinsic::loongarch_lsx_bz_v:
2784 case Intrinsic::loongarch_lasx_xbz_v:
2788 case Intrinsic::loongarch_lsx_bnz_b:
2789 case Intrinsic::loongarch_lsx_bnz_h:
2790 case Intrinsic::loongarch_lsx_bnz_w:
2791 case Intrinsic::loongarch_lsx_bnz_d:
2792 case Intrinsic::loongarch_lasx_xbnz_b:
2793 case Intrinsic::loongarch_lasx_xbnz_h:
2794 case Intrinsic::loongarch_lasx_xbnz_w:
2795 case Intrinsic::loongarch_lasx_xbnz_d:
2799 case Intrinsic::loongarch_lsx_bnz_v:
2800 case Intrinsic::loongarch_lasx_xbnz_v:
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =
    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
           "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
           "Unexpected custom legalisation");
    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
    switch (N->getConstantOperandVal(1)) {
2947 case Intrinsic::loongarch_movfcsr2gr: {
2948 if (!Subtarget.hasBasicF()) {
2953 if (!isUInt<2>(Imm)) {
2965#define CRC_CASE_EXT_BINARYOP(NAME, NODE) \
2966 case Intrinsic::loongarch_##NAME: { \
2967 SDValue NODE = DAG.getNode( \
2968 LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
2969 {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
2970 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
2971 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
2972 Results.push_back(NODE.getValue(1)); \
2981#undef CRC_CASE_EXT_BINARYOP
2983#define CRC_CASE_EXT_UNARYOP(NAME, NODE) \
2984 case Intrinsic::loongarch_##NAME: { \
2985 SDValue NODE = DAG.getNode( \
2986 LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
2988 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
2989 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
2990 Results.push_back(NODE.getValue(1)); \
2995#undef CRC_CASE_EXT_UNARYOP
2996#define CSR_CASE(ID) \
2997 case Intrinsic::loongarch_##ID: { \
2998 if (!Subtarget.is64Bit()) \
2999 emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64); \
3007 case Intrinsic::loongarch_csrrd_w: {
3009 if (!isUInt<14>(Imm)) {
3021 case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
3023 if (!isUInt<14>(Imm)) {
3036 case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);
3038 if (!isUInt<14>(Imm)) {
3052#define IOCSRRD_CASE(NAME, NODE) \
3053 case Intrinsic::loongarch_##NAME: { \
3054 SDValue IOCSRRDResults = \
3055 DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
3056 {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
3057 Results.push_back( \
3058 DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0))); \
3059 Results.push_back(IOCSRRDResults.getValue(1)); \
3066 case Intrinsic::loongarch_cpucfg: {
3075 case Intrinsic::loongarch_lddir_d: {
3088 "On LA64, only 64-bit registers can be read.");
3091 "On LA32, only 32-bit registers can be read.");
    Results.push_back(N->getOperand(0));
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMIdx, SMLen;
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
    NewOperand = FirstOperand;
  msb = lsb + SMLen - 1;
  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)
  SDValue FirstOperand = N->getOperand(0);
  EVT ValTy = N->getValueType(0);
  unsigned MaskIdx, MaskLen;
      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))
  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
  bool SwapAndRetried = false;
  if (ValBits != 32 && ValBits != 64)
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&
                            ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                            : (MaskIdx0 + MaskLen0 - 1),
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {
      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&
        DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                      : (MaskIdx0 + MaskLen0 - 1),
  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
  if (!SwapAndRetried) {
    SwapAndRetried = true;
  SwapAndRetried = false;
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
  if (!SwapAndRetried) {
    SwapAndRetried = true;
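// Helper for the compare combine below: look at a value's defining node
// (extending loads, i8/i16 extension annotations) to decide whether it is
// already known to fit in a narrow integer type.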
  switch (V.getNode()->getOpcode()) {
    LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
  SDNode *AndNode = N->getOperand(0).getNode();
  SDValue CmpInputValue = N->getOperand(1);
    CN = dyn_cast<ConstantSDNode>(CmpInputValue);
    AndInputValue1 = AndInputValue1.getOperand(0);
    if (AndInputValue2 != CmpInputValue)
                               TruncInputValue1, TruncInputValue2);
template <unsigned N>
                                  bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
          IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
  EVT ResTy = Node->getValueType(0);
  EVT ResTy = Node->getValueType(0);
template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");
template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");
template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");
  switch (N->getConstantOperandVal(0)) {
3744 case Intrinsic::loongarch_lsx_vadd_b:
3745 case Intrinsic::loongarch_lsx_vadd_h:
3746 case Intrinsic::loongarch_lsx_vadd_w:
3747 case Intrinsic::loongarch_lsx_vadd_d:
3748 case Intrinsic::loongarch_lasx_xvadd_b:
3749 case Intrinsic::loongarch_lasx_xvadd_h:
3750 case Intrinsic::loongarch_lasx_xvadd_w:
3751 case Intrinsic::loongarch_lasx_xvadd_d:
3754 case Intrinsic::loongarch_lsx_vaddi_bu:
3755 case Intrinsic::loongarch_lsx_vaddi_hu:
3756 case Intrinsic::loongarch_lsx_vaddi_wu:
3757 case Intrinsic::loongarch_lsx_vaddi_du:
3758 case Intrinsic::loongarch_lasx_xvaddi_bu:
3759 case Intrinsic::loongarch_lasx_xvaddi_hu:
3760 case Intrinsic::loongarch_lasx_xvaddi_wu:
3761 case Intrinsic::loongarch_lasx_xvaddi_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
3764 case Intrinsic::loongarch_lsx_vsub_b:
3765 case Intrinsic::loongarch_lsx_vsub_h:
3766 case Intrinsic::loongarch_lsx_vsub_w:
3767 case Intrinsic::loongarch_lsx_vsub_d:
3768 case Intrinsic::loongarch_lasx_xvsub_b:
3769 case Intrinsic::loongarch_lasx_xvsub_h:
3770 case Intrinsic::loongarch_lasx_xvsub_w:
3771 case Intrinsic::loongarch_lasx_xvsub_d:
3774 case Intrinsic::loongarch_lsx_vsubi_bu:
3775 case Intrinsic::loongarch_lsx_vsubi_hu:
3776 case Intrinsic::loongarch_lsx_vsubi_wu:
3777 case Intrinsic::loongarch_lsx_vsubi_du:
3778 case Intrinsic::loongarch_lasx_xvsubi_bu:
3779 case Intrinsic::loongarch_lasx_xvsubi_hu:
3780 case Intrinsic::loongarch_lasx_xvsubi_wu:
3781 case Intrinsic::loongarch_lasx_xvsubi_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
3784 case Intrinsic::loongarch_lsx_vneg_b:
3785 case Intrinsic::loongarch_lsx_vneg_h:
3786 case Intrinsic::loongarch_lsx_vneg_w:
3787 case Intrinsic::loongarch_lsx_vneg_d:
3788 case Intrinsic::loongarch_lasx_xvneg_b:
3789 case Intrinsic::loongarch_lasx_xvneg_h:
3790 case Intrinsic::loongarch_lasx_xvneg_w:
3791 case Intrinsic::loongarch_lasx_xvneg_d:
            APInt(N->getValueType(0).getScalarType().getSizeInBits(), 0,
        SDLoc(N), N->getValueType(0)),
3799 case Intrinsic::loongarch_lsx_vmax_b:
3800 case Intrinsic::loongarch_lsx_vmax_h:
3801 case Intrinsic::loongarch_lsx_vmax_w:
3802 case Intrinsic::loongarch_lsx_vmax_d:
3803 case Intrinsic::loongarch_lasx_xvmax_b:
3804 case Intrinsic::loongarch_lasx_xvmax_h:
3805 case Intrinsic::loongarch_lasx_xvmax_w:
3806 case Intrinsic::loongarch_lasx_xvmax_d:
3809 case Intrinsic::loongarch_lsx_vmax_bu:
3810 case Intrinsic::loongarch_lsx_vmax_hu:
3811 case Intrinsic::loongarch_lsx_vmax_wu:
3812 case Intrinsic::loongarch_lsx_vmax_du:
3813 case Intrinsic::loongarch_lasx_xvmax_bu:
3814 case Intrinsic::loongarch_lasx_xvmax_hu:
3815 case Intrinsic::loongarch_lasx_xvmax_wu:
3816 case Intrinsic::loongarch_lasx_xvmax_du:
3819 case Intrinsic::loongarch_lsx_vmaxi_b:
3820 case Intrinsic::loongarch_lsx_vmaxi_h:
3821 case Intrinsic::loongarch_lsx_vmaxi_w:
3822 case Intrinsic::loongarch_lsx_vmaxi_d:
3823 case Intrinsic::loongarch_lasx_xvmaxi_b:
3824 case Intrinsic::loongarch_lasx_xvmaxi_h:
3825 case Intrinsic::loongarch_lasx_xvmaxi_w:
3826 case Intrinsic::loongarch_lasx_xvmaxi_d:
                       lowerVectorSplatImm<5>(N, 2, DAG, true));
3829 case Intrinsic::loongarch_lsx_vmaxi_bu:
3830 case Intrinsic::loongarch_lsx_vmaxi_hu:
3831 case Intrinsic::loongarch_lsx_vmaxi_wu:
3832 case Intrinsic::loongarch_lsx_vmaxi_du:
3833 case Intrinsic::loongarch_lasx_xvmaxi_bu:
3834 case Intrinsic::loongarch_lasx_xvmaxi_hu:
3835 case Intrinsic::loongarch_lasx_xvmaxi_wu:
3836 case Intrinsic::loongarch_lasx_xvmaxi_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
3839 case Intrinsic::loongarch_lsx_vmin_b:
3840 case Intrinsic::loongarch_lsx_vmin_h:
3841 case Intrinsic::loongarch_lsx_vmin_w:
3842 case Intrinsic::loongarch_lsx_vmin_d:
3843 case Intrinsic::loongarch_lasx_xvmin_b:
3844 case Intrinsic::loongarch_lasx_xvmin_h:
3845 case Intrinsic::loongarch_lasx_xvmin_w:
3846 case Intrinsic::loongarch_lasx_xvmin_d:
3849 case Intrinsic::loongarch_lsx_vmin_bu:
3850 case Intrinsic::loongarch_lsx_vmin_hu:
3851 case Intrinsic::loongarch_lsx_vmin_wu:
3852 case Intrinsic::loongarch_lsx_vmin_du:
3853 case Intrinsic::loongarch_lasx_xvmin_bu:
3854 case Intrinsic::loongarch_lasx_xvmin_hu:
3855 case Intrinsic::loongarch_lasx_xvmin_wu:
3856 case Intrinsic::loongarch_lasx_xvmin_du:
3859 case Intrinsic::loongarch_lsx_vmini_b:
3860 case Intrinsic::loongarch_lsx_vmini_h:
3861 case Intrinsic::loongarch_lsx_vmini_w:
3862 case Intrinsic::loongarch_lsx_vmini_d:
3863 case Intrinsic::loongarch_lasx_xvmini_b:
3864 case Intrinsic::loongarch_lasx_xvmini_h:
3865 case Intrinsic::loongarch_lasx_xvmini_w:
3866 case Intrinsic::loongarch_lasx_xvmini_d:
                       lowerVectorSplatImm<5>(N, 2, DAG, true));
3869 case Intrinsic::loongarch_lsx_vmini_bu:
3870 case Intrinsic::loongarch_lsx_vmini_hu:
3871 case Intrinsic::loongarch_lsx_vmini_wu:
3872 case Intrinsic::loongarch_lsx_vmini_du:
3873 case Intrinsic::loongarch_lasx_xvmini_bu:
3874 case Intrinsic::loongarch_lasx_xvmini_hu:
3875 case Intrinsic::loongarch_lasx_xvmini_wu:
3876 case Intrinsic::loongarch_lasx_xvmini_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
3879 case Intrinsic::loongarch_lsx_vmul_b:
3880 case Intrinsic::loongarch_lsx_vmul_h:
3881 case Intrinsic::loongarch_lsx_vmul_w:
3882 case Intrinsic::loongarch_lsx_vmul_d:
3883 case Intrinsic::loongarch_lasx_xvmul_b:
3884 case Intrinsic::loongarch_lasx_xvmul_h:
3885 case Intrinsic::loongarch_lasx_xvmul_w:
3886 case Intrinsic::loongarch_lasx_xvmul_d:
3889 case Intrinsic::loongarch_lsx_vmadd_b:
3890 case Intrinsic::loongarch_lsx_vmadd_h:
3891 case Intrinsic::loongarch_lsx_vmadd_w:
3892 case Intrinsic::loongarch_lsx_vmadd_d:
3893 case Intrinsic::loongarch_lasx_xvmadd_b:
3894 case Intrinsic::loongarch_lasx_xvmadd_h:
3895 case Intrinsic::loongarch_lasx_xvmadd_w:
3896 case Intrinsic::loongarch_lasx_xvmadd_d: {
    EVT ResTy = N->getValueType(0);
3902 case Intrinsic::loongarch_lsx_vmsub_b:
3903 case Intrinsic::loongarch_lsx_vmsub_h:
3904 case Intrinsic::loongarch_lsx_vmsub_w:
3905 case Intrinsic::loongarch_lsx_vmsub_d:
3906 case Intrinsic::loongarch_lasx_xvmsub_b:
3907 case Intrinsic::loongarch_lasx_xvmsub_h:
3908 case Intrinsic::loongarch_lasx_xvmsub_w:
3909 case Intrinsic::loongarch_lasx_xvmsub_d: {
    EVT ResTy = N->getValueType(0);
3915 case Intrinsic::loongarch_lsx_vdiv_b:
3916 case Intrinsic::loongarch_lsx_vdiv_h:
3917 case Intrinsic::loongarch_lsx_vdiv_w:
3918 case Intrinsic::loongarch_lsx_vdiv_d:
3919 case Intrinsic::loongarch_lasx_xvdiv_b:
3920 case Intrinsic::loongarch_lasx_xvdiv_h:
3921 case Intrinsic::loongarch_lasx_xvdiv_w:
3922 case Intrinsic::loongarch_lasx_xvdiv_d:
3925 case Intrinsic::loongarch_lsx_vdiv_bu:
3926 case Intrinsic::loongarch_lsx_vdiv_hu:
3927 case Intrinsic::loongarch_lsx_vdiv_wu:
3928 case Intrinsic::loongarch_lsx_vdiv_du:
3929 case Intrinsic::loongarch_lasx_xvdiv_bu:
3930 case Intrinsic::loongarch_lasx_xvdiv_hu:
3931 case Intrinsic::loongarch_lasx_xvdiv_wu:
3932 case Intrinsic::loongarch_lasx_xvdiv_du:
3935 case Intrinsic::loongarch_lsx_vmod_b:
3936 case Intrinsic::loongarch_lsx_vmod_h:
3937 case Intrinsic::loongarch_lsx_vmod_w:
3938 case Intrinsic::loongarch_lsx_vmod_d:
3939 case Intrinsic::loongarch_lasx_xvmod_b:
3940 case Intrinsic::loongarch_lasx_xvmod_h:
3941 case Intrinsic::loongarch_lasx_xvmod_w:
3942 case Intrinsic::loongarch_lasx_xvmod_d:
3945 case Intrinsic::loongarch_lsx_vmod_bu:
3946 case Intrinsic::loongarch_lsx_vmod_hu:
3947 case Intrinsic::loongarch_lsx_vmod_wu:
3948 case Intrinsic::loongarch_lsx_vmod_du:
3949 case Intrinsic::loongarch_lasx_xvmod_bu:
3950 case Intrinsic::loongarch_lasx_xvmod_hu:
3951 case Intrinsic::loongarch_lasx_xvmod_wu:
3952 case Intrinsic::loongarch_lasx_xvmod_du:
3955 case Intrinsic::loongarch_lsx_vand_v:
3956 case Intrinsic::loongarch_lasx_xvand_v:
3959 case Intrinsic::loongarch_lsx_vor_v:
3960 case Intrinsic::loongarch_lasx_xvor_v:
3963 case Intrinsic::loongarch_lsx_vxor_v:
3964 case Intrinsic::loongarch_lasx_xvxor_v:
3967 case Intrinsic::loongarch_lsx_vnor_v:
3968 case Intrinsic::loongarch_lasx_xvnor_v: {
3973 case Intrinsic::loongarch_lsx_vandi_b:
3974 case Intrinsic::loongarch_lasx_xvandi_b:
                       lowerVectorSplatImm<8>(N, 2, DAG));
3977 case Intrinsic::loongarch_lsx_vori_b:
3978 case Intrinsic::loongarch_lasx_xvori_b:
                       lowerVectorSplatImm<8>(N, 2, DAG));
3981 case Intrinsic::loongarch_lsx_vxori_b:
3982 case Intrinsic::loongarch_lasx_xvxori_b:
                       lowerVectorSplatImm<8>(N, 2, DAG));
3985 case Intrinsic::loongarch_lsx_vsll_b:
3986 case Intrinsic::loongarch_lsx_vsll_h:
3987 case Intrinsic::loongarch_lsx_vsll_w:
3988 case Intrinsic::loongarch_lsx_vsll_d:
3989 case Intrinsic::loongarch_lasx_xvsll_b:
3990 case Intrinsic::loongarch_lasx_xvsll_h:
3991 case Intrinsic::loongarch_lasx_xvsll_w:
3992 case Intrinsic::loongarch_lasx_xvsll_d:
3995 case Intrinsic::loongarch_lsx_vslli_b:
3996 case Intrinsic::loongarch_lasx_xvslli_b:
                       lowerVectorSplatImm<3>(N, 2, DAG));
3999 case Intrinsic::loongarch_lsx_vslli_h:
4000 case Intrinsic::loongarch_lasx_xvslli_h:
                       lowerVectorSplatImm<4>(N, 2, DAG));
4003 case Intrinsic::loongarch_lsx_vslli_w:
4004 case Intrinsic::loongarch_lasx_xvslli_w:
                       lowerVectorSplatImm<5>(N, 2, DAG));
4007 case Intrinsic::loongarch_lsx_vslli_d:
4008 case Intrinsic::loongarch_lasx_xvslli_d:
                       lowerVectorSplatImm<6>(N, 2, DAG));
4011 case Intrinsic::loongarch_lsx_vsrl_b:
4012 case Intrinsic::loongarch_lsx_vsrl_h:
4013 case Intrinsic::loongarch_lsx_vsrl_w:
4014 case Intrinsic::loongarch_lsx_vsrl_d:
4015 case Intrinsic::loongarch_lasx_xvsrl_b:
4016 case Intrinsic::loongarch_lasx_xvsrl_h:
4017 case Intrinsic::loongarch_lasx_xvsrl_w:
4018 case Intrinsic::loongarch_lasx_xvsrl_d:
4021 case Intrinsic::loongarch_lsx_vsrli_b:
4022 case Intrinsic::loongarch_lasx_xvsrli_b:
                       lowerVectorSplatImm<3>(N, 2, DAG));
4025 case Intrinsic::loongarch_lsx_vsrli_h:
4026 case Intrinsic::loongarch_lasx_xvsrli_h:
4028 lowerVectorSplatImm<4>(
N, 2, DAG));
4029 case Intrinsic::loongarch_lsx_vsrli_w:
4030 case Intrinsic::loongarch_lasx_xvsrli_w:
4032 lowerVectorSplatImm<5>(
N, 2, DAG));
4033 case Intrinsic::loongarch_lsx_vsrli_d:
4034 case Intrinsic::loongarch_lasx_xvsrli_d:
4036 lowerVectorSplatImm<6>(
N, 2, DAG));
4037 case Intrinsic::loongarch_lsx_vsra_b:
4038 case Intrinsic::loongarch_lsx_vsra_h:
4039 case Intrinsic::loongarch_lsx_vsra_w:
4040 case Intrinsic::loongarch_lsx_vsra_d:
4041 case Intrinsic::loongarch_lasx_xvsra_b:
4042 case Intrinsic::loongarch_lasx_xvsra_h:
4043 case Intrinsic::loongarch_lasx_xvsra_w:
4044 case Intrinsic::loongarch_lasx_xvsra_d:
4047 case Intrinsic::loongarch_lsx_vsrai_b:
4048 case Intrinsic::loongarch_lasx_xvsrai_b:
4050 lowerVectorSplatImm<3>(N, 2, DAG));
4051 case Intrinsic::loongarch_lsx_vsrai_h:
4052 case Intrinsic::loongarch_lasx_xvsrai_h:
4054 lowerVectorSplatImm<4>(N, 2, DAG));
4055 case Intrinsic::loongarch_lsx_vsrai_w:
4056 case Intrinsic::loongarch_lasx_xvsrai_w:
4058 lowerVectorSplatImm<5>(N, 2, DAG));
4059 case Intrinsic::loongarch_lsx_vsrai_d:
4060 case Intrinsic::loongarch_lasx_xvsrai_d:
4062 lowerVectorSplatImm<6>(N, 2, DAG));
4063 case Intrinsic::loongarch_lsx_vclz_b:
4064 case Intrinsic::loongarch_lsx_vclz_h:
4065 case Intrinsic::loongarch_lsx_vclz_w:
4066 case Intrinsic::loongarch_lsx_vclz_d:
4067 case Intrinsic::loongarch_lasx_xvclz_b:
4068 case Intrinsic::loongarch_lasx_xvclz_h:
4069 case Intrinsic::loongarch_lasx_xvclz_w:
4070 case Intrinsic::loongarch_lasx_xvclz_d:
4072 case Intrinsic::loongarch_lsx_vpcnt_b:
4073 case Intrinsic::loongarch_lsx_vpcnt_h:
4074 case Intrinsic::loongarch_lsx_vpcnt_w:
4075 case Intrinsic::loongarch_lsx_vpcnt_d:
4076 case Intrinsic::loongarch_lasx_xvpcnt_b:
4077 case Intrinsic::loongarch_lasx_xvpcnt_h:
4078 case Intrinsic::loongarch_lasx_xvpcnt_w:
4079 case Intrinsic::loongarch_lasx_xvpcnt_d:
4081 case Intrinsic::loongarch_lsx_vbitclr_b:
4082 case Intrinsic::loongarch_lsx_vbitclr_h:
4083 case Intrinsic::loongarch_lsx_vbitclr_w:
4084 case Intrinsic::loongarch_lsx_vbitclr_d:
4085 case Intrinsic::loongarch_lasx_xvbitclr_b:
4086 case Intrinsic::loongarch_lasx_xvbitclr_h:
4087 case Intrinsic::loongarch_lasx_xvbitclr_w:
4088 case Intrinsic::loongarch_lasx_xvbitclr_d:
4090 case Intrinsic::loongarch_lsx_vbitclri_b:
4091 case Intrinsic::loongarch_lasx_xvbitclri_b:
4092 return lowerVectorBitClearImm<3>(N, DAG);
4093 case Intrinsic::loongarch_lsx_vbitclri_h:
4094 case Intrinsic::loongarch_lasx_xvbitclri_h:
4095 return lowerVectorBitClearImm<4>(N, DAG);
4096 case Intrinsic::loongarch_lsx_vbitclri_w:
4097 case Intrinsic::loongarch_lasx_xvbitclri_w:
4098 return lowerVectorBitClearImm<5>(N, DAG);
4099 case Intrinsic::loongarch_lsx_vbitclri_d:
4100 case Intrinsic::loongarch_lasx_xvbitclri_d:
4101 return lowerVectorBitClearImm<6>(N, DAG);
4102 case Intrinsic::loongarch_lsx_vbitset_b:
4103 case Intrinsic::loongarch_lsx_vbitset_h:
4104 case Intrinsic::loongarch_lsx_vbitset_w:
4105 case Intrinsic::loongarch_lsx_vbitset_d:
4106 case Intrinsic::loongarch_lasx_xvbitset_b:
4107 case Intrinsic::loongarch_lasx_xvbitset_h:
4108 case Intrinsic::loongarch_lasx_xvbitset_w:
4109 case Intrinsic::loongarch_lasx_xvbitset_d: {
4110 EVT VecTy = N->getValueType(0);
4116 case Intrinsic::loongarch_lsx_vbitseti_b:
4117 case Intrinsic::loongarch_lasx_xvbitseti_b:
4118 return lowerVectorBitSetImm<3>(N, DAG);
4119 case Intrinsic::loongarch_lsx_vbitseti_h:
4120 case Intrinsic::loongarch_lasx_xvbitseti_h:
4121 return lowerVectorBitSetImm<4>(N, DAG);
4122 case Intrinsic::loongarch_lsx_vbitseti_w:
4123 case Intrinsic::loongarch_lasx_xvbitseti_w:
4124 return lowerVectorBitSetImm<5>(N, DAG);
4125 case Intrinsic::loongarch_lsx_vbitseti_d:
4126 case Intrinsic::loongarch_lasx_xvbitseti_d:
4127 return lowerVectorBitSetImm<6>(N, DAG);
4128 case Intrinsic::loongarch_lsx_vbitrev_b:
4129 case Intrinsic::loongarch_lsx_vbitrev_h:
4130 case Intrinsic::loongarch_lsx_vbitrev_w:
4131 case Intrinsic::loongarch_lsx_vbitrev_d:
4132 case Intrinsic::loongarch_lasx_xvbitrev_b:
4133 case Intrinsic::loongarch_lasx_xvbitrev_h:
4134 case Intrinsic::loongarch_lasx_xvbitrev_w:
4135 case Intrinsic::loongarch_lasx_xvbitrev_d: {
4136 EVT VecTy = N->getValueType(0);
4142 case Intrinsic::loongarch_lsx_vbitrevi_b:
4143 case Intrinsic::loongarch_lasx_xvbitrevi_b:
4144 return lowerVectorBitRevImm<3>(N, DAG);
4145 case Intrinsic::loongarch_lsx_vbitrevi_h:
4146 case Intrinsic::loongarch_lasx_xvbitrevi_h:
4147 return lowerVectorBitRevImm<4>(N, DAG);
4148 case Intrinsic::loongarch_lsx_vbitrevi_w:
4149 case Intrinsic::loongarch_lasx_xvbitrevi_w:
4150 return lowerVectorBitRevImm<5>(N, DAG);
4151 case Intrinsic::loongarch_lsx_vbitrevi_d:
4152 case Intrinsic::loongarch_lasx_xvbitrevi_d:
4153 return lowerVectorBitRevImm<6>(N, DAG);
4154 case Intrinsic::loongarch_lsx_vfadd_s:
4155 case Intrinsic::loongarch_lsx_vfadd_d:
4156 case Intrinsic::loongarch_lasx_xvfadd_s:
4157 case Intrinsic::loongarch_lasx_xvfadd_d:
4160 case Intrinsic::loongarch_lsx_vfsub_s:
4161 case Intrinsic::loongarch_lsx_vfsub_d:
4162 case Intrinsic::loongarch_lasx_xvfsub_s:
4163 case Intrinsic::loongarch_lasx_xvfsub_d:
4166 case Intrinsic::loongarch_lsx_vfmul_s:
4167 case Intrinsic::loongarch_lsx_vfmul_d:
4168 case Intrinsic::loongarch_lasx_xvfmul_s:
4169 case Intrinsic::loongarch_lasx_xvfmul_d:
4172 case Intrinsic::loongarch_lsx_vfdiv_s:
4173 case Intrinsic::loongarch_lsx_vfdiv_d:
4174 case Intrinsic::loongarch_lasx_xvfdiv_s:
4175 case Intrinsic::loongarch_lasx_xvfdiv_d:
4178 case Intrinsic::loongarch_lsx_vfmadd_s:
4179 case Intrinsic::loongarch_lsx_vfmadd_d:
4180 case Intrinsic::loongarch_lasx_xvfmadd_s:
4181 case Intrinsic::loongarch_lasx_xvfmadd_d:
4183 N->getOperand(2), N->getOperand(3));
4184 case Intrinsic::loongarch_lsx_vinsgr2vr_b:
4186 N->getOperand(1), N->getOperand(2),
4187 legalizeIntrinsicImmArg<4>(N, 3, DAG, Subtarget));
4188 case Intrinsic::loongarch_lsx_vinsgr2vr_h:
4189 case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
4191 N->getOperand(1), N->getOperand(2),
4192 legalizeIntrinsicImmArg<3>(N, 3, DAG, Subtarget));
4193 case Intrinsic::loongarch_lsx_vinsgr2vr_w:
4194 case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
4196 N->getOperand(1), N->getOperand(2),
4197 legalizeIntrinsicImmArg<2>(N, 3, DAG, Subtarget));
4198 case Intrinsic::loongarch_lsx_vinsgr2vr_d:
4200 N->getOperand(1), N->getOperand(2),
4201 legalizeIntrinsicImmArg<1>(N, 3, DAG, Subtarget));
4202 case Intrinsic::loongarch_lsx_vreplgr2vr_b:
4203 case Intrinsic::loongarch_lsx_vreplgr2vr_h:
4204 case Intrinsic::loongarch_lsx_vreplgr2vr_w:
4205 case Intrinsic::loongarch_lsx_vreplgr2vr_d:
4206 case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
4207 case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
4208 case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
4209 case Intrinsic::loongarch_lasx_xvreplgr2vr_d: {
4210 EVT ResTy = N->getValueType(0);
4214 case Intrinsic::loongarch_lsx_vreplve_b:
4215 case Intrinsic::loongarch_lsx_vreplve_h:
4216 case Intrinsic::loongarch_lsx_vreplve_w:
4217 case Intrinsic::loongarch_lsx_vreplve_d:
4218 case Intrinsic::loongarch_lasx_xvreplve_b:
4219 case Intrinsic::loongarch_lasx_xvreplve_h:
4220 case Intrinsic::loongarch_lasx_xvreplve_w:
4221 case Intrinsic::loongarch_lasx_xvreplve_d:
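// Editor's note: the vreplgr2vr/vreplve cases above splat a single scalar
// across every lane of the destination vector. A standalone sketch of that
// semantics (illustration only, not part of this file; the helper name is
// hypothetical):
#include <array>
#include <cstdint>
// vreplgr2vr.w vd, rj: broadcast one 32-bit GPR value into all four lanes.
static std::array<uint32_t, 4> vreplgr2vr_w_reference(uint32_t GPR) {
  std::array<uint32_t, 4> V;
  V.fill(GPR);
  return V;
}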
4233 switch (N->getOpcode()) {
4270 MF->insert(It, BreakMBB);
4274 SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()), MBB->end());
4275 SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
4293 BreakMBB->addSuccessor(SinkMBB);
4305 switch (MI.getOpcode()) {
4308 case LoongArch::PseudoVBZ:
4309 CondOpc = LoongArch::VSETEQZ_V;
4311 case LoongArch::PseudoVBZ_B:
4312 CondOpc = LoongArch::VSETANYEQZ_B;
4314 case LoongArch::PseudoVBZ_H:
4315 CondOpc = LoongArch::VSETANYEQZ_H;
4317 case LoongArch::PseudoVBZ_W:
4318 CondOpc = LoongArch::VSETANYEQZ_W;
4320 case LoongArch::PseudoVBZ_D:
4321 CondOpc = LoongArch::VSETANYEQZ_D;
4323 case LoongArch::PseudoVBNZ:
4324 CondOpc = LoongArch::VSETNEZ_V;
4326 case LoongArch::PseudoVBNZ_B:
4327 CondOpc = LoongArch::VSETALLNEZ_B;
4329 case LoongArch::PseudoVBNZ_H:
4330 CondOpc = LoongArch::VSETALLNEZ_H;
4332 case LoongArch::PseudoVBNZ_W:
4333 CondOpc = LoongArch::VSETALLNEZ_W;
4335 case LoongArch::PseudoVBNZ_D:
4336 CondOpc = LoongArch::VSETALLNEZ_D;
4338 case LoongArch::PseudoXVBZ:
4339 CondOpc = LoongArch::XVSETEQZ_V;
4341 case LoongArch::PseudoXVBZ_B:
4342 CondOpc = LoongArch::XVSETANYEQZ_B;
4344 case LoongArch::PseudoXVBZ_H:
4345 CondOpc = LoongArch::XVSETANYEQZ_H;
4347 case LoongArch::PseudoXVBZ_W:
4348 CondOpc = LoongArch::XVSETANYEQZ_W;
4350 case LoongArch::PseudoXVBZ_D:
4351 CondOpc = LoongArch::XVSETANYEQZ_D;
4353 case LoongArch::PseudoXVBNZ:
4354 CondOpc = LoongArch::XVSETNEZ_V;
4356 case LoongArch::PseudoXVBNZ_B:
4357 CondOpc = LoongArch::XVSETALLNEZ_B;
4359 case LoongArch::PseudoXVBNZ_H:
4360 CondOpc = LoongArch::XVSETALLNEZ_H;
4362 case LoongArch::PseudoXVBNZ_W:
4363 CondOpc = LoongArch::XVSETALLNEZ_W;
4365 case LoongArch::PseudoXVBNZ_D:
4366 CondOpc = LoongArch::XVSETALLNEZ_D;
4381 F->insert(It, FalseBB);
4382 F->insert(It, TrueBB);
4383 F->insert(It, SinkBB);
4386 SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());
4390 Register FCC = MRI.createVirtualRegister(&LoongArch::CFRRegClass);
4399 Register RD1 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
4407 Register RD2 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
4415 MI.getOperand(0).getReg())
4422 MI.eraseFromParent();
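// Editor's note: the PseudoVBZ*/PseudoVBNZ* expansion above materializes a
// condition flag computed by the vset* instructions selected in the switch
// and then branches on it. A standalone sketch of the predicate behind
// VSETANYEQZ_B (illustration only, not part of this file; the helper name is
// hypothetical):
#include <cstddef>
#include <cstdint>
// True if any byte lane of a 128-bit LSX vector is zero.
static bool anyByteLaneIsZero(const uint8_t Lanes[16]) {
  for (size_t i = 0; i < 16; ++i)
    if (Lanes[i] == 0)
      return true;
  return false;
}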
4431 switch (MI.getOpcode()) {
4434 case LoongArch::PseudoXVINSGR2VR_B:
4436 InsOp = LoongArch::VINSGR2VR_B;
4438 case LoongArch::PseudoXVINSGR2VR_H:
4440 InsOp = LoongArch::VINSGR2VR_H;
4452 unsigned Idx = MI.getOperand(3).getImm();
4455 if (Idx >= HalfSize) {
4456 ScratchReg1 = MRI.createVirtualRegister(RC);
4457 BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg1)
4463 Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
4464 Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
4466 .addReg(ScratchReg1, 0, LoongArch::sub_128);
4473 if (Idx >= HalfSize)
4474 ScratchReg2 = MRI.createVirtualRegister(RC);
4476 BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), ScratchReg2)
4479 .addImm(LoongArch::sub_128);
4481 if (Idx >= HalfSize)
4487 MI.eraseFromParent();
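// Editor's note: PseudoXVINSGR2VR_B/H above has no single-instruction LASX
// form, so the expansion inserts into one 128-bit half and moves the result
// back into the 256-bit register. A standalone sketch of that strategy
// (illustration only, not part of this file; the helper name is hypothetical):
#include <cstdint>
// Insert one byte into a 32-byte (256-bit) vector by working on halves.
static void insertByteViaHalves(uint8_t Vec[32], unsigned Idx, uint8_t Val) {
  const unsigned HalfSize = 16;
  uint8_t *Half = (Idx >= HalfSize) ? Vec + HalfSize : Vec; // pick the half
  Half[Idx % HalfSize] = Val;                               // 128-bit insert
}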
4496 switch (MI.getOpcode()) {
4499 case LoongArch::DIV_W:
4500 case LoongArch::DIV_WU:
4501 case LoongArch::MOD_W:
4502 case LoongArch::MOD_WU:
4503 case LoongArch::DIV_D:
4504 case LoongArch::DIV_DU:
4505 case LoongArch::MOD_D:
4506 case LoongArch::MOD_DU:
4509 case LoongArch::WRFCSR: {
4511 LoongArch::FCSR0 + MI.getOperand(0).getImm())
4512 .addReg(MI.getOperand(1).getReg());
4513 MI.eraseFromParent();
4516 case LoongArch::RDFCSR: {
4519 MI.getOperand(0).getReg())
4520 .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
4522 MI.eraseFromParent();
4525 case LoongArch::PseudoVBZ:
4526 case LoongArch::PseudoVBZ_B:
4527 case LoongArch::PseudoVBZ_H:
4528 case LoongArch::PseudoVBZ_W:
4529 case LoongArch::PseudoVBZ_D:
4530 case LoongArch::PseudoVBNZ:
4531 case LoongArch::PseudoVBNZ_B:
4532 case LoongArch::PseudoVBNZ_H:
4533 case LoongArch::PseudoVBNZ_W:
4534 case LoongArch::PseudoVBNZ_D:
4535 case LoongArch::PseudoXVBZ:
4536 case LoongArch::PseudoXVBZ_B:
4537 case LoongArch::PseudoXVBZ_H:
4538 case LoongArch::PseudoXVBZ_W:
4539 case LoongArch::PseudoXVBZ_D:
4540 case LoongArch::PseudoXVBNZ:
4541 case LoongArch::PseudoXVBNZ_B:
4542 case LoongArch::PseudoXVBNZ_H:
4543 case LoongArch::PseudoXVBNZ_W:
4544 case LoongArch::PseudoXVBNZ_D:
4546 case LoongArch::PseudoXVINSGR2VR_B:
4547 case LoongArch::PseudoXVINSGR2VR_H:
4554 unsigned *Fast) const {
4555 if (!Subtarget.hasUAL())
4569#define NODE_NAME_CASE(node) \
4570 case LoongArchISD::node: \
4571 return "LoongArchISD::" #node;
4645#undef NODE_NAME_CASE
4658 LoongArch::R7, LoongArch::R8, LoongArch::R9,
4659 LoongArch::R10, LoongArch::R11};
4663 LoongArch::F3, LoongArch::F4, LoongArch::F5,
4664 LoongArch::F6, LoongArch::F7};
4667 LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
4668 LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
4671 LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
4672 LoongArch::VR6, LoongArch::VR7};
4675 LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
4676 LoongArch::XR6, LoongArch::XR7};
4682 unsigned ValNo2, MVT ValVT2, MVT LocVT2,
4684 unsigned GRLenInBytes = GRLen / 8;
4717 unsigned ValNo, MVT ValVT,
4719 CCState &State, bool IsFixed, bool IsRet,
4721 unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
4722 assert((GRLen == 32 || GRLen == 64) && "Unspport GRLen");
4723 MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
4728 if (IsRet && ValNo > 1)
4732 bool UseGPRForFloat = true;
4742 UseGPRForFloat = !IsFixed;
4751 UseGPRForFloat = true;
4753 if (UseGPRForFloat && ValVT == MVT::f32) {
4756 } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
4759 } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
4770 unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
4772 DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
4775 if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
4784 "PendingLocs and PendingArgFlags out of sync");
4802 PendingLocs.size() <= 2) {
4803 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
4808 PendingLocs.clear();
4809 PendingArgFlags.clear();
4816 unsigned StoreSizeBytes = GRLen / 8;
4819 if (ValVT == MVT::f32 && !UseGPRForFloat)
4821 else if (ValVT == MVT::f64 && !UseGPRForFloat)
4835 if (!PendingLocs.empty()) {
4837 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
4838 for (auto &It : PendingLocs) {
4840 It.convertToReg(Reg);
4845 PendingLocs.clear();
4846 PendingArgFlags.clear();
4849 assert((!UseGPRForFloat || LocVT == GRLenVT) &&
4850 "Expected an GRLenVT at this stage");
4867void LoongArchTargetLowering::analyzeInputArgs(
4870 LoongArchCCAssignFn Fn) const {
4872 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4874 Type *ArgTy = nullptr;
4876 ArgTy = FType->getReturnType();
4877 else if (Ins[i].isOrigArg())
4878 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
4882 CCInfo, true, IsRet, ArgTy)) {
4883 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
4890void LoongArchTargetLowering::analyzeOutputArgs(
4893 CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
4894 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
4895 MVT ArgVT = Outs[i].VT;
4896 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
4900 CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
4901 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
4942 if (In.isOrigArg()) {
4947 if ((BitWidth <= 32 && In.Flags.isSExt()) ||
4948 (BitWidth < 32 && In.Flags.isZExt())) {
5008 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
5012 LoongArch::R23, LoongArch::R24, LoongArch::R25,
5013 LoongArch::R26, LoongArch::R27, LoongArch::R28,
5014 LoongArch::R29, LoongArch::R30, LoongArch::R31};
5021 if (LocVT == MVT::f32) {
5024 static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
5025 LoongArch::F26, LoongArch::F27};
5026 if (unsigned Reg = State.AllocateReg(FPR32List)) {
5032 if (LocVT == MVT::f64) {
5035 static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
5036 LoongArch::F30_64, LoongArch::F31_64};
5037 if (unsigned Reg = State.AllocateReg(FPR64List)) {
5065 "GHC calling convention requires the F and D extensions");
5070 unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
5072 std::vector<SDValue> OutChains;
5081 analyzeInputArgs(MF, CCInfo, Ins, false, CC_LoongArch);
5083 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5095 unsigned ArgIndex = Ins[i].OrigArgIndex;
5096 unsigned ArgPartOffset = Ins[i].PartOffset;
5097 assert(ArgPartOffset == 0);
5098 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
5100 unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
5123 int VaArgOffset, VarArgsSaveSize;
5129 VarArgsSaveSize = 0;
5131 VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
5132 VaArgOffset = -VarArgsSaveSize;
5138 LoongArchFI->setVarArgsFrameIndex(FI);
5146 VarArgsSaveSize += GRLenInBytes;
5151 for (unsigned I = Idx; I < ArgRegs.size();
5152 ++I, VaArgOffset += GRLenInBytes) {
5160 cast<StoreSDNode>(Store.getNode())
5162 ->setValue((Value *)nullptr);
5163 OutChains.push_back(Store);
5165 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
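// Editor's note: a quick worked example of the save-area bookkeeping above,
// assuming LA64 (GRLenInBytes == 8), the eight argument GPRs a0-a7, and a
// callee whose fixed parameters consume the first three of them (Idx == 3):
//   VarArgsSaveSize = 8 * (8 - 3) = 40 bytes
//   VaArgOffset     = -40, i.e. a fixed stack area addressed at a negative
//                     offset from the incoming stack pointer
// The loop then stores a3..a7 to consecutive 8-byte slots of that area.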
5170 if (!OutChains.empty()) {
5171 OutChains.push_back(Chain);
5186 if (N->getNumValues() != 1)
5188 if (!N->hasNUsesOfValue(1, 0))
5191 SDNode *Copy = *N->use_begin();
5197 if (Copy->getGluedNode())
5201 bool HasRet = false;
5202 for (SDNode *Node : Copy->uses()) {
5211 Chain = Copy->getOperand(0);
5216bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
5220 auto CalleeCC = CLI.CallConv;
5221 auto &Outs = CLI.Outs;
5223 auto CallerCC = Caller.getCallingConv();
5230 for (auto &VA : ArgLocs)
5236 auto IsCallerStructRet = Caller.hasStructRetAttr();
5237 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
5238 if (IsCallerStructRet || IsCalleeStructRet)
5242 for (auto &Arg : Outs)
5243 if (Arg.Flags.isByVal())
5248 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
5249 if (CalleeCC != CallerCC) {
5250 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
5251 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
5289 analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI, CC_LoongArch);
5293 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
5299 "site marked musttail");
5306 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
5308 if (!Flags.isByVal())
5312 unsigned Size = Flags.getByValSize();
5313 Align Alignment = Flags.getNonZeroByValAlign();
5320 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
5322 false, nullptr, std::nullopt,
5334 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
5336 SDValue ArgValue = OutVals[i];
5349 unsigned ArgIndex = Outs[i].OrigArgIndex;
5350 unsigned ArgPartOffset = Outs[i].PartOffset;
5351 assert(ArgPartOffset == 0);
5356 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
5357 SDValue PartValue = OutVals[i + 1];
5358 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
5368 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
5372 for (const auto &Part : Parts) {
5373 SDValue PartValue = Part.first;
5374 SDValue PartOffset = Part.second;
5381 ArgValue = SpillSlot;
5387 if (Flags.isByVal())
5388 ArgValue = ByValArgs[j++];
5395 assert(!IsTailCall && "Tail call not allowed if stack is used "
5396 "for passing parameters");
5399 if (!StackPtr.getNode())
5412 if (!MemOpChains.empty())
5418 for (auto &Reg : RegsToPass) {
5419 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
5446 for (auto &Reg : RegsToPass)
5452 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
5453 assert(Mask && "Missing call preserved mask for calling convention");
5471 assert(Subtarget.is64Bit() && "Medium code model requires LA64");
5475 assert(Subtarget.is64Bit() && "Large code model requires LA64");
5498 analyzeInputArgs(MF, RetCCInfo, Ins, true, CC_LoongArch);
5501 for (auto &VA : RVLocs) {
5521 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
5523 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
5527 Outs[i].Flags, CCInfo, true, true,
5554 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
5578 if (!Subtarget.hasExtLSX())
5581 if (VT == MVT::f32) {
5582 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7e07ffff;
5583 return (masked == 0x3e000000 || masked == 0x40000000);
5586 if (VT == MVT::f64) {
5587 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7fc0ffffffffffff;
5588 return (masked == 0x3fc0000000000000 || masked == 0x4000000000000000);
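// Editor's note: a quick check of the f32 mask test above, using 2.0f as an
// example (illustration only, not part of this file; the helper name is
// hypothetical):
#include <cstdint>
#include <cstring>
static bool passesF32VldiMask(float F) {
  uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof(Bits)); // 2.0f -> 0x40000000
  uint32_t Masked = Bits & 0x7e07ffffu; // 0x40000000 & mask == 0x40000000
  return Masked == 0x3e000000u || Masked == 0x40000000u;
}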
5594bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5595 bool ForCodeSize) const {
5597 if (VT == MVT::f32 && !Subtarget.hasBasicF())
5599 if (VT == MVT::f64 && !Subtarget.hasBasicD())
5601 return (Imm.isZero() || Imm.isExactlyValue(1.0) || isFPImmVLDILegal(Imm, VT));
5612bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
5615 return isa<LoadInst>(I) || isa<StoreInst>(I);
5617 if (isa<LoadInst>(I))
5622 Type *Ty = I->getOperand(0)->getType();
5641 return Y.getValueType().isScalarInteger() && !isa<ConstantSDNode>(Y);
5647 unsigned Intrinsic) const {
5648 switch (Intrinsic) {
5651 case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
5652 case Intrinsic::loongarch_masked_atomicrmw_add_i32:
5653 case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
5654 case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
5656 Info.memVT = MVT::i32;
5657 Info.ptrVal = I.getArgOperand(0);
5692 return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
5694 return Intrinsic::loongarch_masked_atomicrmw_add_i64;
5696 return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
5698 return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
5700 return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
5702 return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
5704 return Intrinsic::loongarch_masked_atomicrmw_max_i64;
5706 return Intrinsic::loongarch_masked_atomicrmw_min_i64;
5716 return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
5718 return Intrinsic::loongarch_masked_atomicrmw_add_i32;
5720 return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
5722 return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
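// Editor's note: the masked intrinsics above implement sub-word atomics on a
// naturally aligned GRLen-wide container. A standalone sketch of the masking
// idea for an 8-bit exchange inside a 32-bit word (illustration only, not
// part of this file; the helper name is hypothetical):
#include <atomic>
#include <cstdint>
static uint8_t maskedByteXchg(std::atomic<uint32_t> &Word, unsigned ByteIdx,
                              uint8_t NewVal) {
  const unsigned Shift = ByteIdx * 8;
  const uint32_t Mask = 0xffu << Shift;
  uint32_t Old = Word.load();
  uint32_t New;
  do { // retry loop, analogous to the LL/SC loop the backend emits
    New = (Old & ~Mask) | (uint32_t(NewVal) << Shift);
  } while (!Word.compare_exchange_weak(Old, New));
  return uint8_t((Old & Mask) >> Shift);
}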
5743 Value *FailureOrdering =
5747 Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
5755 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
5779 unsigned GRLen = Subtarget.getGRLen();
5808 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
5811 Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
5838 const Constant *PersonalityFn) const {
5839 return LoongArch::R4;
5843 const Constant *PersonalityFn) const {
5844 return LoongArch::R5;
5852LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
5870 if (Constraint.size() == 1) {
5871 switch (Constraint[0]) {
5886 if (Constraint == "ZC" || Constraint == "ZB")
5902std::pair<unsigned, const TargetRegisterClass *>
5903LoongArchTargetLowering::getRegForInlineAsmConstraint(
5907 if (Constraint.size() == 1) {
5908 switch (Constraint[0]) {
5913 return std::make_pair(0U, &LoongArch::GPRRegClass);
5915 if (Subtarget.hasBasicF() && VT == MVT::f32)
5916 return std::make_pair(0U, &LoongArch::FPR32RegClass);
5917 if (Subtarget.hasBasicD() && VT == MVT::f64)
5918 return std::make_pair(0U, &LoongArch::FPR64RegClass);
5919 if (Subtarget.hasExtLSX() &&
5920 TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
5921 return std::make_pair(0U, &LoongArch::LSX128RegClass);
5922 if (Subtarget.hasExtLASX() &&
5923 TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
5924 return std::make_pair(0U, &LoongArch::LASX256RegClass);
5944 bool IsFP = Constraint[2] == 'f';
5945 std::pair<StringRef, StringRef> Temp = Constraint.split('$');
5946 std::pair<unsigned, const TargetRegisterClass *> R;
5948 TRI, join_items("", Temp.first, Temp.second), VT);
5951 unsigned RegNo = R.first;
5952 if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
5953 if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
5954 unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
5955 return std::make_pair(DReg, &LoongArch::FPR64RegClass);
5965void LoongArchTargetLowering::LowerAsmOperandForConstraint(
5969 if (Constraint.size() == 1) {
5970 switch (Constraint[0]) {
5973 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5975 if (isInt<16>(CVal))
5982 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5984 if (isInt<12>(CVal))
5991 if (auto *C = dyn_cast<ConstantSDNode>(Op))
5992 if (C->getZExtValue() == 0)
5998 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
6000 if (isUInt<12>(CVal))
6012#define GET_REGISTER_MATCHER
6013#include "LoongArchGenAsmMatcher.inc"
6019 std::string NewRegName = Name.second.str();
6021 if (Reg == LoongArch::NoRegister)
6023 if (Reg == LoongArch::NoRegister)
6027 if (!ReservedRegs.test(Reg))
6043 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
6044 const APInt &Imm = ConstNode->getAPIntValue();
6046 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
6047 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
6050 if (ConstNode->hasOneUse() &&
6051 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
6052 (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
6058 if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
6059 unsigned Shifts = Imm.countr_zero();
6065 APInt ImmPop = Imm.ashr(Shifts);
6066 if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
6070 APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
6071 if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
6072 (ImmSmall - Imm).isPowerOf2())
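// Editor's note: a couple of worked instances of the decomposition that the
// checks above are looking for (shift-and-add forms that avoid a multiply):
//   x * 9  == (x << 3) + x          // Imm - 1 is a power of two
//   x * 7  == (x << 3) - x          // Imm + 1 is a power of two
//   x * 20 == ((x << 2) + x) << 2   // Imm = 5 << 2, handled via the
//                                   // countr_zero() / ImmPop path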
6082 Type *Ty, unsigned AS,
6098 !(isShiftedInt<14, 2>(AM.BaseOffs) && Subtarget.hasUAL()))
6125 return isInt<12>(Imm);
6129 return isInt<12>(Imm);
6136 if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
6137 EVT MemVT = LD->getMemoryVT();
6138 if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
6149 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
6158 if (Y.getValueType().isVector())
6161 return !isa<ConstantSDNode>(Y);
6170 EVT Type, bool IsSigned) const {
6191 Align &PrefAlign) const {
6192 if (!isa<MemIntrinsic>(CI))
6197 PrefAlign = Align(8);
6200 PrefAlign = Align(4);
unsigned const MachineRegisterInfo * MRI
static MCRegister MatchRegisterName(StringRef Name)
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
#define NODE_NAME_CASE(node)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static MCRegister MatchRegisterAltName(StringRef Name)
Maps from the set of all alternative registernames to a register number.
Function Alias Analysis Results
static uint64_t getConstant(const Value *IndexValue)
static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
Analysis containing CSE Info
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VREPLVEI (if possible).
const MCPhysReg ArgFPR32s[]
static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKOD (if possible).
static bool fitsRegularPattern(typename SmallVectorImpl< ValType >::const_iterator Begin, unsigned CheckStride, typename SmallVectorImpl< ValType >::const_iterator End, ValType ExpectedIndex, unsigned ExpectedIndexStride)
Determine whether a range fits a regular pattern of values.
static void canonicalizeShuffleVectorByLane(const SDLoc &DL, MutableArrayRef< int > Mask, MVT VT, SDValue &V1, SDValue &V2, SelectionDAG &DAG)
Shuffle vectors by lane to generate more optimized instructions.
static SDValue emitIntrinsicErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static cl::opt< bool > ZeroDivCheck("loongarch-check-zero-division", cl::Hidden, cl::desc("Trap on integer division by zero."), cl::init(false))
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Dispatching routine to lower various 256-bit LoongArch vector shuffles.
static void emitErrorAndReplaceIntrinsicResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, StringRef ErrorMsg, bool WithChain=true)
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
static SDValue lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_VILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVH (if possible).
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI, unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy)
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLE_XVPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKOD (if possible).
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG)
static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG)
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG)
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static SDValue lowerVECTOR_SHUFFLE_VPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKEV (if possible).
static void replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, bool IsSigned=false)
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
const MCPhysReg ArgFPR64s[]
#define IOCSRWR_CASE(NAME, NODE)
#define CRC_CASE_EXT_UNARYOP(NAME, NODE)
static SDValue lowerVECTOR_SHUFFLE_VPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKOD (if possible).
static MachineBasicBlock * emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
const MCPhysReg ArgGPRs[]
static SDValue lowerVECTOR_SHUFFLE_XVILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVL (if possible).
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp, unsigned ExtOpc=ISD::ANY_EXTEND)
static void replaceVecCondBranchResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
#define ASRT_LE_GT_CASE(NAME)
static bool isConstantOrUndef(const SDValue Op)
static SDValue lowerVECTOR_SHUFFLE_XVPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
static MachineBasicBlock * emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVH (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF (if possible).
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRRD_CASE(NAME, NODE)
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Dispatching routine to lower various 128-bit LoongArch vector shuffles.
static SDValue lowerVECTOR_SHUFFLE_XVPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_VILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVL (if possible).
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG)
static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op)
static void replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVREPLVEI (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKOD (if possible).
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen, AtomicRMWInst::BinOp BinOp)
static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VSHUF.
static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode)
unsigned const TargetRegisterInfo * TRI
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Class for arbitrary precision integers.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM Basic Block Representation.
bool test(unsigned Idx) const
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
SmallVectorImpl< ISD::ArgFlagsTy > & getPendingArgFlags()
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
SmallVectorImpl< CCValAssign > & getPendingLocs()
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
unsigned getValNo() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Argument * getArg(unsigned i) const
Common base class shared among various IRBuilders.
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
LoongArchMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private Lo...
void addSExt32Register(Register Reg)
const LoongArchRegisterInfo * getRegisterInfo() const override
const LoongArchInstrInfo * getInstrInfo() const override
unsigned getMaxBytesForAlignment() const
Align getPrefFunctionAlignment() const
unsigned getGRLen() const
Align getPrefLoopAlignment() const
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Determine if the target supports unaligned memory accesses.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool isFPImmVLDILegal(const APFloat &Imm, EVT VT) const
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
LoongArchTargetLowering(const TargetMachine &TM, const LoongArchSubtarget &STI)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
bool hasFeature(unsigned Feature) const
bool is128BitVector() const
Return true if this is a 128-bit vector type.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
static auto fixedlen_vector_valuetypes()
bool is256BitVector() const
Return true if this is a 256-bit vector type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
EVT getMemoryVT() const
Return the type of the in-memory value.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
size_t use_size() const
Return the number of uses of this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
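A small sketch combining getConstant and getBuildVector; the v4i32 type and the element values are illustrative assumptions.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
// Sketch: materialize the constant vector <4 x i32> <1, 2, 3, 4>.
static SDValue buildVectorSketch(SelectionDAG &DAG, const SDLoc &DL) {
  SmallVector<SDValue, 4> Ops;
  for (uint64_t V : {1, 2, 3, 4})
    Ops.push_back(DAG.getConstant(V, DL, MVT::i32));
  return DAG.getBuildVector(MVT::v4i32, DL, Ops);
}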
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
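A hedged sketch of a typical query against the returned KnownBits; the alignment check (low three bits known zero) is an invented example, not this file's logic.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
// Sketch: test whether the low 3 bits of Op are provably zero, e.g. before
// treating it as an 8-byte-aligned address.
static bool lowBitsKnownZeroSketch(SelectionDAG &DAG, SDValue Op) {
  KnownBits Known = DAG.computeKnownBits(Op);
  return Known.countMinTrailingZeros() >= 3;
}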
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
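A hedged sketch of building a shuffle with an explicit mask; the v4i32 type and the interleaving mask are illustrative. Mask entries 0..3 select from the first input, 4..7 from the second, and -1 would mean "don't care".
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
// Sketch: interleave the low halves of two v4i32 vectors.
static SDValue interleaveLoSketch(SelectionDAG &DAG, const SDLoc &DL,
                                  SDValue V1, SDValue V2) {
  int Mask[] = {0, 4, 1, 5};
  return DAG.getVectorShuffle(MVT::v4i32, DL, V1, V2, Mask);
}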
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
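A short sketch of the two StringRef operations above; the "$r" constraint-style prefix and the function name are made-up examples.
#include "llvm/ADT/StringRef.h"
using namespace llvm;
// Sketch: split() returns the text before and after the first separator.
static bool looksLikeGPRSketch(StringRef Constraint) {
  if (!Constraint.starts_with("$r"))
    return false;
  auto [Prefix, Number] = Constraint.split('r');
  return Prefix == "$" && !Number.empty();
}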
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
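A hedged sketch of where such calls live: setOperationAction is a protected TargetLoweringBase member, so a target issues them from its own TargetLowering constructor. The class name and the opcode/type pairs below are illustrative assumptions, not this target's real choices.
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;
namespace {
struct SketchTargetLowering : TargetLowering {
  explicit SketchTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {
    // Route BUILD_VECTOR of v4i32 through the target's LowerOperation() hook.
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    // Ask the generic legalizer to expand 64-bit rotates instead.
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
  }
};
} // namespace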
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setMaxBytesForAlignment(unsigned MaxBytes)
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
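Continuing the same hypothetical constructor sketch used above for setOperationAction, extending-load and truncating-store legality is declared per (value type, memory type) pair; the specific pairs are illustrative only.
// Sketch (inside the hypothetical constructor shown earlier):
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);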
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
ABI getTargetABI(StringRef ABIName)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
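A small sketch of the range-based wrapper applied to a shuffle mask; the predicate (every entry is either -1 or indexes into the first input vector) is an invented example.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
// Sketch: does the mask only reference the first input vector (or undef)?
static bool maskUsesOnlyFirstVectorSketch(ArrayRef<int> Mask) {
  int NumElts = static_cast<int>(Mask.size());
  return all_of(Mask, [NumElts](int M) { return M < NumElts; });
}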
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
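A few concrete cases, as a sketch: a single contiguous run of ones anywhere in the word is accepted; zero (an empty run) and split runs are not.
#include <cassert>
#include "llvm/Support/MathExtras.h"
using namespace llvm;
static void shiftedMaskExamplesSketch() {
  assert(isShiftedMask_64(0x0000'0FF0u));  // ones in bits 4..11
  assert(!isShiftedMask_64(0x0u));         // empty run is rejected
  assert(!isShiftedMask_64(0x0000'0F0Fu)); // two separate runs
}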
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
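A hedged sketch of the usual guard before folding a value into an N-bit unsigned immediate field; the 12-bit width and function name are illustrative assumptions.
#include "llvm/Support/MathExtras.h"
using namespace llvm;
// Sketch: does Value fit in a 12-bit unsigned immediate?
static bool fitsUImm12Sketch(uint64_t Value) {
  return isUInt<12>(Value);
}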
AtomicOrdering
Atomic ordering for LLVM's memory model.
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Align getNonZeroOrigAlign() const
Register getFrameRegister(const MachineFunction &MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)