28#include "llvm/IR/IntrinsicsLoongArch.h"
37#define DEBUG_TYPE "loongarch-isel-lowering"
42 cl::desc(
"Trap on integer division by zero."),
  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())

      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
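// Register-class setup: the scalar FP classes depend on the F/D features,
// while the 128-bit LSX and 256-bit LASX vector types listed above are only
// registered when the corresponding SIMD extension is available (the
// addRegisterClass calls themselves fall in an elided span).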
  if (Subtarget.hasBasicF()) {
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.hasBasicD()) {

  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {

  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {

  if (Subtarget.hasExtLSX())

  switch (Op.getOpcode()) {
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerEH_DWARF_CFA(Op, DAG);
    return lowerGlobalAddress(Op, DAG);
    return lowerGlobalTLSAddress(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_VOID(Op, DAG);
    return lowerBlockAddress(Op, DAG);
    return lowerJumpTable(Op, DAG);
    return lowerShiftLeftParts(Op, DAG);
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/true);
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/false);
    return lowerConstantPool(Op, DAG);
    return lowerFP_TO_SINT(Op, DAG);
    return lowerBITCAST(Op, DAG);
    return lowerUINT_TO_FP(Op, DAG);
    return lowerSINT_TO_FP(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerWRITE_REGISTER(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
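// LowerOperation dispatch: each ISD opcode marked Custom in the constructor
// is routed to one of the lower* helpers; the case labels sit in elided
// lines, one per return above.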
  if (isa<ConstantSDNode>(Op))
  if (isa<ConstantFPSDNode>(Op))

  EVT ResTy = Op->getValueType(0);

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;

  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                            /*MinSplatBits=*/8) &&
      SplatBitSize <= 64) {
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&

    switch (SplatBitSize) {
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;

    if (ViaVecTy != ResTy)

  EVT ResTy = Node->getValueType(0);
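// lowerBUILD_VECTOR splat path: a constant splat detected above is
// materialized over ViaVecTy (8/16/32/64-bit lanes matching SplatBitSize)
// and bitcast back when ViaVecTy != ResTy; non-constant build_vectors fall
// through to the element-by-element INSERT_VECTOR_ELT loop below.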
  for (unsigned i = 0; i < NumElts; ++i) {

LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  EVT VecTy = Op->getOperand(0)->getValueType(0);

  if (isa<ConstantSDNode>(Idx) &&
      (EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
       EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))

LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
  if (isa<ConstantSDNode>(Op->getOperand(2)))

  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
        "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
        "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);

  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
        "be a constant integer");

  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;

    int Offset = -(GRLenInBytes * 2);

  if (Op.getConstantOperandVal(0) != 0) {
        "return address can only be determined for the current frame");
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

         !Subtarget.hasBasicD() && "unexpected target features");

  auto *C = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))

      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLT(MVT::i32))

  EVT RetVT = Op.getValueType();

  MakeLibCallOptions CallOptions;
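// Soft-float conversion path (F present, D absent): the f64 conversion
// becomes a libcall, and setTypeListBeforeSoften records the original
// operand/result types so the correct runtime routine is chosen after type
// softening.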
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =

         !Subtarget.hasBasicD() && "unexpected target features");

      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLE(MVT::i32))

  EVT RetVT = Op.getValueType();

  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =

  if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {

  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {

                       N->getOffset(), Flags);

template <class NodeTy>
                                         bool IsLocal) const {
    assert(Subtarget.is64Bit() && "Large code model requires LA64");

  return getAddr(cast<BlockAddressSDNode>(Op), DAG,
  return getAddr(cast<JumpTableSDNode>(Op), DAG,
  return getAddr(cast<ConstantPoolSDNode>(Op), DAG,

  assert(N->getOffset() == 0 && "unexpected offset in global node");

  if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
    if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())

                                     unsigned Opc, bool UseGOT,

  Args.push_back(Entry);

LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
  assert((!Large || Subtarget.is64Bit()) &&
         "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_PC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC_PC,

template <unsigned N>
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));

  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  switch (Op.getConstantOperandVal(0)) {
  case Intrinsic::thread_pointer: {
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
    return checkIntrinsicImmArg<1>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
    return checkIntrinsicImmArg<2>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_d:
    return checkIntrinsicImmArg<2>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
    return checkIntrinsicImmArg<3>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_w:
    return checkIntrinsicImmArg<3>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
    return checkIntrinsicImmArg<4>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
    return checkIntrinsicImmArg<4>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
    return checkIntrinsicImmArg<5>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
    return checkIntrinsicImmArg<5>(Op, 2, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
    return checkIntrinsicImmArg<5>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
    return checkIntrinsicImmArg<6>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
    return checkIntrinsicImmArg<6>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
    return checkIntrinsicImmArg<7>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
    return checkIntrinsicImmArg<8>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
    return checkIntrinsicImmArg<8>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
    return checkIntrinsicImmArg<10>(Op, 1, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
    return checkIntrinsicImmArg<13>(Op, 1, DAG, /*IsSigned=*/true);
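// The template width mirrors the instruction encodings: ui1..ui8 immediate
// fields for the pick/replicate/shift/saturate families, si5 for the signed
// compares, si10 for [x]vrepli and si13 for [x]vldi.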
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<14>(Imm)
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<14>(Imm)
                          {Chain, Op.getOperand(2),
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
    return !isUInt<14>(Imm)
                          {Chain, Op.getOperand(2), Op.getOperand(3),
  case Intrinsic::loongarch_iocsrrd_d: {

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other},          \
                       {Chain, Op.getOperand(2)});                             \
  case Intrinsic::loongarch_cpucfg: {
                       {Chain, Op.getOperand(2)});
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<8>(Imm)
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
    return !isShiftedInt<11, 1>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
    return !isShiftedInt<10, 2>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
    return !isShiftedInt<9, 3>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                   Op, "argument out of range or not a multiple of 8", DAG)

  return Op.getOperand(0);

  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (IntrinsicEnum) {
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
    int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
    if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
  case Intrinsic::loongarch_dbar: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_ibar: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_break: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
    return !isUInt<2>(Imm)
  case Intrinsic::loongarch_syscall: {
    return !isUInt<15>(Imm)

#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    return Subtarget.is64Bit()                                                 \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain,        \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),  \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3))  \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2,   \
  case Intrinsic::loongarch_iocsrwr_d: {

#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    return !Subtarget.is64Bit()                                                \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)           \
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue())
  case Intrinsic::loongarch_lasx_xvstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<5>(Op.getConstantOperandVal(5)))
  case Intrinsic::loongarch_lsx_vstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
  case Intrinsic::loongarch_lasx_xvstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
                   Op, "argument out of range or not a multiple of 8", DAG)
  case Intrinsic::loongarch_lsx_vstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<1>(Op.getConstantOperandVal(5)))
                   Op, "argument out of range or not a multiple of 8", DAG)
  EVT VT = Lo.getValueType();

  EVT VT = Lo.getValueType();
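// Both shift-parts helpers use the standard double-GRLen decomposition.
// A sketch for SHL_PARTS (the emitted DAG is branchless but computes the
// same result; Shamt == 0 is handled separately there):
//   if (Shamt < GRLen) {
//     NewLo = Lo << Shamt;
//     NewHi = (Hi << Shamt) | (Lo >> (GRLen - Shamt));
//   } else {
//     NewLo = 0;
//     NewHi = Lo << (Shamt - GRLen);
//   }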
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);

                                StringRef ErrorMsg, bool WithChain = true) {
  Results.push_back(N->getOperand(0));

template <unsigned N>
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  if (!isUInt<N>(Imm)) {

  switch (N->getConstantOperandVal(0)) {
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:

  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");

    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =
    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
           "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
           "Unexpected custom legalisation");

    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

    switch (N->getConstantOperandVal(1)) {
    case Intrinsic::loongarch_movfcsr2gr: {
      if (!Subtarget.hasBasicF()) {
      if (!isUInt<2>(Imm)) {

#define CRC_CASE_EXT_BINARYOP(NAME, NODE)                                      \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),               \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE)                                       \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});        \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
#undef CRC_CASE_EXT_UNARYOP

#define CSR_CASE(ID)                                                           \
  case Intrinsic::loongarch_##ID: {                                            \
    if (!Subtarget.is64Bit())                                                  \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64);   \
    case Intrinsic::loongarch_csrrd_w: {
      if (!isUInt<14>(Imm)) {
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
      if (!isUInt<14>(Imm)) {
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);
      if (!isUInt<14>(Imm)) {

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue IOCSRRDResults =                                                   \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},            \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back(                                                         \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0)));       \
    Results.push_back(IOCSRRDResults.getValue(1));                             \
    case Intrinsic::loongarch_cpucfg: {
    case Intrinsic::loongarch_lddir_d: {
          "On LA64, only 64-bit registers can be read.");
          "On LA32, only 32-bit registers can be read.");
    Results.push_back(N->getOperand(0));

  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMIdx, SMLen;
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
    NewOperand = FirstOperand;
  msb = lsb + SMLen - 1;
  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)

  SDValue FirstOperand = N->getOperand(0);
  EVT ValTy = N->getValueType(0);
  unsigned MaskIdx, MaskLen;
      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))
  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)

  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
  bool SwapAndRetried = false;

  if (ValBits != 32 && ValBits != 64)

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&
                            ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                            : (MaskIdx0 + MaskLen0 - 1),

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&
        DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                      : (MaskIdx0 + MaskLen0 - 1),

  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

  if (!SwapAndRetried) {
    SwapAndRetried = true;
  SwapAndRetried = false;

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
  if (!SwapAndRetried) {
    SwapAndRetried = true;

  switch (V.getNode()->getOpcode()) {
    LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {

  SDNode *AndNode = N->getOperand(0).getNode();
  SDValue CmpInputValue = N->getOperand(1);
    CN = dyn_cast<ConstantSDNode>(CmpInputValue);
    AndInputValue1 = AndInputValue1.getOperand(0);
    if (AndInputValue2 != CmpInputValue)
                           TruncInputValue1, TruncInputValue2);

template <unsigned N>
                                     bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
      IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),

  EVT ResTy = Node->getValueType(0);

  EVT ResTy = Node->getValueType(0);

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");
  switch (N->getConstantOperandVal(0)) {
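  // Mapping each [x]v* intrinsic onto the equivalent generic ISD opcode
  // (ADD, SUB, MUL, SMAX/UMAX, CTLZ/CTPOP, plain AND/OR/XOR, ...) lets the
  // generic combiner and the existing ISel patterns optimize intrinsic
  // code for free.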
  case Intrinsic::loongarch_lsx_vadd_b:
  case Intrinsic::loongarch_lsx_vadd_h:
  case Intrinsic::loongarch_lsx_vadd_w:
  case Intrinsic::loongarch_lsx_vadd_d:
  case Intrinsic::loongarch_lasx_xvadd_b:
  case Intrinsic::loongarch_lasx_xvadd_h:
  case Intrinsic::loongarch_lasx_xvadd_w:
  case Intrinsic::loongarch_lasx_xvadd_d:
  case Intrinsic::loongarch_lsx_vaddi_bu:
  case Intrinsic::loongarch_lsx_vaddi_hu:
  case Intrinsic::loongarch_lsx_vaddi_wu:
  case Intrinsic::loongarch_lsx_vaddi_du:
  case Intrinsic::loongarch_lasx_xvaddi_bu:
  case Intrinsic::loongarch_lasx_xvaddi_hu:
  case Intrinsic::loongarch_lasx_xvaddi_wu:
  case Intrinsic::loongarch_lasx_xvaddi_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsub_b:
  case Intrinsic::loongarch_lsx_vsub_h:
  case Intrinsic::loongarch_lsx_vsub_w:
  case Intrinsic::loongarch_lsx_vsub_d:
  case Intrinsic::loongarch_lasx_xvsub_b:
  case Intrinsic::loongarch_lasx_xvsub_h:
  case Intrinsic::loongarch_lasx_xvsub_w:
  case Intrinsic::loongarch_lasx_xvsub_d:
  case Intrinsic::loongarch_lsx_vsubi_bu:
  case Intrinsic::loongarch_lsx_vsubi_hu:
  case Intrinsic::loongarch_lsx_vsubi_wu:
  case Intrinsic::loongarch_lsx_vsubi_du:
  case Intrinsic::loongarch_lasx_xvsubi_bu:
  case Intrinsic::loongarch_lasx_xvsubi_hu:
  case Intrinsic::loongarch_lasx_xvsubi_wu:
  case Intrinsic::loongarch_lasx_xvsubi_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vneg_b:
  case Intrinsic::loongarch_lsx_vneg_h:
  case Intrinsic::loongarch_lsx_vneg_w:
  case Intrinsic::loongarch_lsx_vneg_d:
  case Intrinsic::loongarch_lasx_xvneg_b:
  case Intrinsic::loongarch_lasx_xvneg_h:
  case Intrinsic::loongarch_lasx_xvneg_w:
  case Intrinsic::loongarch_lasx_xvneg_d:
            APInt(N->getValueType(0).getScalarType().getSizeInBits(), 0,
            SDLoc(N), N->getValueType(0)),
  case Intrinsic::loongarch_lsx_vmax_b:
  case Intrinsic::loongarch_lsx_vmax_h:
  case Intrinsic::loongarch_lsx_vmax_w:
  case Intrinsic::loongarch_lsx_vmax_d:
  case Intrinsic::loongarch_lasx_xvmax_b:
  case Intrinsic::loongarch_lasx_xvmax_h:
  case Intrinsic::loongarch_lasx_xvmax_w:
  case Intrinsic::loongarch_lasx_xvmax_d:
  case Intrinsic::loongarch_lsx_vmax_bu:
  case Intrinsic::loongarch_lsx_vmax_hu:
  case Intrinsic::loongarch_lsx_vmax_wu:
  case Intrinsic::loongarch_lsx_vmax_du:
  case Intrinsic::loongarch_lasx_xvmax_bu:
  case Intrinsic::loongarch_lasx_xvmax_hu:
  case Intrinsic::loongarch_lasx_xvmax_wu:
  case Intrinsic::loongarch_lasx_xvmax_du:
  case Intrinsic::loongarch_lsx_vmaxi_b:
  case Intrinsic::loongarch_lsx_vmaxi_h:
  case Intrinsic::loongarch_lsx_vmaxi_w:
  case Intrinsic::loongarch_lsx_vmaxi_d:
  case Intrinsic::loongarch_lasx_xvmaxi_b:
  case Intrinsic::loongarch_lasx_xvmaxi_h:
  case Intrinsic::loongarch_lasx_xvmaxi_w:
  case Intrinsic::loongarch_lasx_xvmaxi_d:
                       lowerVectorSplatImm<5>(N, 2, DAG, /*IsSigned=*/true));
  case Intrinsic::loongarch_lsx_vmaxi_bu:
  case Intrinsic::loongarch_lsx_vmaxi_hu:
  case Intrinsic::loongarch_lsx_vmaxi_wu:
  case Intrinsic::loongarch_lsx_vmaxi_du:
  case Intrinsic::loongarch_lasx_xvmaxi_bu:
  case Intrinsic::loongarch_lasx_xvmaxi_hu:
  case Intrinsic::loongarch_lasx_xvmaxi_wu:
  case Intrinsic::loongarch_lasx_xvmaxi_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vmin_b:
  case Intrinsic::loongarch_lsx_vmin_h:
  case Intrinsic::loongarch_lsx_vmin_w:
  case Intrinsic::loongarch_lsx_vmin_d:
  case Intrinsic::loongarch_lasx_xvmin_b:
  case Intrinsic::loongarch_lasx_xvmin_h:
  case Intrinsic::loongarch_lasx_xvmin_w:
  case Intrinsic::loongarch_lasx_xvmin_d:
  case Intrinsic::loongarch_lsx_vmin_bu:
  case Intrinsic::loongarch_lsx_vmin_hu:
  case Intrinsic::loongarch_lsx_vmin_wu:
  case Intrinsic::loongarch_lsx_vmin_du:
  case Intrinsic::loongarch_lasx_xvmin_bu:
  case Intrinsic::loongarch_lasx_xvmin_hu:
  case Intrinsic::loongarch_lasx_xvmin_wu:
  case Intrinsic::loongarch_lasx_xvmin_du:
  case Intrinsic::loongarch_lsx_vmini_b:
  case Intrinsic::loongarch_lsx_vmini_h:
  case Intrinsic::loongarch_lsx_vmini_w:
  case Intrinsic::loongarch_lsx_vmini_d:
  case Intrinsic::loongarch_lasx_xvmini_b:
  case Intrinsic::loongarch_lasx_xvmini_h:
  case Intrinsic::loongarch_lasx_xvmini_w:
  case Intrinsic::loongarch_lasx_xvmini_d:
                       lowerVectorSplatImm<5>(N, 2, DAG, /*IsSigned=*/true));
  case Intrinsic::loongarch_lsx_vmini_bu:
  case Intrinsic::loongarch_lsx_vmini_hu:
  case Intrinsic::loongarch_lsx_vmini_wu:
  case Intrinsic::loongarch_lsx_vmini_du:
  case Intrinsic::loongarch_lasx_xvmini_bu:
  case Intrinsic::loongarch_lasx_xvmini_hu:
  case Intrinsic::loongarch_lasx_xvmini_wu:
  case Intrinsic::loongarch_lasx_xvmini_du:
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vmul_b:
  case Intrinsic::loongarch_lsx_vmul_h:
  case Intrinsic::loongarch_lsx_vmul_w:
  case Intrinsic::loongarch_lsx_vmul_d:
  case Intrinsic::loongarch_lasx_xvmul_b:
  case Intrinsic::loongarch_lasx_xvmul_h:
  case Intrinsic::loongarch_lasx_xvmul_w:
  case Intrinsic::loongarch_lasx_xvmul_d:
  case Intrinsic::loongarch_lsx_vmadd_b:
  case Intrinsic::loongarch_lsx_vmadd_h:
  case Intrinsic::loongarch_lsx_vmadd_w:
  case Intrinsic::loongarch_lsx_vmadd_d:
  case Intrinsic::loongarch_lasx_xvmadd_b:
  case Intrinsic::loongarch_lasx_xvmadd_h:
  case Intrinsic::loongarch_lasx_xvmadd_w:
  case Intrinsic::loongarch_lasx_xvmadd_d: {
    EVT ResTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vmsub_b:
  case Intrinsic::loongarch_lsx_vmsub_h:
  case Intrinsic::loongarch_lsx_vmsub_w:
  case Intrinsic::loongarch_lsx_vmsub_d:
  case Intrinsic::loongarch_lasx_xvmsub_b:
  case Intrinsic::loongarch_lasx_xvmsub_h:
  case Intrinsic::loongarch_lasx_xvmsub_w:
  case Intrinsic::loongarch_lasx_xvmsub_d: {
    EVT ResTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vdiv_b:
  case Intrinsic::loongarch_lsx_vdiv_h:
  case Intrinsic::loongarch_lsx_vdiv_w:
  case Intrinsic::loongarch_lsx_vdiv_d:
  case Intrinsic::loongarch_lasx_xvdiv_b:
  case Intrinsic::loongarch_lasx_xvdiv_h:
  case Intrinsic::loongarch_lasx_xvdiv_w:
  case Intrinsic::loongarch_lasx_xvdiv_d:
  case Intrinsic::loongarch_lsx_vdiv_bu:
  case Intrinsic::loongarch_lsx_vdiv_hu:
  case Intrinsic::loongarch_lsx_vdiv_wu:
  case Intrinsic::loongarch_lsx_vdiv_du:
  case Intrinsic::loongarch_lasx_xvdiv_bu:
  case Intrinsic::loongarch_lasx_xvdiv_hu:
  case Intrinsic::loongarch_lasx_xvdiv_wu:
  case Intrinsic::loongarch_lasx_xvdiv_du:
  case Intrinsic::loongarch_lsx_vmod_b:
  case Intrinsic::loongarch_lsx_vmod_h:
  case Intrinsic::loongarch_lsx_vmod_w:
  case Intrinsic::loongarch_lsx_vmod_d:
  case Intrinsic::loongarch_lasx_xvmod_b:
  case Intrinsic::loongarch_lasx_xvmod_h:
  case Intrinsic::loongarch_lasx_xvmod_w:
  case Intrinsic::loongarch_lasx_xvmod_d:
  case Intrinsic::loongarch_lsx_vmod_bu:
  case Intrinsic::loongarch_lsx_vmod_hu:
  case Intrinsic::loongarch_lsx_vmod_wu:
  case Intrinsic::loongarch_lsx_vmod_du:
  case Intrinsic::loongarch_lasx_xvmod_bu:
  case Intrinsic::loongarch_lasx_xvmod_hu:
  case Intrinsic::loongarch_lasx_xvmod_wu:
  case Intrinsic::loongarch_lasx_xvmod_du:
  case Intrinsic::loongarch_lsx_vand_v:
  case Intrinsic::loongarch_lasx_xvand_v:
  case Intrinsic::loongarch_lsx_vor_v:
  case Intrinsic::loongarch_lasx_xvor_v:
  case Intrinsic::loongarch_lsx_vxor_v:
  case Intrinsic::loongarch_lasx_xvxor_v:
  case Intrinsic::loongarch_lsx_vnor_v:
  case Intrinsic::loongarch_lasx_xvnor_v: {
  case Intrinsic::loongarch_lsx_vandi_b:
  case Intrinsic::loongarch_lasx_xvandi_b:
                       lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vori_b:
  case Intrinsic::loongarch_lasx_xvori_b:
                       lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vxori_b:
  case Intrinsic::loongarch_lasx_xvxori_b:
                       lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsll_b:
  case Intrinsic::loongarch_lsx_vsll_h:
  case Intrinsic::loongarch_lsx_vsll_w:
  case Intrinsic::loongarch_lsx_vsll_d:
  case Intrinsic::loongarch_lasx_xvsll_b:
  case Intrinsic::loongarch_lasx_xvsll_h:
  case Intrinsic::loongarch_lasx_xvsll_w:
  case Intrinsic::loongarch_lasx_xvsll_d:
  case Intrinsic::loongarch_lsx_vslli_b:
  case Intrinsic::loongarch_lasx_xvslli_b:
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_h:
  case Intrinsic::loongarch_lasx_xvslli_h:
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_w:
  case Intrinsic::loongarch_lasx_xvslli_w:
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_d:
  case Intrinsic::loongarch_lasx_xvslli_d:
                       lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrl_b:
  case Intrinsic::loongarch_lsx_vsrl_h:
  case Intrinsic::loongarch_lsx_vsrl_w:
  case Intrinsic::loongarch_lsx_vsrl_d:
  case Intrinsic::loongarch_lasx_xvsrl_b:
  case Intrinsic::loongarch_lasx_xvsrl_h:
  case Intrinsic::loongarch_lasx_xvsrl_w:
  case Intrinsic::loongarch_lasx_xvsrl_d:
  case Intrinsic::loongarch_lsx_vsrli_b:
  case Intrinsic::loongarch_lasx_xvsrli_b:
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_h:
  case Intrinsic::loongarch_lasx_xvsrli_h:
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_w:
  case Intrinsic::loongarch_lasx_xvsrli_w:
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_d:
  case Intrinsic::loongarch_lasx_xvsrli_d:
                       lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsra_b:
  case Intrinsic::loongarch_lsx_vsra_h:
  case Intrinsic::loongarch_lsx_vsra_w:
  case Intrinsic::loongarch_lsx_vsra_d:
  case Intrinsic::loongarch_lasx_xvsra_b:
  case Intrinsic::loongarch_lasx_xvsra_h:
  case Intrinsic::loongarch_lasx_xvsra_w:
  case Intrinsic::loongarch_lasx_xvsra_d:
  case Intrinsic::loongarch_lsx_vsrai_b:
  case Intrinsic::loongarch_lasx_xvsrai_b:
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_h:
  case Intrinsic::loongarch_lasx_xvsrai_h:
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_w:
  case Intrinsic::loongarch_lasx_xvsrai_w:
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_d:
  case Intrinsic::loongarch_lasx_xvsrai_d:
                       lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vclz_b:
  case Intrinsic::loongarch_lsx_vclz_h:
  case Intrinsic::loongarch_lsx_vclz_w:
  case Intrinsic::loongarch_lsx_vclz_d:
  case Intrinsic::loongarch_lasx_xvclz_b:
  case Intrinsic::loongarch_lasx_xvclz_h:
  case Intrinsic::loongarch_lasx_xvclz_w:
  case Intrinsic::loongarch_lasx_xvclz_d:
  case Intrinsic::loongarch_lsx_vpcnt_b:
  case Intrinsic::loongarch_lsx_vpcnt_h:
  case Intrinsic::loongarch_lsx_vpcnt_w:
  case Intrinsic::loongarch_lsx_vpcnt_d:
  case Intrinsic::loongarch_lasx_xvpcnt_b:
  case Intrinsic::loongarch_lasx_xvpcnt_h:
  case Intrinsic::loongarch_lasx_xvpcnt_w:
  case Intrinsic::loongarch_lasx_xvpcnt_d:
  case Intrinsic::loongarch_lsx_vbitclr_b:
  case Intrinsic::loongarch_lsx_vbitclr_h:
  case Intrinsic::loongarch_lsx_vbitclr_w:
  case Intrinsic::loongarch_lsx_vbitclr_d:
  case Intrinsic::loongarch_lasx_xvbitclr_b:
  case Intrinsic::loongarch_lasx_xvbitclr_h:
  case Intrinsic::loongarch_lasx_xvbitclr_w:
  case Intrinsic::loongarch_lasx_xvbitclr_d:
  case Intrinsic::loongarch_lsx_vbitclri_b:
  case Intrinsic::loongarch_lasx_xvbitclri_b:
    return lowerVectorBitClearImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_h:
  case Intrinsic::loongarch_lasx_xvbitclri_h:
    return lowerVectorBitClearImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_w:
  case Intrinsic::loongarch_lasx_xvbitclri_w:
    return lowerVectorBitClearImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_d:
  case Intrinsic::loongarch_lasx_xvbitclri_d:
    return lowerVectorBitClearImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitset_b:
  case Intrinsic::loongarch_lsx_vbitset_h:
  case Intrinsic::loongarch_lsx_vbitset_w:
  case Intrinsic::loongarch_lsx_vbitset_d:
  case Intrinsic::loongarch_lasx_xvbitset_b:
  case Intrinsic::loongarch_lasx_xvbitset_h:
  case Intrinsic::loongarch_lasx_xvbitset_w:
  case Intrinsic::loongarch_lasx_xvbitset_d: {
    EVT VecTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vbitseti_b:
  case Intrinsic::loongarch_lasx_xvbitseti_b:
    return lowerVectorBitSetImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_h:
  case Intrinsic::loongarch_lasx_xvbitseti_h:
    return lowerVectorBitSetImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_w:
  case Intrinsic::loongarch_lasx_xvbitseti_w:
    return lowerVectorBitSetImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_d:
  case Intrinsic::loongarch_lasx_xvbitseti_d:
    return lowerVectorBitSetImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrev_b:
  case Intrinsic::loongarch_lsx_vbitrev_h:
  case Intrinsic::loongarch_lsx_vbitrev_w:
  case Intrinsic::loongarch_lsx_vbitrev_d:
  case Intrinsic::loongarch_lasx_xvbitrev_b:
  case Intrinsic::loongarch_lasx_xvbitrev_h:
  case Intrinsic::loongarch_lasx_xvbitrev_w:
  case Intrinsic::loongarch_lasx_xvbitrev_d: {
    EVT VecTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vbitrevi_b:
  case Intrinsic::loongarch_lasx_xvbitrevi_b:
    return lowerVectorBitRevImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_h:
  case Intrinsic::loongarch_lasx_xvbitrevi_h:
    return lowerVectorBitRevImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_w:
  case Intrinsic::loongarch_lasx_xvbitrevi_w:
    return lowerVectorBitRevImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_d:
  case Intrinsic::loongarch_lasx_xvbitrevi_d:
    return lowerVectorBitRevImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vfadd_s:
  case Intrinsic::loongarch_lsx_vfadd_d:
  case Intrinsic::loongarch_lasx_xvfadd_s:
  case Intrinsic::loongarch_lasx_xvfadd_d:
  case Intrinsic::loongarch_lsx_vfsub_s:
  case Intrinsic::loongarch_lsx_vfsub_d:
  case Intrinsic::loongarch_lasx_xvfsub_s:
  case Intrinsic::loongarch_lasx_xvfsub_d:
  case Intrinsic::loongarch_lsx_vfmul_s:
  case Intrinsic::loongarch_lsx_vfmul_d:
  case Intrinsic::loongarch_lasx_xvfmul_s:
  case Intrinsic::loongarch_lasx_xvfmul_d:
  case Intrinsic::loongarch_lsx_vfdiv_s:
  case Intrinsic::loongarch_lsx_vfdiv_d:
  case Intrinsic::loongarch_lasx_xvfdiv_s:
  case Intrinsic::loongarch_lasx_xvfdiv_d:
  case Intrinsic::loongarch_lsx_vfmadd_s:
  case Intrinsic::loongarch_lsx_vfmadd_d:
  case Intrinsic::loongarch_lasx_xvfmadd_s:
  case Intrinsic::loongarch_lasx_xvfmadd_d:
                       N->getOperand(2), N->getOperand(3));
  case Intrinsic::loongarch_lsx_vinsgr2vr_b:
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<4>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_h:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<3>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_w:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<2>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_d:
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<1>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vreplgr2vr_b:
  case Intrinsic::loongarch_lsx_vreplgr2vr_h:
  case Intrinsic::loongarch_lsx_vreplgr2vr_w:
  case Intrinsic::loongarch_lsx_vreplgr2vr_d:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_d: {
    EVT ResTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vreplve_b:
  case Intrinsic::loongarch_lsx_vreplve_h:
  case Intrinsic::loongarch_lsx_vreplve_w:
  case Intrinsic::loongarch_lsx_vreplve_d:
  case Intrinsic::loongarch_lasx_xvreplve_b:
  case Intrinsic::loongarch_lasx_xvreplve_h:
  case Intrinsic::loongarch_lasx_xvreplve_w:
  case Intrinsic::loongarch_lasx_xvreplve_d:

  switch (N->getOpcode()) {

  MF->insert(It, BreakMBB);

  SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()),
                  MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  BreakMBB->addSuccessor(SinkMBB);
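// Divide-by-zero trap expansion: the block is split around the division,
// BreakMBB receives the trapping break instruction, and SinkMBB inherits
// everything after MI, so execution either traps on a zero divisor or
// falls through to SinkMBB with the result.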
  switch (MI.getOpcode()) {
  case LoongArch::PseudoVBZ:
    CondOpc = LoongArch::VSETEQZ_V;
  case LoongArch::PseudoVBZ_B:
    CondOpc = LoongArch::VSETANYEQZ_B;
  case LoongArch::PseudoVBZ_H:
    CondOpc = LoongArch::VSETANYEQZ_H;
  case LoongArch::PseudoVBZ_W:
    CondOpc = LoongArch::VSETANYEQZ_W;
  case LoongArch::PseudoVBZ_D:
    CondOpc = LoongArch::VSETANYEQZ_D;
  case LoongArch::PseudoVBNZ:
    CondOpc = LoongArch::VSETNEZ_V;
  case LoongArch::PseudoVBNZ_B:
    CondOpc = LoongArch::VSETALLNEZ_B;
  case LoongArch::PseudoVBNZ_H:
    CondOpc = LoongArch::VSETALLNEZ_H;
  case LoongArch::PseudoVBNZ_W:
    CondOpc = LoongArch::VSETALLNEZ_W;
  case LoongArch::PseudoVBNZ_D:
    CondOpc = LoongArch::VSETALLNEZ_D;
  case LoongArch::PseudoXVBZ:
    CondOpc = LoongArch::XVSETEQZ_V;
  case LoongArch::PseudoXVBZ_B:
    CondOpc = LoongArch::XVSETANYEQZ_B;
  case LoongArch::PseudoXVBZ_H:
    CondOpc = LoongArch::XVSETANYEQZ_H;
  case LoongArch::PseudoXVBZ_W:
    CondOpc = LoongArch::XVSETANYEQZ_W;
  case LoongArch::PseudoXVBZ_D:
    CondOpc = LoongArch::XVSETANYEQZ_D;
  case LoongArch::PseudoXVBNZ:
    CondOpc = LoongArch::XVSETNEZ_V;
  case LoongArch::PseudoXVBNZ_B:
    CondOpc = LoongArch::XVSETALLNEZ_B;
  case LoongArch::PseudoXVBNZ_H:
    CondOpc = LoongArch::XVSETALLNEZ_H;
  case LoongArch::PseudoXVBNZ_W:
    CondOpc = LoongArch::XVSETALLNEZ_W;
  case LoongArch::PseudoXVBNZ_D:
    CondOpc = LoongArch::XVSETALLNEZ_D;

  F->insert(It, FalseBB);
  F->insert(It, TrueBB);
  F->insert(It, SinkBB);

  SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());

  Register FCC = MRI.createVirtualRegister(&LoongArch::CFRRegClass);
  Register RD1 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
  Register RD2 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
          MI.getOperand(0).getReg())

  MI.eraseFromParent();
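// emitVecCondBranchPseudo lowers the PseudoVBZ/PseudoVBNZ family into a
// diamond: the VSET* opcode chosen above writes the vector test into a CFR
// condition register (FCC), a conditional branch picks TrueBB or FalseBB,
// each materializes 1 or 0 into a GPR (RD1/RD2), and SinkBB merges the two
// values with a PHI.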
  switch (MI.getOpcode()) {
  case LoongArch::PseudoXVINSGR2VR_B:
    InsOp = LoongArch::VINSGR2VR_B;
  case LoongArch::PseudoXVINSGR2VR_H:
    InsOp = LoongArch::VINSGR2VR_H;

  unsigned Idx = MI.getOperand(3).getImm();

  if (Idx >= HalfSize) {
    ScratchReg1 = MRI.createVirtualRegister(RC);
    BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg1)

  Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
  Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
      .addReg(ScratchReg1, 0, LoongArch::sub_128);

  if (Idx >= HalfSize)
    ScratchReg2 = MRI.createVirtualRegister(RC);

  BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), ScratchReg2)
      .addImm(LoongArch::sub_128);

  if (Idx >= HalfSize)

  MI.eraseFromParent();
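// LASX has no byte/halfword insert instruction, so PseudoXVINSGR2VR_B/_H is
// expanded by hand: when Idx >= HalfSize the target 128-bit half is swapped
// into the low lanes with XVPERMI_Q, the element is inserted with the LSX
// VINSGR2VR_B/_H on the sub_128 subregister, the value is wrapped back up
// via SUBREG_TO_REG, and the halves are swapped again if needed.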
3574 switch (
MI.getOpcode()) {
3577 case LoongArch::DIV_W:
3578 case LoongArch::DIV_WU:
3579 case LoongArch::MOD_W:
3580 case LoongArch::MOD_WU:
3581 case LoongArch::DIV_D:
3582 case LoongArch::DIV_DU:
3583 case LoongArch::MOD_D:
3584 case LoongArch::MOD_DU:
3587 case LoongArch::WRFCSR: {
3589 LoongArch::FCSR0 + MI.getOperand(0).getImm())
3590 .addReg(MI.getOperand(1).getReg());
3591 MI.eraseFromParent();
3594 case LoongArch::RDFCSR: {
3597 MI.getOperand(0).getReg())
3598 .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
3600 MI.eraseFromParent();
3603 case LoongArch::PseudoVBZ:
3604 case LoongArch::PseudoVBZ_B:
3605 case LoongArch::PseudoVBZ_H:
3606 case LoongArch::PseudoVBZ_W:
3607 case LoongArch::PseudoVBZ_D:
3608 case LoongArch::PseudoVBNZ:
3609 case LoongArch::PseudoVBNZ_B:
3610 case LoongArch::PseudoVBNZ_H:
3611 case LoongArch::PseudoVBNZ_W:
3612 case LoongArch::PseudoVBNZ_D:
3613 case LoongArch::PseudoXVBZ:
3614 case LoongArch::PseudoXVBZ_B:
3615 case LoongArch::PseudoXVBZ_H:
3616 case LoongArch::PseudoXVBZ_W:
3617 case LoongArch::PseudoXVBZ_D:
3618 case LoongArch::PseudoXVBNZ:
3619 case LoongArch::PseudoXVBNZ_B:
3620 case LoongArch::PseudoXVBNZ_H:
3621 case LoongArch::PseudoXVBNZ_W:
3622 case LoongArch::PseudoXVBNZ_D:
3624 case LoongArch::PseudoXVINSGR2VR_B:
3625 case LoongArch::PseudoXVINSGR2VR_H:
unsigned *Fast) const {
3633 if (!Subtarget.hasUAL())
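The hasUAL() test above is the whole policy for misaligned accesses: without the UAL feature they are rejected, so the generic legalizer splits them into aligned pieces; with it they are both allowed and reported fast. A sketch of the assumed shape of the hook (the elided body is reconstructed from the check shown, not quoted):

bool allowsMisaligned(bool HasUAL, unsigned *Fast) {
  if (!HasUAL)
    return false; // generic code splits the access into aligned parts
  if (Fast)
    *Fast = 1;    // the hardware handles the misaligned access directly
  return true;
}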
3647#define NODE_NAME_CASE(node) \
3648 case LoongArchISD::node: \
3649 return "LoongArchISD::" #node;
3713#undef NODE_NAME_CASE
3726 LoongArch::R7, LoongArch::R8, LoongArch::R9,
3727 LoongArch::R10, LoongArch::R11};
3731 LoongArch::F3, LoongArch::F4, LoongArch::F5,
3732 LoongArch::F6, LoongArch::F7};
3735 LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
3736 LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
3739 LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
3740 LoongArch::VR6, LoongArch::VR7};
3743 LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
3744 LoongArch::XR6, LoongArch::XR7};
3750 unsigned ValNo2, MVT ValVT2, MVT LocVT2,
3752 unsigned GRLenInBytes = GRLen / 8;
3785 unsigned ValNo, MVT ValVT,
3787 CCState &State, bool IsFixed, bool IsRet,
3789 unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
3790 assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
3791 MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
3796 if (IsRet && ValNo > 1)
3800 bool UseGPRForFloat = true;
3810 UseGPRForFloat = !IsFixed;
3819 UseGPRForFloat = true;
3821 if (UseGPRForFloat && ValVT == MVT::f32) {
3824 } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
3827 } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
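The !IsFixed case above means variadic floating-point arguments never travel in FPRs: a vararg float or double is moved into a GPR and passed like an integer. For example (assuming the LP64D ABI):

#include <cstdio>

void demo() {
  double D = 1.0;
  // D is a variadic argument, so it is passed in a GPR rather than in fa0,
  // which is also what a va_arg-walking callee expects to find.
  std::printf("%f\n", D);
}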
3838 unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
3840 DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
3843 if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
3852 "PendingLocs and PendingArgFlags out of sync");
3870 PendingLocs.size() <= 2) {
3871 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
3876 PendingLocs.clear();
3877 PendingArgFlags.clear();
3884 unsigned StoreSizeBytes = GRLen / 8;
3887 if (ValVT == MVT::f32 && !UseGPRForFloat)
3889 else if (ValVT == MVT::f64 && !UseGPRForFloat)
3903 if (!PendingLocs.empty()) {
3905 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
3906 for (auto &It : PendingLocs) {
3908 It.convertToReg(Reg);
3913 PendingLocs.clear();
3914 PendingArgFlags.clear();
3917 assert((!UseGPRForFloat || LocVT == GRLenVT) &&
3918 "Expected a GRLenVT at this stage");
3935void LoongArchTargetLowering::analyzeInputArgs(
3938 LoongArchCCAssignFn Fn) const {
3940 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3942 Type *ArgTy = nullptr;
3944 ArgTy = FType->getReturnType();
3945 else if (Ins[i].isOrigArg())
3946 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
3950 CCInfo, true, IsRet, ArgTy)) {
3951 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
3958void LoongArchTargetLowering::analyzeOutputArgs(
3961 CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
3962 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
3963 MVT ArgVT = Outs[i].VT;
3964 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
3968 CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
3969 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
4010 if (In.isOrigArg()) {
4015 if ((BitWidth <= 32 && In.Flags.isSExt()) ||
4016 (BitWidth < 32 && In.Flags.isZExt())) {
4076 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
4080 LoongArch::R23, LoongArch::R24, LoongArch::R25,
4081 LoongArch::R26, LoongArch::R27, LoongArch::R28,
4082 LoongArch::R29, LoongArch::R30, LoongArch::R31};
4089 if (LocVT == MVT::f32) {
4092 static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
4093 LoongArch::F26, LoongArch::F27};
4094 if (unsigned Reg = State.AllocateReg(FPR32List)) {
4100 if (LocVT == MVT::f64) {
4103 static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
4104 LoongArch::F30_64, LoongArch::F31_64};
4105 if (unsigned Reg = State.AllocateReg(FPR64List)) {
4133 "GHC calling convention requires the F and D extensions");
4138 unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
4140 std::vector<SDValue> OutChains;
4149 analyzeInputArgs(MF, CCInfo, Ins, false, CC_LoongArch);
4151 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4163 unsigned ArgIndex = Ins[i].OrigArgIndex;
4164 unsigned ArgPartOffset = Ins[i].PartOffset;
4165 assert(ArgPartOffset == 0);
4166 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
4168 unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
4191 int VaArgOffset, VarArgsSaveSize;
4197 VarArgsSaveSize = 0;
4199 VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
4200 VaArgOffset = -VarArgsSaveSize;
4206 LoongArchFI->setVarArgsFrameIndex(FI);
4214 VarArgsSaveSize += GRLenInBytes;
4219 for (unsigned I = Idx; I < ArgRegs.size();
4220 ++I, VaArgOffset += GRLenInBytes) {
4228 cast<StoreSDNode>(Store.getNode())
4230 ->setValue((Value *)nullptr);
4231 OutChains.push_back(Store);
4233 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
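The arithmetic above sizes the varargs register save area from the first unallocated argument GPR. A worked instance (assumption: LA64, eight argument GPRs a0-a7, GRLenInBytes == 8):

constexpr unsigned varArgsSaveSize(unsigned FirstUnallocatedGPR) {
  constexpr unsigned NumArgGPRs = 8, GRLenInBytes = 8;
  return GRLenInBytes * (NumArgGPRs - FirstUnallocatedGPR);
}
// A printf-like callee consumes a0 for its one fixed argument, leaving
// a1-a7 to be spilled so va_arg can walk them in memory: 8 * (8 - 1) == 56.
static_assert(varArgsSaveSize(1) == 56);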
4238 if (!OutChains.empty()) {
4239 OutChains.push_back(Chain);
4254 if (N->getNumValues() != 1)
4256 if (!N->hasNUsesOfValue(1, 0))
4259 SDNode *Copy = *N->use_begin();
4265 if (Copy->getGluedNode())
4269 bool HasRet = false;
4270 for (SDNode *Node : Copy->uses()) {
4279 Chain = Copy->getOperand(0);
4284bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
4288 auto CalleeCC = CLI.CallConv;
4289 auto &Outs = CLI.Outs;
4291 auto CallerCC = Caller.getCallingConv();
4298 for (auto &VA : ArgLocs)
4304 auto IsCallerStructRet = Caller.hasStructRetAttr();
4305 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
4306 if (IsCallerStructRet || IsCalleeStructRet)
4310 for (auto &Arg : Outs)
4311 if (Arg.Flags.isByVal())
4316 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4317 if (CalleeCC != CallerCC) {
4318 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4319 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
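The checks above reject tail calls whenever an argument is assigned a stack location, either side uses a struct-return pointer, any argument is byval, or the callee's convention preserves fewer registers than the caller's. One concrete disqualifier (hypothetical signatures): with nine integer arguments the ninth goes on the stack, so the call below is not tail-call optimized even though it sits in tail position.

long callee(long, long, long, long, long, long, long, long, long);

long caller() {
  return callee(1, 2, 3, 4, 5, 6, 7, 8, 9); // ninth argument is on the stack
}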
4357 analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI, CC_LoongArch);
4361 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
4367 "site marked musttail");
4374 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
4376 if (!Flags.isByVal())
4380 unsigned Size = Flags.getByValSize();
4381 Align Alignment = Flags.getNonZeroByValAlign();
4388 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
4390 false, nullptr, std::nullopt,
4402 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
4404 SDValue ArgValue = OutVals[i];
4417 unsigned ArgIndex = Outs[i].OrigArgIndex;
4418 unsigned ArgPartOffset = Outs[i].PartOffset;
4419 assert(ArgPartOffset == 0);
4424 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
4425 SDValue PartValue = OutVals[i + 1];
4426 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
4436 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4440 for (const auto &Part : Parts) {
4441 SDValue PartValue = Part.first;
4442 SDValue PartOffset = Part.second;
4449 ArgValue = SpillSlot;
4455 if (Flags.isByVal())
4456 ArgValue = ByValArgs[j++];
4463 assert(!IsTailCall && "Tail call not allowed if stack is used "
4464 "for passing parameters");
4467 if (!StackPtr.getNode())
4480 if (!MemOpChains.empty())
4486 for (auto &Reg : RegsToPass) {
4487 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
4514 for (auto &Reg : RegsToPass)
4520 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
4521 assert(Mask && "Missing call preserved mask for calling convention");
4539 assert(Subtarget.is64Bit() && "Medium code model requires LA64");
4543 assert(Subtarget.is64Bit() && "Large code model requires LA64");
4566 analyzeInputArgs(MF, RetCCInfo, Ins, true, CC_LoongArch);
4569 for (auto &VA : RVLocs) {
4589 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
4591 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
4595 Outs[i].Flags, CCInfo, true, true,
4622 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
4644bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4645 bool ForCodeSize) const {
4647 if (VT == MVT::f32 && !Subtarget.hasBasicF())
4649 if (VT == MVT::f64 && !Subtarget.hasBasicD())
4651 return (Imm.isZero() || Imm.isExactlyValue(+1.0));
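So only +0.0 and +1.0 count as legal FP immediates; other constants are expected to be loaded from the constant pool. A sketch of the consequence (expected codegen under the checks above, not a guarantee):

double zero() { return 0.0; }              // materialized without a load
double one() { return 1.0; }               // likewise
double pi() { return 3.141592653589793; }  // constant-pool load expected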
4662bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
4665 return isa<LoadInst>(I) || isa<StoreInst>(I);
4667 if (isa<LoadInst>(I))
4672 if (isa<StoreInst>(I)) {
4673 unsigned Size = I->getOperand(0)->getType()->getIntegerBitWidth();
4690 return Y.getValueType().isScalarInteger() && !isa<ConstantSDNode>(Y);
4696 unsigned Intrinsic) const {
4697 switch (Intrinsic) {
4700 case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
4701 case Intrinsic::loongarch_masked_atomicrmw_add_i32:
4702 case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
4703 case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
4705 Info.memVT = MVT::i32;
4706 Info.ptrVal = I.getArgOperand(0);
4741 return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
4743 return Intrinsic::loongarch_masked_atomicrmw_add_i64;
4745 return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
4747 return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
4749 return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
4751 return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
4753 return Intrinsic::loongarch_masked_atomicrmw_max_i64;
4755 return Intrinsic::loongarch_masked_atomicrmw_min_i64;
4765 return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
4767 return Intrinsic::loongarch_masked_atomicrmw_add_i32;
4769 return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
4771 return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
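These mappings back the AtomicExpand strategy for sub-word atomics: i8/i16 atomicrmw has no native instruction in the base ISA, so it is rewritten as a masked operation on the containing aligned word, using the _i64 intrinsics when GRLen is 64 and the _i32 ones when it is 32. At the source level (expected lowering on LA64, shown for illustration):

#include <atomic>

short fetchAdd(std::atomic<short> &A) {
  // Expected to expand via loongarch_masked_atomicrmw_add_i64 over the
  // aligned word containing A, with a mask selecting the i16 lane.
  return A.fetch_add(1);
}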
4792 Value *FailureOrdering =
4796 Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
4804 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
4828 unsigned GRLen = Subtarget.getGRLen();
4857 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
4860 Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
4887 const Constant *PersonalityFn) const {
4888 return LoongArch::R4;
4892 const Constant *PersonalityFn) const {
4893 return LoongArch::R5;
4901LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
4919 if (Constraint.size() == 1) {
4920 switch (Constraint[0]) {
4935 if (Constraint == "ZC" || Constraint == "ZB")
4951std::pair<unsigned, const TargetRegisterClass *>
4952LoongArchTargetLowering::getRegForInlineAsmConstraint(
4956 if (Constraint.size() == 1) {
4957 switch (Constraint[0]) {
4962 return std::make_pair(0U, &LoongArch::GPRRegClass);
4964 if (Subtarget.hasBasicF() && VT == MVT::f32)
4965 return std::make_pair(0U, &LoongArch::FPR32RegClass);
4966 if (Subtarget.hasBasicD() && VT == MVT::f64)
4967 return std::make_pair(0U, &LoongArch::FPR64RegClass);
4968 if (Subtarget.hasExtLSX() &&
4969 TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
4970 return std::make_pair(0U, &LoongArch::LSX128RegClass);
4971 if (Subtarget.hasExtLASX() &&
4972 TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
4973 return std::make_pair(0U, &LoongArch::LASX256RegClass);
4993 bool IsFP = Constraint[2] == 'f';
4994 std::pair<StringRef, StringRef> Temp = Constraint.split('$');
4995 std::pair<unsigned, const TargetRegisterClass *> R;
4997 TRI, join_items("", Temp.first, Temp.second), VT);
5000 unsigned RegNo = R.first;
5001 if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
5002 if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
5003 unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
5004 return std::make_pair(DReg, &LoongArch::FPR64RegClass);
5014void LoongArchTargetLowering::LowerAsmOperandForConstraint(
5018 if (Constraint.size() == 1) {
5019 switch (Constraint[0]) {
5022 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5024 if (isInt<16>(CVal))
5031 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5033 if (isInt<12>(CVal))
5040 if (auto *C = dyn_cast<ConstantSDNode>(Op))
5041 if (C->getZExtValue() == 0)
5047 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5049 if (isUInt<12>(CVal))
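The cases above validate immediate operands for the LoongArch inline-asm constraints; the isInt<16>, isInt<12>, zero, and isUInt<12> checks correspond to the 'l', 'I', 'J', and 'K' constraint letters in the full source (the case labels are elided in this excerpt). For example (assuming GCC-style inline asm; addi.d takes a simm12, so 'I' fits):

long addImm(long X) {
  long R;
  asm("addi.d %0, %1, %2" : "=r"(R) : "r"(X), "I"(42));
  return R;
}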
5061#define GET_REGISTER_MATCHER
5062#include "LoongArchGenAsmMatcher.inc"
5068 std::string NewRegName = Name.second.str();
5070 if (Reg == LoongArch::NoRegister)
5072 if (Reg == LoongArch::NoRegister)
5076 if (!ReservedRegs.test(Reg))
5092 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
5093 const APInt &Imm = ConstNode->getAPIntValue();
5095 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
5096 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
5099 if (ConstNode->hasOneUse() &&
5100 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
5101 (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
5107 if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
5108 unsigned Shifts = Imm.countr_zero();
5114 APInt ImmPop = Imm.ashr(Shifts);
5115 if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
5119 APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
5120 if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
5121 (ImmSmall - Imm).isPowerOf2())
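These predicates accept multipliers that decompose into a couple of shift-and-add/sub steps; LoongArch's alsl instruction computes (rj << sa2) + rk, so such products avoid a real multiply. Worked instances of three of the accepted shapes (illustrative identities only):

unsigned long mul9(unsigned long X) { return (X << 3) + X; }  // Imm - 1 == 8
unsigned long mul15(unsigned long X) { return (X << 4) - X; } // Imm + 1 == 16
// 12288 == 3 << 12 lies outside the simm12/uimm12 range, so the shifted
// ImmPop == 3 case applies: multiply by 3, then shift.
unsigned long mul12288(unsigned long X) { return ((X << 1) + X) << 12; }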
5131 Type *Ty, unsigned AS,
5147 !(isShiftedInt<14, 2>(AM.BaseOffs) && Subtarget.hasUAL()))
5174 return isInt<12>(Imm);
5178 return isInt<12>(Imm);
5185 if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
5186 EVT MemVT = LD->getMemoryVT();
5187 if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
5198 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
5207 if (Y.getValueType().isVector())
5210 return !isa<ConstantSDNode>(Y);
5219 EVT Type, bool IsSigned) const {
unsigned const MachineRegisterInfo * MRI
static MCRegister MatchRegisterName(StringRef Name)
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
#define NODE_NAME_CASE(node)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static MCRegister MatchRegisterAltName(StringRef Name)
Maps from the set of all alternative registernames to a register number.
Function Alias Analysis Results
static uint64_t getConstant(const Value *IndexValue)
static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
Analysis containing CSE Info
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackFromRegLoc(const CSKYSubtarget &Subtarget, SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
static SDValue performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
const MCPhysReg ArgFPR32s[]
static SDValue emitIntrinsicErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static cl::opt< bool > ZeroDivCheck("loongarch-check-zero-division", cl::Hidden, cl::desc("Trap on integer division by zero."), cl::init(false))
static void emitErrorAndReplaceIntrinsicResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, StringRef ErrorMsg, bool WithChain=true)
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI, unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy)
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG)
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG)
static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG)
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG)
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static void replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, bool IsSigned=false)
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
const MCPhysReg ArgFPR64s[]
#define IOCSRWR_CASE(NAME, NODE)
#define CRC_CASE_EXT_UNARYOP(NAME, NODE)
static MachineBasicBlock * emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
const MCPhysReg ArgGPRs[]
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp, unsigned ExtOpc=ISD::ANY_EXTEND)
static void replaceVecCondBranchResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
#define ASRT_LE_GT_CASE(NAME)
static bool isConstantOrUndef(const SDValue Op)
static MachineBasicBlock * emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRRD_CASE(NAME, NODE)
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG)
static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op)
static void replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen, AtomicRMWInst::BinOp BinOp)
static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode)
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Class for arbitrary precision integers.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM Basic Block Representation.
bool test(unsigned Idx) const
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
SmallVectorImpl< ISD::ArgFlagsTy > & getPendingArgFlags()
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
SmallVectorImpl< CCValAssign > & getPendingLocs()
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
unsigned getValNo() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Argument * getArg(unsigned i) const
Common base class shared among various IRBuilders.
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
LoongArchMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private Lo...
void addSExt32Register(Register Reg)
const LoongArchRegisterInfo * getRegisterInfo() const override
const LoongArchInstrInfo * getInstrInfo() const override
unsigned getMaxBytesForAlignment() const
Align getPrefFunctionAlignment() const
unsigned getGRLen() const
Align getPrefLoopAlignment() const
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Determine if the target supports unaligned memory accesses.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
LoongArchTargetLowering(const TargetMachine &TM, const LoongArchSubtarget &STI)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
bool hasFeature(unsigned Feature) const
bool is128BitVector() const
Return true if this is a 128-bit vector type.
bool isVector() const
Return true if this is a vector value type.
static auto fixedlen_vector_valuetypes()
bool is256BitVector() const
Return true if this is a 256-bit vector type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
size_t use_size() const
Return the number of uses of this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setMaxBytesForAlignment(unsigned MaxBytes)
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
ABI getTargetABI(StringRef ABIName)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
init - Specify an initial (default) value for a command-line option.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
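A hedged BuildMI sketch (hypothetical fragment: MBB, MBBI, DL, TII, DstReg, and SrcReg are all assumed from the surrounding pass):

    // Emit `ori DstReg, SrcReg, 0`, i.e. a register copy via ORI.
    BuildMI(MBB, MBBI, DL, TII->get(LoongArch::ORI), DstReg)
        .addReg(SrcReg)
        .addImm(0);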
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64-bit version).
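For example (compile-time checks; the predicate lives in llvm/Support/MathExtras.h), a single contiguous run of ones qualifies and anything else does not, which is what bit-field insert/extract pattern matching wants:

    #include "llvm/Support/MathExtras.h"
    static_assert(llvm::isShiftedMask_64(0x0000FF00)); // ones in bits 8..15
    static_assert(!llvm::isShiftedMask_64(0));         // empty run: rejected
    static_assert(!llvm::isShiftedMask_64(0b1011));    // two runs: rejected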
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
template <unsigned N> constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
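For example, checking whether an immediate fits a 12-bit unsigned field (the width here is illustrative):

    #include "llvm/Support/MathExtras.h"
    static_assert(llvm::isUInt<12>(4095));  // 2^12 - 1 fits
    static_assert(!llvm::isUInt<12>(4096)); // 2^12 does not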
AtomicOrdering
Atomic ordering for LLVM's memory model.
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
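The EVT queries above compose naturally; a small self-contained sketch (the function name is invented):

    #include <cassert>
    #include "llvm/CodeGen/ValueTypes.h"
    using namespace llvm;
    void evtQueries() {
      EVT VT = MVT::v4f32;
      assert(VT.isVector() && VT.is128BitVector());
      assert(VT.getVectorNumElements() == 4);
      assert(VT.getScalarType() == MVT::f32);
      assert(VT.changeVectorElementTypeToInteger() == MVT::v4i32);
      assert(VT.getFixedSizeInBits() == 128);
    }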
Align getNonZeroOrigAlign() const
Register getFrameRegister(const MachineFunction &MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR-level pointer.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
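A hedged fragment showing getFixedStack feeding a stack-slot load inside a hypothetical lowering hook (DAG, DL, Chain, MF, FI, and PtrVT are all assumed):

    // Load an i64 from frame index FI, tagging the memory operand so
    // alias analysis knows it refers to a fixed stack slot.
    SDValue Addr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load = DAG.getLoad(MVT::i64, DL, Chain, Addr,
                               MachinePointerInfo::getFixedStack(MF, FI));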
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)