#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

    cl::desc("Trap on integer division by zero."),
  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())

  static const MVT::SimpleValueType LSXVTs[] = {
      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
  static const MVT::SimpleValueType LASXVTs[] = {
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
  if (Subtarget.hasBasicF()) {
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.hasBasicD()) {

  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {

  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {

  if (Subtarget.hasExtLSX())
SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
                                                SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::EH_DWARF_CFA:
    return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
    return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/false);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:
    return lowerFP_TO_SINT(Op, DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::UINT_TO_FP:
    return lowerUINT_TO_FP(Op, DAG);
  case ISD::SINT_TO_FP:
    return lowerSINT_TO_FP(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::WRITE_REGISTER:
    return lowerWRITE_REGISTER(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  }
  return SDValue();
}
  if (isa<ConstantSDNode>(Op))
    return true;
  if (isa<ConstantFPSDNode>(Op))
    return true;
  EVT ResTy = Op->getValueType(0);

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))
    return SDValue();

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                            /*MinSplatBits=*/8) &&
      SplatBitSize <= 64) {
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
        SplatBitSize != 64)
      return SDValue();

    switch (SplatBitSize) {
    default:
      return SDValue();
    case 8:
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      break;
    case 16:
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      break;
    case 32:
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      break;
    case 64:
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
      break;
    }

    if (ViaVecTy != ResTy)
  EVT ResTy = Node->getValueType(0);

  for (unsigned i = 0; i < NumElts; ++i) {
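// Note (illustrative, assumption): a constant splat such as
// <16 x i8> <i8 5, i8 5, ...> is materialized through the equally sized
// integer vector type (v16i8 for LSX, v32i8 for LASX) and bitcast back when
// ResTy differs; only non-constant vectors reach the element-by-element
// INSERT_VECTOR_ELT loop above.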
SDValue
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT VecTy = Op->getOperand(0)->getValueType(0);
  EVT EltTy = VecTy.getVectorElementType();
  unsigned NumElts = VecTy.getVectorNumElements();
  SDValue Idx = Op->getOperand(1);

  if (isa<ConstantSDNode>(Idx) &&
      (EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
       EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))
    return Op;

  return SDValue();
}
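// Note (illustrative, assumption): the Idx < NumElts / 2 clause limits
// element types other than i32/i64/f32/f64 to constant indices in the low
// half of the vector -- e.g. for v32i8 under LASX only indices 0-15 are
// accepted here, and everything else falls back to the default expansion.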
SDValue
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                SelectionDAG &DAG) const {
  if (isa<ConstantSDNode>(Op->getOperand(2)))
    return Op;
  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
    DAG.getContext()->emitError(
        "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  }

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
    DAG.getContext()->emitError(
        "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
  }
  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
    DAG.getContext()->emitError("argument to '__builtin_frame_address' must "
                                "be a constant integer");
    return SDValue();
  }

  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;
  while (Depth--) {
    int Offset = -(GRLenInBytes * 2);

  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "return address can only be determined for the current frame");
    return SDValue();
  }
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");

  if (Op0->getOpcode() == ISD::AND) {
    auto *C = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))
      return Op;
  }

  if (Op0.getOpcode() == ISD::AssertZext &&
      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLT(MVT::i32))
    return Op;

  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =
  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");

  if (Op0.getOpcode() == ISD::AssertSext &&
      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLE(MVT::i32))
    return Op;

  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =
  if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {

  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);

template <class NodeTy>
SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                         CodeModel::Model M,
                                         bool IsLocal) const {
    assert(Subtarget.is64Bit() && "Large code model requires LA64");
  return getAddr(cast<BlockAddressSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());

  return getAddr(cast<JumpTableSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());

  return getAddr(cast<ConstantPoolSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());
  assert(N->getOffset() == 0 && "unexpected offset in global node");

  if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
    if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())
                                        unsigned Opc, bool UseGOT,

  Args.push_back(Entry);
SDValue
LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");

  switch (model) {
  case TLSModel::GeneralDynamic:
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
                             Large);
  case TLSModel::LocalDynamic:
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
                             Large);
  case TLSModel::InitialExec:
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
                            Large);
  case TLSModel::LocalExec:
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
                            /*UseGOT=*/false);
  }

  return getTLSDescAddr(N, DAG,
                        Large ? LoongArch::PseudoLA_TLS_DESC_PC_LARGE
                              : LoongArch::PseudoLA_TLS_DESC_PC,
                        Large);
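// Note (illustrative, assumption): each TLS model maps directly onto a
// pseudo -- GD/LD go through getDynamicTLSAddr (a __tls_get_addr call), IE
// loads the thread offset from the GOT via getStaticTLSAddr, and LE folds
// the offset straight into code; the *_LARGE variants are only valid on
// LA64 per the assert above.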
template <unsigned N>
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp,
                                    SelectionDAG &DAG, bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));
  // Check the ImmArg.
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
    DAG.getContext()->emitError(Op->getOperationName(0) +
                                ": argument out of range.");
SDValue
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  switch (Op.getConstantOperandVal(0)) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(LoongArch::R2, PtrVT);
  }
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
    return checkIntrinsicImmArg<1>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
    return checkIntrinsicImmArg<2>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_d:
    return checkIntrinsicImmArg<2>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
    return checkIntrinsicImmArg<3>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_w:
    return checkIntrinsicImmArg<3>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
    return checkIntrinsicImmArg<4>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
    return checkIntrinsicImmArg<4>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
    return checkIntrinsicImmArg<5>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
    return checkIntrinsicImmArg<5>(Op, 2, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
    return checkIntrinsicImmArg<5>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
    return checkIntrinsicImmArg<6>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
    return checkIntrinsicImmArg<6>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
    return checkIntrinsicImmArg<7>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
    return checkIntrinsicImmArg<8>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
    return checkIntrinsicImmArg<8>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
    return checkIntrinsicImmArg<10>(Op, 1, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
    return checkIntrinsicImmArg<13>(Op, 1, DAG, /*IsSigned=*/true);
  }
}
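// Note (illustrative, assumption): the template width N tracks the
// instruction encoding, not the element width -- ui1/ui2 for 64-bit lane
// indices, ui3-ui6 for per-element shift/rotate/saturate amounts (log2 of
// the element width in bits), ui7 for the 128-bit *_d_q narrowing shifts,
// ui8 for shuffle/bitsel control bytes, and signed si10/si13 for the
// vrepli/vldi immediate loads.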
SDValue
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
    return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqLA64, DAG);
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
                             {Chain, DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2),
                              DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2), Op.getOperand(3),
                              DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_iocsrrd_d: {
    return DAG.getNode(
        LoongArchISD::IOCSRRD_D, DL, {GRLenVT, MVT::Other},
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2))});
  }
#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other},          \
                       {Chain, Op.getOperand(2)});                             \
  }
    IOCSRRD_CASE(iocsrrd_b, IOCSRRD_B);
    IOCSRRD_CASE(iocsrrd_h, IOCSRRD_H);
    IOCSRRD_CASE(iocsrrd_w, IOCSRRD_W);
#undef IOCSRRD_CASE
  case Intrinsic::loongarch_cpucfg: {
    return DAG.getNode(LoongArchISD::CPUCFG, DL, {GRLenVT, MVT::Other},
                       {Chain, Op.getOperand(2)});
  }
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<8>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : Op;
  }
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
      return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqF, DAG);
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::MOVFCSR2GR, DL, {VT, MVT::Other},
                             {Chain, DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
    return !isShiftedInt<11, 1>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
    return !isShiftedInt<10, 2>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
    return !isShiftedInt<9, 3>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  return Op.getOperand(0);
SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
                                                     SelectionDAG &DAG) const {
  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (IntrinsicEnum) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG);
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA32, DAG);
    unsigned Imm1 = Op.getConstantOperandVal(2);
    int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
    if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    return Op;
  }
  case Intrinsic::loongarch_dbar: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::DBAR, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
  case Intrinsic::loongarch_ibar: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::IBAR, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
  case Intrinsic::loongarch_break: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::BREAK, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqF, DAG);
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::MOVGR2FCSR, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT),
                             DAG.getNode(ISD::ANY_EXTEND, DL, GRLenVT,
                                         Op.getOperand(3)));
  }
  case Intrinsic::loongarch_syscall: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::SYSCALL, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    return Subtarget.is64Bit()                                                 \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain,        \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),  \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3))  \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2,   \
                             Op3);                                             \
  }
    IOCSRWR_CASE(iocsrwr_b, IOCSRWR_B);
    IOCSRWR_CASE(iocsrwr_h, IOCSRWR_H);
    IOCSRWR_CASE(iocsrwr_w, IOCSRWR_W);
#undef IOCSRWR_CASE
  case Intrinsic::loongarch_iocsrwr_d: {
    return !Subtarget.is64Bit()
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)
               : DAG.getNode(LoongArchISD::IOCSRWR_D, DL, MVT::Other, Chain,
                             Op.getOperand(2),
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                         Op.getOperand(3)));
  }
#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    return !Subtarget.is64Bit()                                                \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)           \
               : Op;                                                           \
  }
    ASRT_LE_GT_CASE(asrtle_d)
    ASRT_LE_GT_CASE(asrtgt_d)
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
  }
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue())
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<5>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<1>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  EVT VT = Lo.getValueType();

  EVT VT = Lo.getValueType();
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));

    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
static void emitErrorAndReplaceIntrinsicResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG,
    StringRef ErrorMsg, bool WithChain = true) {
  DAG.getContext()->emitError(N->getOperationName(0) + ": " + ErrorMsg + ".");
  Results.push_back(DAG.getUNDEF(N->getValueType(0)));
  if (WithChain)
    Results.push_back(N->getOperand(0));
}
template <unsigned N>
static void replaceVPICKVE2GRResults(SDNode *Node,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG,
                                     const LoongArchSubtarget &Subtarget,
                                     unsigned ResOp) {
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  if (!isUInt<N>(Imm)) {
  switch (N->getConstantOperandVal(0)) {
  default:
    llvm_unreachable("Unexpected Intrinsic.");
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
    replaceVecCondBranchResults(N, Results, DAG, Subtarget,
                                LoongArchISD::VANY_ZERO);
    break;
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
    replaceVecCondBranchResults(N, Results, DAG, Subtarget,
                                LoongArchISD::VALL_ZERO);
    break;
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
    replaceVecCondBranchResults(N, Results, DAG, Subtarget,
                                LoongArchISD::VALL_NONZERO);
    break;
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:
    replaceVecCondBranchResults(N, Results, DAG, Subtarget,
                                LoongArchISD::VANY_NONZERO);
    break;
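// Note (illustrative, assumption): on LA64 these i32-typed results are
// recomputed in i64 and truncated; vpickve2gr_* picks an element with sign-
// or zero-extension (VPICK_SEXT_ELT vs VPICK_ZEXT_ELT), and the bz/bnz
// family becomes vector-mask queries (VALL_ZERO, VANY_ZERO, VALL_NONZERO,
// VANY_NONZERO) that later match the VSET*/branch pseudos.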
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
         "Unexpected custom legalisation");
         "Unexpected custom legalisation");
         "Unexpected custom legalisation");
         "Unexpected custom legalisation");
         "Unexpected custom legalisation");

    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =
    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
         "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
         "Unexpected custom legalisation");
    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

    switch (N->getConstantOperandVal(1)) {
    case Intrinsic::loongarch_movfcsr2gr: {
      if (!Subtarget.hasBasicF()) {
        emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqF);
        return;
      }
      unsigned Imm = N->getConstantOperandVal(2);
      if (!isUInt<2>(Imm)) {
        emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
        return;
      }
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)                                      \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),               \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
    break;                                                                     \
  }
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE)                                       \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, Op2,                                                           \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
    break;                                                                     \
  }
#undef CRC_CASE_EXT_UNARYOP
#define CSR_CASE(ID)                                                           \
  case Intrinsic::loongarch_##ID: {                                            \
    if (!Subtarget.is64Bit())                                                  \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64);   \
    break;                                                                     \
  }
    case Intrinsic::loongarch_csrrd_w: {
      unsigned Imm = N->getConstantOperandVal(2);
      if (!isUInt<14>(Imm)) {
        emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
        return;
      }
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
      if (!isUInt<14>(Imm)) {
        emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
        return;
      }
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);
      if (!isUInt<14>(Imm)) {
        emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
        return;
      }
#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue IOCSRRDResults =                                                   \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},            \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back(                                                         \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0)));       \
    Results.push_back(IOCSRRDResults.getValue(1));                             \
    break;                                                                     \
  }
    case Intrinsic::loongarch_cpucfg: {
    case Intrinsic::loongarch_lddir_d: {
        "On LA64, only 64-bit registers can be read.");
        "On LA32, only 32-bit registers can be read.");
    Results.push_back(N->getOperand(0));
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);

  unsigned SMIdx, SMLen;

  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||
      !isShiftedMask_64(CN->getZExtValue(), SMIdx, SMLen))
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))

    NewOperand = FirstOperand;

  msb = lsb + SMLen - 1;

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)
  SDValue FirstOperand = N->getOperand(0);
  EVT ValTy = N->getValueType(0);

  unsigned MaskIdx, MaskLen;

  if (FirstOperand.getOpcode() != ISD::AND ||
      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(CN->getZExtValue(), MaskIdx, MaskLen))
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))
    return SDValue();

  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;

  bool SwapAndRetried = false;

  if (ValBits != 32 && ValBits != 64)
    return SDValue();

  if ((CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

  if ((CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

  if ((CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&
                        ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                        : (MaskIdx0 + MaskLen0 - 1),

  if ((CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

  if ((CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&
      DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                    : (MaskIdx0 + MaskLen0 - 1),

  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

  if (!SwapAndRetried) {
    std::swap(N0, N1);
    SwapAndRetried = true;
    goto Retry;
  }

  SwapAndRetried = false;

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

  if (!SwapAndRetried) {
    std::swap(N0, N1);
    SwapAndRetried = true;
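// Note (illustrative): a concrete instance of the pattern being matched.
// On LA32,
//   (or (and $a, 0xFFFF00FF), (and (shl $b, 8), 0x0000FF00))
// inserts bits 15:8 of $b into $a; the mask 0x0000FF00 gives MaskIdx = 8 and
// MaskLen = 8, so the combine emits a single bstrins.w $a, $b, 15, 8.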
template <unsigned N>
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp,
                                       SelectionDAG &DAG,
                                       const LoongArchSubtarget &Subtarget,
                                       bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  // Check the ImmArg.
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
template <unsigned N>
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp,
                                   SelectionDAG &DAG, bool IsSigned = false) {
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  // Check the ImmArg.
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
  }
  return DAG.getConstant(
      APInt(ResTy.getScalarType().getSizeInBits(),
            IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
  EVT ResTy = Node->getValueType(0);

  EVT ResTy = Node->getValueType(0);
template <unsigned N>
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG) {
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");

template <unsigned N>
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG) {
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");

template <unsigned N>
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG) {
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
  switch (N->getConstantOperandVal(0)) {
  default:
    break;
  case Intrinsic::loongarch_lsx_vadd_b:
  case Intrinsic::loongarch_lsx_vadd_h:
  case Intrinsic::loongarch_lsx_vadd_w:
  case Intrinsic::loongarch_lsx_vadd_d:
  case Intrinsic::loongarch_lasx_xvadd_b:
  case Intrinsic::loongarch_lasx_xvadd_h:
  case Intrinsic::loongarch_lasx_xvadd_w:
  case Intrinsic::loongarch_lasx_xvadd_d:
    return DAG.getNode(ISD::ADD, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vaddi_bu:
  case Intrinsic::loongarch_lsx_vaddi_hu:
  case Intrinsic::loongarch_lsx_vaddi_wu:
  case Intrinsic::loongarch_lsx_vaddi_du:
  case Intrinsic::loongarch_lasx_xvaddi_bu:
  case Intrinsic::loongarch_lasx_xvaddi_hu:
  case Intrinsic::loongarch_lasx_xvaddi_wu:
  case Intrinsic::loongarch_lasx_xvaddi_du:
    return DAG.getNode(ISD::ADD, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsub_b:
  case Intrinsic::loongarch_lsx_vsub_h:
  case Intrinsic::loongarch_lsx_vsub_w:
  case Intrinsic::loongarch_lsx_vsub_d:
  case Intrinsic::loongarch_lasx_xvsub_b:
  case Intrinsic::loongarch_lasx_xvsub_h:
  case Intrinsic::loongarch_lasx_xvsub_w:
  case Intrinsic::loongarch_lasx_xvsub_d:
    return DAG.getNode(ISD::SUB, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vsubi_bu:
  case Intrinsic::loongarch_lsx_vsubi_hu:
  case Intrinsic::loongarch_lsx_vsubi_wu:
  case Intrinsic::loongarch_lsx_vsubi_du:
  case Intrinsic::loongarch_lasx_xvsubi_bu:
  case Intrinsic::loongarch_lasx_xvsubi_hu:
  case Intrinsic::loongarch_lasx_xvsubi_wu:
  case Intrinsic::loongarch_lasx_xvsubi_du:
    return DAG.getNode(ISD::SUB, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vneg_b:
  case Intrinsic::loongarch_lsx_vneg_h:
  case Intrinsic::loongarch_lsx_vneg_w:
  case Intrinsic::loongarch_lsx_vneg_d:
  case Intrinsic::loongarch_lasx_xvneg_b:
  case Intrinsic::loongarch_lasx_xvneg_h:
  case Intrinsic::loongarch_lasx_xvneg_w:
  case Intrinsic::loongarch_lasx_xvneg_d:
    return DAG.getNode(
        ISD::SUB, SDLoc(N), N->getValueType(0),
        DAG.getConstant(
            APInt(N->getValueType(0).getScalarType().getSizeInBits(), 0,
                  /*isSigned=*/true),
            SDLoc(N), N->getValueType(0)),
        N->getOperand(1));
  case Intrinsic::loongarch_lsx_vmax_b:
  case Intrinsic::loongarch_lsx_vmax_h:
  case Intrinsic::loongarch_lsx_vmax_w:
  case Intrinsic::loongarch_lsx_vmax_d:
  case Intrinsic::loongarch_lasx_xvmax_b:
  case Intrinsic::loongarch_lasx_xvmax_h:
  case Intrinsic::loongarch_lasx_xvmax_w:
  case Intrinsic::loongarch_lasx_xvmax_d:
    return DAG.getNode(ISD::SMAX, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmax_bu:
  case Intrinsic::loongarch_lsx_vmax_hu:
  case Intrinsic::loongarch_lsx_vmax_wu:
  case Intrinsic::loongarch_lsx_vmax_du:
  case Intrinsic::loongarch_lasx_xvmax_bu:
  case Intrinsic::loongarch_lasx_xvmax_hu:
  case Intrinsic::loongarch_lasx_xvmax_wu:
  case Intrinsic::loongarch_lasx_xvmax_du:
    return DAG.getNode(ISD::UMAX, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmaxi_b:
  case Intrinsic::loongarch_lsx_vmaxi_h:
  case Intrinsic::loongarch_lsx_vmaxi_w:
  case Intrinsic::loongarch_lsx_vmaxi_d:
  case Intrinsic::loongarch_lasx_xvmaxi_b:
  case Intrinsic::loongarch_lasx_xvmaxi_h:
  case Intrinsic::loongarch_lasx_xvmaxi_w:
  case Intrinsic::loongarch_lasx_xvmaxi_d:
    return DAG.getNode(ISD::SMAX, SDLoc(N), N->getValueType(0),
                       N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG, /*IsSigned=*/true));
  case Intrinsic::loongarch_lsx_vmaxi_bu:
  case Intrinsic::loongarch_lsx_vmaxi_hu:
  case Intrinsic::loongarch_lsx_vmaxi_wu:
  case Intrinsic::loongarch_lsx_vmaxi_du:
  case Intrinsic::loongarch_lasx_xvmaxi_bu:
  case Intrinsic::loongarch_lasx_xvmaxi_hu:
  case Intrinsic::loongarch_lasx_xvmaxi_wu:
  case Intrinsic::loongarch_lasx_xvmaxi_du:
    return DAG.getNode(ISD::UMAX, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vmin_b:
  case Intrinsic::loongarch_lsx_vmin_h:
  case Intrinsic::loongarch_lsx_vmin_w:
  case Intrinsic::loongarch_lsx_vmin_d:
  case Intrinsic::loongarch_lasx_xvmin_b:
  case Intrinsic::loongarch_lasx_xvmin_h:
  case Intrinsic::loongarch_lasx_xvmin_w:
  case Intrinsic::loongarch_lasx_xvmin_d:
    return DAG.getNode(ISD::SMIN, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmin_bu:
  case Intrinsic::loongarch_lsx_vmin_hu:
  case Intrinsic::loongarch_lsx_vmin_wu:
  case Intrinsic::loongarch_lsx_vmin_du:
  case Intrinsic::loongarch_lasx_xvmin_bu:
  case Intrinsic::loongarch_lasx_xvmin_hu:
  case Intrinsic::loongarch_lasx_xvmin_wu:
  case Intrinsic::loongarch_lasx_xvmin_du:
    return DAG.getNode(ISD::UMIN, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmini_b:
  case Intrinsic::loongarch_lsx_vmini_h:
  case Intrinsic::loongarch_lsx_vmini_w:
  case Intrinsic::loongarch_lsx_vmini_d:
  case Intrinsic::loongarch_lasx_xvmini_b:
  case Intrinsic::loongarch_lasx_xvmini_h:
  case Intrinsic::loongarch_lasx_xvmini_w:
  case Intrinsic::loongarch_lasx_xvmini_d:
    return DAG.getNode(ISD::SMIN, SDLoc(N), N->getValueType(0),
                       N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG, /*IsSigned=*/true));
  case Intrinsic::loongarch_lsx_vmini_bu:
  case Intrinsic::loongarch_lsx_vmini_hu:
  case Intrinsic::loongarch_lsx_vmini_wu:
  case Intrinsic::loongarch_lsx_vmini_du:
  case Intrinsic::loongarch_lasx_xvmini_bu:
  case Intrinsic::loongarch_lasx_xvmini_hu:
  case Intrinsic::loongarch_lasx_xvmini_wu:
  case Intrinsic::loongarch_lasx_xvmini_du:
    return DAG.getNode(ISD::UMIN, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vmul_b:
  case Intrinsic::loongarch_lsx_vmul_h:
  case Intrinsic::loongarch_lsx_vmul_w:
  case Intrinsic::loongarch_lsx_vmul_d:
  case Intrinsic::loongarch_lasx_xvmul_b:
  case Intrinsic::loongarch_lasx_xvmul_h:
  case Intrinsic::loongarch_lasx_xvmul_w:
  case Intrinsic::loongarch_lasx_xvmul_d:
    return DAG.getNode(ISD::MUL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmadd_b:
  case Intrinsic::loongarch_lsx_vmadd_h:
  case Intrinsic::loongarch_lsx_vmadd_w:
  case Intrinsic::loongarch_lsx_vmadd_d:
  case Intrinsic::loongarch_lasx_xvmadd_b:
  case Intrinsic::loongarch_lasx_xvmadd_h:
  case Intrinsic::loongarch_lasx_xvmadd_w:
  case Intrinsic::loongarch_lasx_xvmadd_d: {
    EVT ResTy = N->getValueType(0);
    return DAG.getNode(ISD::ADD, SDLoc(N), ResTy, N->getOperand(1),
                       DAG.getNode(ISD::MUL, SDLoc(N), ResTy, N->getOperand(2),
                                   N->getOperand(3)));
  }
  case Intrinsic::loongarch_lsx_vmsub_b:
  case Intrinsic::loongarch_lsx_vmsub_h:
  case Intrinsic::loongarch_lsx_vmsub_w:
  case Intrinsic::loongarch_lsx_vmsub_d:
  case Intrinsic::loongarch_lasx_xvmsub_b:
  case Intrinsic::loongarch_lasx_xvmsub_h:
  case Intrinsic::loongarch_lasx_xvmsub_w:
  case Intrinsic::loongarch_lasx_xvmsub_d: {
    EVT ResTy = N->getValueType(0);
    return DAG.getNode(ISD::SUB, SDLoc(N), ResTy, N->getOperand(1),
                       DAG.getNode(ISD::MUL, SDLoc(N), ResTy, N->getOperand(2),
                                   N->getOperand(3)));
  }
  case Intrinsic::loongarch_lsx_vdiv_b:
  case Intrinsic::loongarch_lsx_vdiv_h:
  case Intrinsic::loongarch_lsx_vdiv_w:
  case Intrinsic::loongarch_lsx_vdiv_d:
  case Intrinsic::loongarch_lasx_xvdiv_b:
  case Intrinsic::loongarch_lasx_xvdiv_h:
  case Intrinsic::loongarch_lasx_xvdiv_w:
  case Intrinsic::loongarch_lasx_xvdiv_d:
    return DAG.getNode(ISD::SDIV, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vdiv_bu:
  case Intrinsic::loongarch_lsx_vdiv_hu:
  case Intrinsic::loongarch_lsx_vdiv_wu:
  case Intrinsic::loongarch_lsx_vdiv_du:
  case Intrinsic::loongarch_lasx_xvdiv_bu:
  case Intrinsic::loongarch_lasx_xvdiv_hu:
  case Intrinsic::loongarch_lasx_xvdiv_wu:
  case Intrinsic::loongarch_lasx_xvdiv_du:
    return DAG.getNode(ISD::UDIV, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmod_b:
  case Intrinsic::loongarch_lsx_vmod_h:
  case Intrinsic::loongarch_lsx_vmod_w:
  case Intrinsic::loongarch_lsx_vmod_d:
  case Intrinsic::loongarch_lasx_xvmod_b:
  case Intrinsic::loongarch_lasx_xvmod_h:
  case Intrinsic::loongarch_lasx_xvmod_w:
  case Intrinsic::loongarch_lasx_xvmod_d:
    return DAG.getNode(ISD::SREM, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmod_bu:
  case Intrinsic::loongarch_lsx_vmod_hu:
  case Intrinsic::loongarch_lsx_vmod_wu:
  case Intrinsic::loongarch_lsx_vmod_du:
  case Intrinsic::loongarch_lasx_xvmod_bu:
  case Intrinsic::loongarch_lasx_xvmod_hu:
  case Intrinsic::loongarch_lasx_xvmod_wu:
  case Intrinsic::loongarch_lasx_xvmod_du:
    return DAG.getNode(ISD::UREM, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vand_v:
  case Intrinsic::loongarch_lasx_xvand_v:
    return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vor_v:
  case Intrinsic::loongarch_lasx_xvor_v:
    return DAG.getNode(ISD::OR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vxor_v:
  case Intrinsic::loongarch_lasx_xvxor_v:
    return DAG.getNode(ISD::XOR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vnor_v:
  case Intrinsic::loongarch_lasx_xvnor_v: {
    SDValue Res = DAG.getNode(ISD::OR, DL, N->getValueType(0),
                              N->getOperand(1), N->getOperand(2));
    return DAG.getNOT(DL, Res, N->getValueType(0));
  }
  case Intrinsic::loongarch_lsx_vandi_b:
  case Intrinsic::loongarch_lasx_xvandi_b:
    return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vori_b:
  case Intrinsic::loongarch_lasx_xvori_b:
    return DAG.getNode(ISD::OR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vxori_b:
  case Intrinsic::loongarch_lasx_xvxori_b:
    return DAG.getNode(ISD::XOR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsll_b:
  case Intrinsic::loongarch_lsx_vsll_h:
  case Intrinsic::loongarch_lsx_vsll_w:
  case Intrinsic::loongarch_lsx_vsll_d:
  case Intrinsic::loongarch_lasx_xvsll_b:
  case Intrinsic::loongarch_lasx_xvsll_h:
  case Intrinsic::loongarch_lasx_xvsll_w:
  case Intrinsic::loongarch_lasx_xvsll_d:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vslli_b:
  case Intrinsic::loongarch_lasx_xvslli_b:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_h:
  case Intrinsic::loongarch_lasx_xvslli_h:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_w:
  case Intrinsic::loongarch_lasx_xvslli_w:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_d:
  case Intrinsic::loongarch_lasx_xvslli_d:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrl_b:
  case Intrinsic::loongarch_lsx_vsrl_h:
  case Intrinsic::loongarch_lsx_vsrl_w:
  case Intrinsic::loongarch_lsx_vsrl_d:
  case Intrinsic::loongarch_lasx_xvsrl_b:
  case Intrinsic::loongarch_lasx_xvsrl_h:
  case Intrinsic::loongarch_lasx_xvsrl_w:
  case Intrinsic::loongarch_lasx_xvsrl_d:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vsrli_b:
  case Intrinsic::loongarch_lasx_xvsrli_b:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_h:
  case Intrinsic::loongarch_lasx_xvsrli_h:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_w:
  case Intrinsic::loongarch_lasx_xvsrli_w:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_d:
  case Intrinsic::loongarch_lasx_xvsrli_d:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsra_b:
  case Intrinsic::loongarch_lsx_vsra_h:
  case Intrinsic::loongarch_lsx_vsra_w:
  case Intrinsic::loongarch_lsx_vsra_d:
  case Intrinsic::loongarch_lasx_xvsra_b:
  case Intrinsic::loongarch_lasx_xvsra_h:
  case Intrinsic::loongarch_lasx_xvsra_w:
  case Intrinsic::loongarch_lasx_xvsra_d:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vsrai_b:
  case Intrinsic::loongarch_lasx_xvsrai_b:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_h:
  case Intrinsic::loongarch_lasx_xvsrai_h:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_w:
  case Intrinsic::loongarch_lasx_xvsrai_w:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_d:
  case Intrinsic::loongarch_lasx_xvsrai_d:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vclz_b:
  case Intrinsic::loongarch_lsx_vclz_h:
  case Intrinsic::loongarch_lsx_vclz_w:
  case Intrinsic::loongarch_lsx_vclz_d:
  case Intrinsic::loongarch_lasx_xvclz_b:
  case Intrinsic::loongarch_lasx_xvclz_h:
  case Intrinsic::loongarch_lasx_xvclz_w:
  case Intrinsic::loongarch_lasx_xvclz_d:
    return DAG.getNode(ISD::CTLZ, SDLoc(N), N->getValueType(0),
                       N->getOperand(1));
  case Intrinsic::loongarch_lsx_vpcnt_b:
  case Intrinsic::loongarch_lsx_vpcnt_h:
  case Intrinsic::loongarch_lsx_vpcnt_w:
  case Intrinsic::loongarch_lsx_vpcnt_d:
  case Intrinsic::loongarch_lasx_xvpcnt_b:
  case Intrinsic::loongarch_lasx_xvpcnt_h:
  case Intrinsic::loongarch_lasx_xvpcnt_w:
  case Intrinsic::loongarch_lasx_xvpcnt_d:
    return DAG.getNode(ISD::CTPOP, SDLoc(N), N->getValueType(0),
                       N->getOperand(1));
  case Intrinsic::loongarch_lsx_vbitclr_b:
  case Intrinsic::loongarch_lsx_vbitclr_h:
  case Intrinsic::loongarch_lsx_vbitclr_w:
  case Intrinsic::loongarch_lsx_vbitclr_d:
  case Intrinsic::loongarch_lasx_xvbitclr_b:
  case Intrinsic::loongarch_lasx_xvbitclr_h:
  case Intrinsic::loongarch_lasx_xvbitclr_w:
  case Intrinsic::loongarch_lasx_xvbitclr_d:
    return lowerVectorBitClear(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_b:
  case Intrinsic::loongarch_lasx_xvbitclri_b:
    return lowerVectorBitClearImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_h:
  case Intrinsic::loongarch_lasx_xvbitclri_h:
    return lowerVectorBitClearImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_w:
  case Intrinsic::loongarch_lasx_xvbitclri_w:
    return lowerVectorBitClearImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_d:
  case Intrinsic::loongarch_lasx_xvbitclri_d:
    return lowerVectorBitClearImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitset_b:
  case Intrinsic::loongarch_lsx_vbitset_h:
  case Intrinsic::loongarch_lsx_vbitset_w:
  case Intrinsic::loongarch_lsx_vbitset_d:
  case Intrinsic::loongarch_lasx_xvbitset_b:
  case Intrinsic::loongarch_lasx_xvbitset_h:
  case Intrinsic::loongarch_lasx_xvbitset_w:
  case Intrinsic::loongarch_lasx_xvbitset_d: {
    EVT VecTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vbitseti_b:
  case Intrinsic::loongarch_lasx_xvbitseti_b:
    return lowerVectorBitSetImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_h:
  case Intrinsic::loongarch_lasx_xvbitseti_h:
    return lowerVectorBitSetImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_w:
  case Intrinsic::loongarch_lasx_xvbitseti_w:
    return lowerVectorBitSetImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_d:
  case Intrinsic::loongarch_lasx_xvbitseti_d:
    return lowerVectorBitSetImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrev_b:
  case Intrinsic::loongarch_lsx_vbitrev_h:
  case Intrinsic::loongarch_lsx_vbitrev_w:
  case Intrinsic::loongarch_lsx_vbitrev_d:
  case Intrinsic::loongarch_lasx_xvbitrev_b:
  case Intrinsic::loongarch_lasx_xvbitrev_h:
  case Intrinsic::loongarch_lasx_xvbitrev_w:
  case Intrinsic::loongarch_lasx_xvbitrev_d: {
    EVT VecTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vbitrevi_b:
  case Intrinsic::loongarch_lasx_xvbitrevi_b:
    return lowerVectorBitRevImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_h:
  case Intrinsic::loongarch_lasx_xvbitrevi_h:
    return lowerVectorBitRevImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_w:
  case Intrinsic::loongarch_lasx_xvbitrevi_w:
    return lowerVectorBitRevImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_d:
  case Intrinsic::loongarch_lasx_xvbitrevi_d:
    return lowerVectorBitRevImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vfadd_s:
  case Intrinsic::loongarch_lsx_vfadd_d:
  case Intrinsic::loongarch_lasx_xvfadd_s:
  case Intrinsic::loongarch_lasx_xvfadd_d:
    return DAG.getNode(ISD::FADD, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfsub_s:
  case Intrinsic::loongarch_lsx_vfsub_d:
  case Intrinsic::loongarch_lasx_xvfsub_s:
  case Intrinsic::loongarch_lasx_xvfsub_d:
    return DAG.getNode(ISD::FSUB, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfmul_s:
  case Intrinsic::loongarch_lsx_vfmul_d:
  case Intrinsic::loongarch_lasx_xvfmul_s:
  case Intrinsic::loongarch_lasx_xvfmul_d:
    return DAG.getNode(ISD::FMUL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfdiv_s:
  case Intrinsic::loongarch_lsx_vfdiv_d:
  case Intrinsic::loongarch_lasx_xvfdiv_s:
  case Intrinsic::loongarch_lasx_xvfdiv_d:
    return DAG.getNode(ISD::FDIV, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfmadd_s:
  case Intrinsic::loongarch_lsx_vfmadd_d:
  case Intrinsic::loongarch_lasx_xvfmadd_s:
  case Intrinsic::loongarch_lasx_xvfmadd_d:
    return DAG.getNode(ISD::FMA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
  case Intrinsic::loongarch_lsx_vinsgr2vr_b:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<4>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_h:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<3>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_w:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<2>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_d:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<1>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vreplgr2vr_b:
  case Intrinsic::loongarch_lsx_vreplgr2vr_h:
  case Intrinsic::loongarch_lsx_vreplgr2vr_w:
  case Intrinsic::loongarch_lsx_vreplgr2vr_d:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_d: {
    EVT ResTy = N->getValueType(0);
  case Intrinsic::loongarch_lsx_vreplve_b:
  case Intrinsic::loongarch_lsx_vreplve_h:
  case Intrinsic::loongarch_lsx_vreplve_w:
  case Intrinsic::loongarch_lsx_vreplve_d:
  case Intrinsic::loongarch_lasx_xvreplve_b:
  case Intrinsic::loongarch_lasx_xvreplve_h:
  case Intrinsic::loongarch_lasx_xvreplve_w:
  case Intrinsic::loongarch_lasx_xvreplve_d:
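// Note (illustrative, assumption): rewriting these intrinsics into generic
// ISD nodes (ADD, SMAX, SHL, CTPOP, FMA, ...) lets the normal DAG combiner
// and legalizer optimize them further -- e.g. vadd.b of two constant splats
// folds to a constant, which an opaque intrinsic node would have blocked.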
  switch (N->getOpcode()) {

  MF->insert(It, BreakMBB);
  SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()),
                  MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  BreakMBB->addSuccessor(SinkMBB);
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case LoongArch::PseudoVBZ:
    CondOpc = LoongArch::VSETEQZ_V;
    break;
  case LoongArch::PseudoVBZ_B:
    CondOpc = LoongArch::VSETANYEQZ_B;
    break;
  case LoongArch::PseudoVBZ_H:
    CondOpc = LoongArch::VSETANYEQZ_H;
    break;
  case LoongArch::PseudoVBZ_W:
    CondOpc = LoongArch::VSETANYEQZ_W;
    break;
  case LoongArch::PseudoVBZ_D:
    CondOpc = LoongArch::VSETANYEQZ_D;
    break;
  case LoongArch::PseudoVBNZ:
    CondOpc = LoongArch::VSETNEZ_V;
    break;
  case LoongArch::PseudoVBNZ_B:
    CondOpc = LoongArch::VSETALLNEZ_B;
    break;
  case LoongArch::PseudoVBNZ_H:
    CondOpc = LoongArch::VSETALLNEZ_H;
    break;
  case LoongArch::PseudoVBNZ_W:
    CondOpc = LoongArch::VSETALLNEZ_W;
    break;
  case LoongArch::PseudoVBNZ_D:
    CondOpc = LoongArch::VSETALLNEZ_D;
    break;
  case LoongArch::PseudoXVBZ:
    CondOpc = LoongArch::XVSETEQZ_V;
    break;
  case LoongArch::PseudoXVBZ_B:
    CondOpc = LoongArch::XVSETANYEQZ_B;
    break;
  case LoongArch::PseudoXVBZ_H:
    CondOpc = LoongArch::XVSETANYEQZ_H;
    break;
  case LoongArch::PseudoXVBZ_W:
    CondOpc = LoongArch::XVSETANYEQZ_W;
    break;
  case LoongArch::PseudoXVBZ_D:
    CondOpc = LoongArch::XVSETANYEQZ_D;
    break;
  case LoongArch::PseudoXVBNZ:
    CondOpc = LoongArch::XVSETNEZ_V;
    break;
  case LoongArch::PseudoXVBNZ_B:
    CondOpc = LoongArch::XVSETALLNEZ_B;
    break;
  case LoongArch::PseudoXVBNZ_H:
    CondOpc = LoongArch::XVSETALLNEZ_H;
    break;
  case LoongArch::PseudoXVBNZ_W:
    CondOpc = LoongArch::XVSETALLNEZ_W;
    break;
  case LoongArch::PseudoXVBNZ_D:
    CondOpc = LoongArch::XVSETALLNEZ_D;
    break;
  }
  F->insert(It, FalseBB);
  F->insert(It, TrueBB);
  F->insert(It, SinkBB);

  SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());

  Register FCC = MRI.createVirtualRegister(&LoongArch::CFRRegClass);

  Register RD1 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);

  Register RD2 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);

          MI.getOperand(0).getReg())

  MI.eraseFromParent();
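// Note (illustrative, assumption): the expansion builds the usual diamond --
// the VSET* instruction defines the FCC condition register, the branch picks
// between two blocks that materialize 0 (RD1) and 1 (RD2), and SinkBB joins
// the two values with a PHI into the pseudo's destination register.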
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case LoongArch::PseudoXVINSGR2VR_B:
    HalfSize = 16;
    InsOp = LoongArch::VINSGR2VR_B;
    break;
  case LoongArch::PseudoXVINSGR2VR_H:
    HalfSize = 8;
    InsOp = LoongArch::VINSGR2VR_H;
    break;
  }

  unsigned Idx = MI.getOperand(3).getImm();

  if (Idx >= HalfSize) {
    ScratchReg1 = MRI.createVirtualRegister(RC);
    BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg1)
  }

  Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
  Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
      .addReg(ScratchReg1, 0, LoongArch::sub_128);

  if (Idx >= HalfSize)
    ScratchReg2 = MRI.createVirtualRegister(RC);

  BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), ScratchReg2)
      .addImm(LoongArch::sub_128);

  if (Idx >= HalfSize)

  MI.eraseFromParent();
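// Note (illustrative, assumption): VINSGR2VR only reaches the low 128 bits,
// so an insert into the high half of an LASX register swaps the halves with
// XVPERMI_Q, inserts through the sub_128 subregister with the lane index
// adjusted into the low half, and swaps the halves back afterwards.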
3420 switch (
MI.getOpcode()) {
3423 case LoongArch::DIV_W:
3424 case LoongArch::DIV_WU:
3425 case LoongArch::MOD_W:
3426 case LoongArch::MOD_WU:
3427 case LoongArch::DIV_D:
3428 case LoongArch::DIV_DU:
3429 case LoongArch::MOD_D:
3430 case LoongArch::MOD_DU:
3433 case LoongArch::WRFCSR: {
3435 LoongArch::FCSR0 +
MI.getOperand(0).getImm())
3436 .
addReg(
MI.getOperand(1).getReg());
3437 MI.eraseFromParent();
3440 case LoongArch::RDFCSR: {
3443 MI.getOperand(0).getReg())
3444 .
addReg(LoongArch::FCSR0 +
MI.getOperand(1).getImm());
3446 MI.eraseFromParent();
3449 case LoongArch::PseudoVBZ:
3450 case LoongArch::PseudoVBZ_B:
3451 case LoongArch::PseudoVBZ_H:
3452 case LoongArch::PseudoVBZ_W:
3453 case LoongArch::PseudoVBZ_D:
3454 case LoongArch::PseudoVBNZ:
3455 case LoongArch::PseudoVBNZ_B:
3456 case LoongArch::PseudoVBNZ_H:
3457 case LoongArch::PseudoVBNZ_W:
3458 case LoongArch::PseudoVBNZ_D:
3459 case LoongArch::PseudoXVBZ:
3460 case LoongArch::PseudoXVBZ_B:
3461 case LoongArch::PseudoXVBZ_H:
3462 case LoongArch::PseudoXVBZ_W:
3463 case LoongArch::PseudoXVBZ_D:
3464 case LoongArch::PseudoXVBNZ:
3465 case LoongArch::PseudoXVBNZ_B:
3466 case LoongArch::PseudoXVBNZ_H:
3467 case LoongArch::PseudoXVBNZ_W:
3468 case LoongArch::PseudoXVBNZ_D:
3470 case LoongArch::PseudoXVINSGR2VR_B:
3471 case LoongArch::PseudoXVINSGR2VR_H:
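// (These cases dispatch to the helpers shown earlier:
// emitVecCondBranchPseudo for the PseudoVBZ*/PseudoVBNZ* family and
// emitPseudoXVINSGR2VR for the two insert pseudos.)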
3478     unsigned *Fast) const {
3479 if (!Subtarget.hasUAL())
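// (Elided body: without the UAL feature, misaligned accesses are rejected;
// with it they are allowed and, if Fast is non-null, presumably reported as
// fast.)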
3493#define NODE_NAME_CASE(node) \
3494 case LoongArchISD::node: \
3495 return "LoongArchISD::" #node;
3559#undef NODE_NAME_CASE
3572 LoongArch::R7, LoongArch::R8, LoongArch::R9,
3573 LoongArch::R10, LoongArch::R11};
3577 LoongArch::F3, LoongArch::F4, LoongArch::F5,
3578 LoongArch::F6, LoongArch::F7};
3581 LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
3582 LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
3585 LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
3586 LoongArch::VR6, LoongArch::VR7};
3589 LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
3590 LoongArch::XR6, LoongArch::XR7};
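// Note (added): the arrays above are the tails of the ABI argument-register
// lists -- GPRs a0-a7 (R4-R11), FP arguments fa0-fa7 (F0-F7 / F0_64-F7_64),
// and the LSX/LASX equivalents VR0-VR7 / XR0-XR7; the leading elements of
// each initializer were dropped by extraction.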
3596     unsigned ValNo2, MVT ValVT2, MVT LocVT2,
3598 unsigned GRLenInBytes = GRLen / 8;
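// Note (added): per its signature, CC_LoongArchAssign2GRLen assigns the two
// GRLen-sized halves of a split value (e.g. f64 or i64 on LA32) to a
// register pair, falling back to the stack for whichever half cannot get a
// register.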
3631     unsigned ValNo, MVT ValVT,
3633     CCState &State, bool IsFixed, bool IsRet,
3635   unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
3636   assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
3637 MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
3642 if (IsRet && ValNo > 1)
3646   bool UseGPRForFloat = true;
3656     UseGPRForFloat = !IsFixed;
3665     UseGPRForFloat = true;
3667   if (UseGPRForFloat && ValVT == MVT::f32) {
3670   } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
3673   } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
3684 unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
3686 DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
3689     if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
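// Note (added): this implements the ABI rule that 2*GRLen-sized and
// -aligned varargs start at an even-odd register pair: if the next GPR
// index is odd, one register is skipped so the pair stays aligned.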
3698 "PendingLocs and PendingArgFlags out of sync");
3716       PendingLocs.size() <= 2) {
3717     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
3722     PendingLocs.clear();
3723     PendingArgFlags.clear();
3730 unsigned StoreSizeBytes = GRLen / 8;
3733 if (ValVT == MVT::f32 && !UseGPRForFloat)
3735 else if (ValVT == MVT::f64 && !UseGPRForFloat)
3749   if (!PendingLocs.empty()) {
3751     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
3752     for (auto &It : PendingLocs) {
3754       It.convertToReg(Reg);
3759     PendingLocs.clear();
3760     PendingArgFlags.clear();
3763   assert((!UseGPRForFloat || LocVT == GRLenVT) &&
3764          "Expected a GRLenVT at this stage");
3781void LoongArchTargetLowering::analyzeInputArgs(
3784     LoongArchCCAssignFn Fn) const {
3786   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3788     Type *ArgTy = nullptr;
3790       ArgTy = FType->getReturnType();
3791     else if (Ins[i].isOrigArg())
3792       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
3796                    CCInfo, true, IsRet, ArgTy)) {
3797       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
3804void LoongArchTargetLowering::analyzeOutputArgs(
3807     CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
3808   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
3809     MVT ArgVT = Outs[i].VT;
3810     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
3814                    CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
3815       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
3856 if (In.isOrigArg()) {
3861     if ((BitWidth <= 32 && In.Flags.isSExt()) ||
3862         (BitWidth < 32 && In.Flags.isZExt())) {
3922 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
3926 LoongArch::R23, LoongArch::R24, LoongArch::R25,
3927 LoongArch::R26, LoongArch::R27, LoongArch::R28,
3928 LoongArch::R29, LoongArch::R30, LoongArch::R31};
3935 if (LocVT == MVT::f32) {
3938 static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
3939 LoongArch::F26, LoongArch::F27};
3940     if (unsigned Reg = State.AllocateReg(FPR32List)) {
3946   if (LocVT == MVT::f64) {
3949     static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
3950                                           LoongArch::F30_64, LoongArch::F31_64};
3951     if (unsigned Reg = State.AllocateReg(FPR64List)) {
3979 "GHC calling convention requires the F and D extensions");
3984   unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
3986 std::vector<SDValue> OutChains;
3995   analyzeInputArgs(MF, CCInfo, Ins, false, CC_LoongArch);
3997   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4009 unsigned ArgIndex = Ins[i].OrigArgIndex;
4010 unsigned ArgPartOffset = Ins[i].PartOffset;
4011 assert(ArgPartOffset == 0);
4012 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
4014 unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
4037 int VaArgOffset, VarArgsSaveSize;
4043 VarArgsSaveSize = 0;
4045     VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
4046 VaArgOffset = -VarArgsSaveSize;
4052 LoongArchFI->setVarArgsFrameIndex(FI);
4060 VarArgsSaveSize += GRLenInBytes;
4065   for (unsigned I = Idx; I < ArgRegs.size();
4066        ++I, VaArgOffset += GRLenInBytes) {
4074     cast<StoreSDNode>(Store.getNode())
4076         ->setValue((Value *)nullptr);
4077 OutChains.push_back(Store);
4079 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
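// Note (added): this is the usual varargs save area: every unnamed argument
// register from Idx onward is spilled to consecutive GRLen-sized fixed stack
// slots so va_arg can walk registers and stack with a single pointer; the
// setValue(nullptr) above detaches those stores from any IR value.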
4084 if (!OutChains.empty()) {
4085 OutChains.push_back(Chain);
4100   if (N->getNumValues() != 1)
4102   if (!N->hasNUsesOfValue(1, 0))
4105   SDNode *Copy = *N->use_begin();
4111 if (Copy->getGluedNode())
4115   bool HasRet = false;
4116   for (SDNode *Node : Copy->uses()) {
4125 Chain = Copy->getOperand(0);
4130bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
4134 auto CalleeCC = CLI.CallConv;
4135 auto &Outs = CLI.Outs;
4137 auto CallerCC = Caller.getCallingConv();
4144   for (auto &VA : ArgLocs)
4150   auto IsCallerStructRet = Caller.hasStructRetAttr();
4151   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
4152   if (IsCallerStructRet || IsCalleeStructRet)
4156   for (auto &Arg : Outs)
4157     if (Arg.Flags.isByVal())
4162   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4163   if (CalleeCC != CallerCC) {
4164     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4165     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
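// Note (added): taken together, the checks above reject tail calls when any
// argument is passed on the stack or byval, when either side uses sret
// demotion, or when the calling conventions differ and the callee's
// preserved-register mask is not a superset of the caller's.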
4203   analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI, CC_LoongArch);
4207 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
4213 "site marked musttail");
4220   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
4222     if (!Flags.isByVal())
4226     unsigned Size = Flags.getByValSize();
4227     Align Alignment = Flags.getNonZeroByValAlign();
4234     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
4248   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
4250 SDValue ArgValue = OutVals[i];
4263 unsigned ArgIndex = Outs[i].OrigArgIndex;
4264 unsigned ArgPartOffset = Outs[i].PartOffset;
4265 assert(ArgPartOffset == 0);
4270 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
4271 SDValue PartValue = OutVals[i + 1];
4272 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
4282 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4286     for (const auto &Part : Parts) {
4287       SDValue PartValue = Part.first;
4288       SDValue PartOffset = Part.second;
4295       ArgValue = SpillSlot;
4301     if (Flags.isByVal())
4302       ArgValue = ByValArgs[j++];
4309     assert(!IsTailCall && "Tail call not allowed if stack is used "
4310                           "for passing parameters");
4313 if (!StackPtr.getNode())
4326   if (!MemOpChains.empty())
4332   for (auto &Reg : RegsToPass) {
4333     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
4360   for (auto &Reg : RegsToPass)
4366   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
4367   assert(Mask && "Missing call preserved mask for calling convention");
4385     assert(Subtarget.is64Bit() && "Medium code model requires LA64");
4389     assert(Subtarget.is64Bit() && "Large code model requires LA64");
4412   analyzeInputArgs(MF, RetCCInfo, Ins, true, CC_LoongArch);
4415   for (auto &VA : RVLocs) {
4435 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
4437   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
4441                       Outs[i].Flags, CCInfo, true, true,
4468   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
4490bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4491                                           bool ForCodeSize) const {
4493 if (VT == MVT::f32 && !Subtarget.hasBasicF())
4495 if (VT == MVT::f64 && !Subtarget.hasBasicD())
4497 return (Imm.isZero() || Imm.isExactlyValue(+1.0));
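// Note (added): only +0.0 and +1.0 are reported as legal FP immediates --
// both can be materialized without a constant-pool load (zero straight from
// $zero, and 1.0 presumably via a short integer-move/convert sequence);
// every other constant goes through memory.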
4508bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
4511     return isa<LoadInst>(I) || isa<StoreInst>(I);
4513   if (isa<LoadInst>(I))
4518   if (isa<StoreInst>(I)) {
4519     unsigned Size = I->getOperand(0)->getType()->getIntegerBitWidth();
4536   return Y.getValueType().isScalarInteger() && !isa<ConstantSDNode>(Y);
4542                                                   unsigned Intrinsic) const {
4543 switch (Intrinsic) {
4546 case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
4547 case Intrinsic::loongarch_masked_atomicrmw_add_i32:
4548 case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
4549 case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
4551 Info.memVT = MVT::i32;
4552     Info.ptrVal = I.getArgOperand(0);
4587 return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
4589 return Intrinsic::loongarch_masked_atomicrmw_add_i64;
4591 return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
4593 return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
4595 return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
4597 return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
4599 return Intrinsic::loongarch_masked_atomicrmw_max_i64;
4601 return Intrinsic::loongarch_masked_atomicrmw_min_i64;
4611 return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
4613 return Intrinsic::loongarch_masked_atomicrmw_add_i32;
4615 return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
4617 return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
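// Note (added): a sketch of how these intrinsic IDs get used. On LA64 a
// sub-word atomic such as
//
//   %old = atomicrmw add ptr %p, i8 %v seq_cst
//
// is legalized onto the aligned containing word: the AtomicExpand pass
// computes AlignedAddr/Mask/shift amounts, and the masked intrinsic chosen
// here (loongarch_masked_atomicrmw_add_i64 in this case) implements the
// ll/sc loop on that word.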
4638 Value *FailureOrdering =
4642 Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
4650 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
4674   unsigned GRLen = Subtarget.getGRLen();
4703                            {AlignedAddr, Incr, Mask, SextShamt, Ordering});
4706         Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
4733     const Constant *PersonalityFn) const {
4734   return LoongArch::R4;
4738     const Constant *PersonalityFn) const {
4739   return LoongArch::R5;
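// Note (added): R4/R5 are the first two argument registers ($a0/$a1); the
// EH runtime delivers the exception pointer and type selector to a landing
// pad in them.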
4747LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
4765   if (Constraint.size() == 1) {
4766 switch (Constraint[0]) {
4781   if (Constraint == "ZC" || Constraint == "ZB")
4797std::pair<unsigned, const TargetRegisterClass *>
4798LoongArchTargetLowering::getRegForInlineAsmConstraint(
4802   if (Constraint.size() == 1) {
4803 switch (Constraint[0]) {
4808 return std::make_pair(0U, &LoongArch::GPRRegClass);
4810 if (Subtarget.hasBasicF() && VT == MVT::f32)
4811 return std::make_pair(0U, &LoongArch::FPR32RegClass);
4812 if (Subtarget.hasBasicD() && VT == MVT::f64)
4813 return std::make_pair(0U, &LoongArch::FPR64RegClass);
4814 if (Subtarget.hasExtLSX() &&
4815 TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
4816 return std::make_pair(0U, &LoongArch::LSX128RegClass);
4817 if (Subtarget.hasExtLASX() &&
4818 TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
4819 return std::make_pair(0U, &LoongArch::LASX256RegClass);
4839   bool IsFP = Constraint[2] == 'f';
4840   std::pair<StringRef, StringRef> Temp = Constraint.split('$');
4841   std::pair<unsigned, const TargetRegisterClass *> R;
4843       TRI, join_items("", Temp.first, Temp.second), VT);
4846   unsigned RegNo = R.first;
4847 if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
4848 if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
4849 unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
4850 return std::make_pair(DReg, &LoongArch::FPR64RegClass);
4860void LoongArchTargetLowering::LowerAsmOperandForConstraint(
4864   if (Constraint.size() == 1) {
4865 switch (Constraint[0]) {
4868     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
4870       if (isInt<16>(CVal))
4877     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
4879       if (isInt<12>(CVal))
4886     if (auto *C = dyn_cast<ConstantSDNode>(Op))
4887       if (C->getZExtValue() == 0)
4893     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
4895       if (isUInt<12>(CVal))
4895 if (isUInt<12>(CVal))
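// Illustration (added): these range checks back LoongArch's inline-asm
// immediate constraints -- going by the tests above, 'l' accepts simm16,
// 'I' simm12, 'J' only zero, and 'K' uimm12. A hypothetical user-level
// example:
//
//   int r, x = 42;
//   asm("addi.w %0, %1, %2" : "=r"(r) : "r"(x), "I"(-2048)); // simm12
//   asm("ori    %0, %1, %2" : "=r"(r) : "r"(x), "K"(4095));  // uimm12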
4907#define GET_REGISTER_MATCHER
4908#include "LoongArchGenAsmMatcher.inc"
4914   std::string NewRegName = Name.second.str();
4916   if (Reg == LoongArch::NoRegister)
4918   if (Reg == LoongArch::NoRegister)
4922   if (!ReservedRegs.test(Reg))
4938   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
4939 const APInt &Imm = ConstNode->getAPIntValue();
4941 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
4942 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
4945 if (ConstNode->hasOneUse() &&
4946 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
4947 (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
4953 if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
4954 unsigned Shifts = Imm.countr_zero();
4960 APInt ImmPop = Imm.ashr(Shifts);
4961 if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
4965       APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
4966 if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
4967 (ImmSmall - Imm).isPowerOf2())
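// Worked example (added): for C = 17, Imm - 1 = 16 is a power of two, so
// x * 17 decomposes into (x << 4) + x -- a single shift-add (alsl.w/alsl.d)
// on LoongArch. The other patterns tested above cover the analogous
// shift-plus-add/sub/negate forms, which is when this hook returns true.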
4977                                                  Type *Ty, unsigned AS,
4993       !(isShiftedInt<14, 2>(AM.BaseOffs) && Subtarget.hasUAL()))
5020 return isInt<12>(Imm);
5024 return isInt<12>(Imm);
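// Note (added): both hooks accept exactly the simm12 range, matching the
// immediate fields of addi.{w,d} and slti; larger constants must be
// materialized into a register first.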
5031   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
5032 EVT MemVT = LD->getMemoryVT();
5033 if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
5044   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
5053   if (Y.getValueType().isVector())
5056   return !isa<ConstantSDNode>(Y);
5065                                                        EVT Type, bool IsSigned) const {