#define DEBUG_TYPE "legalize-types"
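// Result scalarization: a node that produces a one-element vector is replaced
// by the equivalent node producing just the element; the mapping is recorded
// below via SetScalarizedVector.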
void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
  switch (N->getOpcode()) {
    dbgs() << "ScalarizeVectorResult #" << ResNo << ": ";
    R = ScalarizeVecRes_UnaryOpWithExtraInput(N);
  case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N)); break;
  case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
  case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
    R = ScalarizeVecRes_VecInregOp(N);
    R = ScalarizeVecRes_UnaryOp(N);
    R = ScalarizeVecRes_ADDRSPACECAST(N);
    R = ScalarizeVecRes_UnaryOpWithTwoResults(N, ResNo);
    R = ScalarizeVecRes_BinOp(N);
    R = ScalarizeVecRes_CMP(N);
    R = ScalarizeVecRes_TernaryOp(N);
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    R = ScalarizeVecRes_StrictFPOp(N);
    R = ScalarizeVecRes_FP_TO_XINT_SAT(N);
    R = ScalarizeVecRes_OverflowOp(N, ResNo);
    R = ScalarizeVecRes_FIX(N);

  SetScalarizedVector(SDValue(N, ResNo), R);
  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
                     LHS.getValueType(), LHS, RHS, N->getFlags());

  if (getTypeAction(LHS.getValueType()) ==
    LHS = GetScalarizedVector(LHS);
    RHS = GetScalarizedVector(RHS);
    EVT VT = LHS.getValueType().getVectorElementType();
                     N->getValueType(0).getVectorElementType(), LHS, RHS);

  SDValue Op0 = GetScalarizedVector(N->getOperand(0));
  SDValue Op1 = GetScalarizedVector(N->getOperand(1));
  SDValue Op2 = GetScalarizedVector(N->getOperand(2));

  SDValue Op0 = GetScalarizedVector(N->getOperand(0));
  SDValue Op1 = GetScalarizedVector(N->getOperand(1));
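// A unary operation with two vector results is scalarized once; the requested
// result is returned, and the other result is either recorded as scalarized or
// replaced directly, depending on its type action.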
DAGTypeLegalizer::ScalarizeVecRes_UnaryOpWithTwoResults(SDNode *N,
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));

  EVT VT0 = N->getValueType(0);
  EVT VT1 = N->getValueType(1);
          {VT0.getScalarType(), VT1.getScalarType()}, Elt)

  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);

  return SDValue(ScalarNode, ResNo);
  EVT VT = N->getValueType(0).getVectorElementType();
  unsigned NumOpers = N->getNumOperands();
  EVT ValueVTs[] = {VT, MVT::Other};
  for (unsigned i = 1; i < NumOpers; ++i) {
      Oper = GetScalarizedVector(Oper);
                           Opers, N->getFlags());
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
    ScalarLHS = GetScalarizedVector(N->getOperand(0));
    ScalarRHS = GetScalarizedVector(N->getOperand(1));
    ScalarLHS = ElemsLHS[0];
    ScalarRHS = ElemsRHS[0];
      N->getOpcode(), DL, ScalarVTs, ScalarLHS, ScalarRHS).getNode();

  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);

  return SDValue(ScalarNode, ResNo);
  SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
  return GetScalarizedVector(Op);

  if (Op.getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() == 1 &&
      !isSimpleLegalType(Op.getValueType()))
    Op = GetScalarizedVector(Op);
  EVT NewVT = N->getValueType(0).getVectorElementType();
SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) {
  EVT EltVT = N->getValueType(0).getVectorElementType();

SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
      N->getValueType(0).getVectorElementType(),
      N->getOperand(0), N->getOperand(1));

  EVT OpVT = Op.getValueType();
    Op = GetScalarizedVector(Op);
      N->getValueType(0).getVectorElementType(), Op,

SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOpWithExtraInput(SDNode *N) {
  SDValue Op = GetScalarizedVector(N->getOperand(0));
SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
  EVT EltVT = N->getValueType(0).getVectorElementType();
  if (Op.getValueType() != EltVT)

  assert(N->isUnindexed() && "Indexed vector load?");
      N->getValueType(0).getVectorElementType(), SDLoc(N), N->getChain(),
      N->getBasePtr(), DAG.getUNDEF(N->getBasePtr().getValueType()),
      N->getPointerInfo(), N->getMemoryVT().getVectorElementType(),
      N->getOriginalAlign(), N->getMemOperand()->getFlags(), N->getAAInfo());
  EVT DestVT = N->getValueType(0).getVectorElementType();
  EVT OpVT = Op.getValueType();
    Op = GetScalarizedVector(Op);

  EVT EltVT = N->getValueType(0).getVectorElementType();
  SDValue LHS = GetScalarizedVector(N->getOperand(0));

  EVT OpVT = Op.getValueType();
  EVT EltVT = N->getValueType(0).getVectorElementType();
    Op = GetScalarizedVector(Op);
  switch (N->getOpcode()) {

SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(SDNode *N) {
  EVT DestVT = N->getValueType(0).getVectorElementType();
  EVT OpVT = Op.getValueType();
    Op = GetScalarizedVector(Op);
  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
  unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
  unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
  EVT EltVT = N->getValueType(0).getVectorElementType();

  EVT OpVT = Cond.getValueType();
  SDValue LHS = GetScalarizedVector(N->getOperand(1));
      EVT OpVT = Cond->getOperand(0).getValueType();
    EVT CondVT = Cond.getValueType();
  if (ScalarBool != VecBool) {
    switch (ScalarBool) {
    auto BoolVT = getSetCCResultType(CondVT);
    if (BoolVT.bitsLT(CondVT))
                     GetScalarizedVector(N->getOperand(2)));

  SDValue LHS = GetScalarizedVector(N->getOperand(1));
                     LHS.getValueType(), N->getOperand(0), LHS,
                     GetScalarizedVector(N->getOperand(2)));

  SDValue LHS = GetScalarizedVector(N->getOperand(2));
                     N->getOperand(0), N->getOperand(1),
                     LHS, GetScalarizedVector(N->getOperand(3)),

  return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
  SDValue Arg = N->getOperand(2).getOperand(0);
    return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
  unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
  return GetScalarizedVector(N->getOperand(Op));

SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N) {
  EVT SrcVT = Src.getValueType();
    Src = GetScalarizedVector(Src);
  EVT DstVT = N->getValueType(0).getVectorElementType();
  return DAG.getNode(N->getOpcode(), dl, DstVT, Src, N->getOperand(1));
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  EVT OpVT = LHS.getValueType();
  EVT NVT = N->getValueType(0).getVectorElementType();
    LHS = GetScalarizedVector(LHS);
    RHS = GetScalarizedVector(RHS);
  return DAG.getNode(ExtendCode, DL, NVT, Res);

  EVT ResultVT = N->getValueType(0).getVectorElementType();
    Arg = GetScalarizedVector(Arg);
  return DAG.getNode(ExtendCode, DL, ResultVT, Res);
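// Operand scalarization: an illegal one-element vector operand is rewritten so
// the node can consume the scalar element; on success the node's result is
// installed with ReplaceValueWith.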
bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
  switch (N->getOpcode()) {
    dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
    Res = ScalarizeVecOp_BITCAST(N);
    Res = ScalarizeVecOp_FAKE_USE(N);
    Res = ScalarizeVecOp_UnaryOp(N);
    Res = ScalarizeVecOp_UnaryOp_StrictFP(N);
    Res = ScalarizeVecOp_CONCAT_VECTORS(N);
    Res = ScalarizeVecOp_INSERT_SUBVECTOR(N, OpNo);
    Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
    Res = ScalarizeVecOp_VSELECT(N);
    Res = ScalarizeVecOp_VSETCC(N);
    Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
    Res = ScalarizeVecOp_STRICT_FP_ROUND(N, OpNo);
    Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
    Res = ScalarizeVecOp_STRICT_FP_EXTEND(N);
    Res = ScalarizeVecOp_FP_EXTEND(N);
    Res = ScalarizeVecOp_VECREDUCE(N);
    Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
    Res = ScalarizeVecOp_CMP(N);

  if (!Res.getNode())
    return false;
         "Invalid operand expansion");
  ReplaceValueWith(SDValue(N, 0), Res);
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                     N->getValueType(0), Elt);

  assert(N->getOperand(1).getValueType().getVectorNumElements() == 1 &&
         "Fake Use: Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));

  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                     N->getValueType(0).getScalarType(), Elt);

SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(SDNode *N) {
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
                    {N->getValueType(0).getScalarType(), MVT::Other},
                    {N->getOperand(0), Elt});
  ReplaceValueWith(SDValue(N, 0), Res);
SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    Ops[i] = GetScalarizedVector(N->getOperand(i));

SDValue DAGTypeLegalizer::ScalarizeVecOp_INSERT_SUBVECTOR(SDNode *N,
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  SDValue ContainingVec = N->getOperand(0);

SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue Res = GetScalarizedVector(N->getOperand(0));

  SDValue ScalarCond = GetScalarizedVector(N->getOperand(0));
  EVT VT = N->getValueType(0);
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  assert(N->getValueType(0) == MVT::v1i1 && "Expected v1i1 type");
  EVT VT = N->getValueType(0);
  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
  EVT OpVT = N->getOperand(0).getValueType();
  Res = DAG.getNode(ExtendCode, DL, NVT, Res);
  assert(N->isUnindexed() && "Indexed store of one-element vector?");
  assert(OpNo == 1 && "Do not know how to scalarize this operand!");
  if (N->isTruncatingStore())
        N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
        N->getBasePtr(), N->getPointerInfo(),
        N->getMemoryVT().getVectorElementType(), N->getOriginalAlign(),
        N->getMemOperand()->getFlags(), N->getAAInfo());

  return DAG.getStore(
      N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
      N->getBasePtr(), N->getPointerInfo(),
      N->getOriginalAlign(), N->getMemOperand()->getFlags(),
SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
  assert(OpNo == 0 && "Wrong operand for scalarization!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                     N->getValueType(0).getVectorElementType(), Elt,

SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N,
  assert(OpNo == 1 && "Wrong operand for scalarization!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
                    {N->getOperand(0), Elt, N->getOperand(2)});
  ReplaceValueWith(SDValue(N, 0), Res);

  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                     N->getValueType(0).getVectorElementType(), Elt);

SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N) {
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
                    {N->getOperand(0), Elt});
  ReplaceValueWith(SDValue(N, 0), Res);

  SDValue Res = GetScalarizedVector(N->getOperand(0));
SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
  SDValue Op = GetScalarizedVector(VecOp);
                     AccOp, Op, N->getFlags());

  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
  EVT ResVT = N->getValueType(0).getVectorElementType();
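// Result splitting: a vector result that is too wide for the target is broken
// into Lo/Hi halves, with each opcode dispatched to its SplitVecRes_* handler.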
void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
  if (CustomLowerNode(N, N->getValueType(ResNo), true))
  switch (N->getOpcode()) {
    dbgs() << "SplitVectorResult #" << ResNo << ": ";
  case ISD::VP_SELECT: SplitRes_Select(N, Lo, Hi); break;
  case ISD::EXPERIMENTAL_VP_SPLAT: SplitVecRes_VP_SPLAT(N, Lo, Hi); break;
    SplitVecRes_ScalarOp(N, Lo, Hi);
    SplitVecRes_STEP_VECTOR(N, Lo, Hi);
    SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
    SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(N), Lo, Hi);
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N), Lo, Hi);
    SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(N), Lo, Hi);
  case ISD::VP_GATHER:
    SplitVecRes_Gather(cast<MemSDNode>(N), Lo, Hi, true);
    SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi);
    SplitVecRes_SETCC(N, Lo, Hi);
    SplitVecRes_VECTOR_REVERSE(N, Lo, Hi);
    SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
    SplitVecRes_VECTOR_SPLICE(N, Lo, Hi);
    SplitVecRes_VECTOR_DEINTERLEAVE(N);
    SplitVecRes_VECTOR_INTERLEAVE(N);
    SplitVecRes_VAARG(N, Lo, Hi);
    SplitVecRes_ExtVecInRegOp(N, Lo, Hi);
  case ISD::VP_BITREVERSE:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::VP_FFLOOR:
  case ISD::VP_FNEARBYINT:
  case ISD::VP_FP_EXTEND:
  case ISD::VP_FP_ROUND:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::VP_LLRINT:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_TRUNCATE:
  case ISD::VP_UINT_TO_FP:
    SplitVecRes_UnaryOp(N, Lo, Hi);
    SplitVecRes_ADDRSPACECAST(N, Lo, Hi);
    SplitVecRes_UnaryOpWithTwoResults(N, ResNo, Lo, Hi);
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_ZERO_EXTEND:
    SplitVecRes_ExtendOp(N, Lo, Hi);
  case ISD::VP_FMINNUM:
  case ISD::VP_FMAXNUM:
  case ISD::VP_FMINIMUM:
  case ISD::VP_FMAXIMUM:
  case ISD::OR: case ISD::VP_OR:
  case ISD::VP_FCOPYSIGN:
    SplitVecRes_BinOp(N, Lo, Hi);
    SplitVecRes_TernaryOp(N, Lo, Hi);
    SplitVecRes_CMP(N, Lo, Hi);
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    SplitVecRes_StrictFPOp(N, Lo, Hi);
    SplitVecRes_FP_TO_XINT_SAT(N, Lo, Hi);
    SplitVecRes_OverflowOp(N, ResNo, Lo, Hi);
    SplitVecRes_FIX(N, Lo, Hi);
  case ISD::EXPERIMENTAL_VP_REVERSE:
    SplitVecRes_VP_REVERSE(N, Lo, Hi);
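// Advance Ptr by the store size of MemVT so the Hi half of a split memory
// access can be addressed, updating the MachinePointerInfo (and, when
// requested, the running scaled offset) to match.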
void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,
                    DL, Ptr.getValueType(),
                    APInt(Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
      *ScaledOffset += IncrementSize;
    MPI = N->getPointerInfo().getWithOffset(IncrementSize);
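// Split a vector mask into its Lo and Hi halves for use by the split
// operation handlers.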
std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask) {
  return SplitMask(Mask, SDLoc(Mask));

std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask,
  EVT MaskVT = Mask.getValueType();
    GetSplitVector(Mask, MaskLo, MaskHi);
  return std::make_pair(MaskLo, MaskHi);
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() == 2) {
  assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(3), N->getValueType(0), dl);
                   {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
                   {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
  GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi);
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() == 3) {
  assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(4), N->getValueType(0), dl);
                   {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
                   {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
  SDValue LHSLo, LHSHi, RHSLo, RHSHi;
    GetSplitVector(LHS, LHSLo, LHSHi);
    GetSplitVector(RHS, RHSLo, RHSHi);
    std::tie(LHSLo, LHSHi) = DAG.SplitVector(LHS, dl);
    std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, dl);
  EVT SplitResVT = N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
  Lo = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
  Hi = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);

  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
  unsigned Opcode = N->getOpcode();
  switch (getTypeAction(InVT)) {
    GetExpandedOp(InOp, Lo, Hi);
    GetSplitVector(InOp, Lo, Hi);
  SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);

  assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS");
  unsigned NumSubvectors = N->getNumOperands() / 2;
  if (NumSubvectors == 1) {
    Lo = N->getOperand(0);
    Hi = N->getOperand(1);
void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
  GetSplitVector(Vec, Lo, Hi);
  EVT LoVT = Lo.getValueType();
  unsigned IdxVal = Idx->getAsZExtVal();
  if (IdxVal + SubElems <= LoElems) {
      IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
  Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
  auto *Load = cast<LoadSDNode>(Lo);
  IncrementPointer(Load, LoVT, MPI, StackPtr);
  Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  EVT RHSVT = RHS.getValueType();
    GetSplitVector(RHS, RHSLo, RHSHi);

  SDValue FpValue = N->getOperand(0);
    GetSplitVector(FpValue, ArgLo, ArgHi);

  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  std::tie(LoVT, HiVT) =

  unsigned Opcode = N->getOpcode();
    GetSplitVector(N0, InLo, InHi);
  EVT OutLoVT, OutHiVT;
  assert((2 * OutNumElements) <= InNumElements &&
         "Illegal extend vector in reg split");
  for (unsigned i = 0; i != OutNumElements; ++i)
    SplitHi[i] = i + OutNumElements;
  Lo = DAG.getNode(Opcode, dl, OutLoVT, InLo);
  Hi = DAG.getNode(Opcode, dl, OutHiVT, InHi);
  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 1; i < NumOps; ++i) {
    EVT InVT = Op.getValueType();
      GetSplitVector(Op, OpLo, OpHi);
  EVT LoValueVTs[] = {LoVT, MVT::Other};
  EVT HiValueVTs[] = {HiVT, MVT::Other};
                      Lo.getValue(1), Hi.getValue(1));
  ReplaceValueWith(SDValue(N, 1), Chain);
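// Unroll a strict FP vector operation into ResNE scalar operations, keeping
// the chain results linked together and replacing the node's chain output.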
SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(SDNode *N, unsigned ResNE) {
  EVT VT = N->getValueType(0);
  else if (NE > ResNE)
  EVT ChainVTs[] = {EltVT, MVT::Other};
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 1, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
    Scalar.getNode()->setFlags(N->getFlags());
  for (; i < ResNE; ++i)
  ReplaceValueWith(SDValue(N, 1), Chain);
void DAGTypeLegalizer::SplitVecRes_OverflowOp(SDNode *N, unsigned ResNo,
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
  SDValue LoLHS, HiLHS, LoRHS, HiRHS;
    GetSplitVector(N->getOperand(0), LoLHS, HiLHS);
    GetSplitVector(N->getOperand(1), LoRHS, HiRHS);
  unsigned Opcode = N->getOpcode();
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    SetSplitVector(SDValue(N, OtherNo),
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
  GetSplitVector(Vec, Lo, Hi);
    unsigned IdxVal = CIdx->getZExtValue();
    unsigned LoNumElts = Lo.getValueType().getVectorMinNumElements();
    if (IdxVal < LoNumElts) {
                       Lo.getValueType(), Lo, Elt, Idx);
  Lo = DAG.getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
  auto Load = cast<LoadSDNode>(Lo);
  IncrementPointer(Load, LoVT, MPI, StackPtr);
  Hi = DAG.getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
  if (LoVT != Lo.getValueType())
  if (HiVT != Hi.getValueType())
  assert(N->getValueType(0).isScalableVector() &&
         "Only scalable vectors are supported for STEP_VECTOR");

  Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0));

  auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1));
  auto [EVLLo, EVLHi] = DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
  Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0), MaskLo, EVLLo);
  Hi = DAG.getNode(N->getOpcode(), dl, HiVT, N->getOperand(0), MaskHi, EVLHi);
  EVT MemoryVT = LD->getMemoryVT();
  EVT LoMemVT, HiMemVT;
    ReplaceValueWith(SDValue(LD, 1), NewChain);
                   LD->getPointerInfo(), LoMemVT, LD->getOriginalAlign(),
  IncrementPointer(LD, LoMemVT, MPI, Ptr);
                   HiMemVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
  ReplaceValueWith(SDValue(LD, 1), Ch);
  assert(LD->isUnindexed() && "Indexed VP load during type legalization!");
  assert(Offset.isUndef() && "Unexpected indexed variable-length load offset");
  Align Alignment = LD->getOriginalAlign();
  EVT MemoryVT = LD->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, LD->getValueType(0), dl);
                  MaskLo, EVLLo, LoMemVT, MMO, LD->isExpandingLoad());
                  LD->isExpandingLoad());
    MPI = LD->getPointerInfo().getWithOffset(
        Alignment, LD->getAAInfo(), LD->getRanges());
                    Offset, MaskHi, EVLHi, HiMemVT, MMO,
                    LD->isExpandingLoad());
  ReplaceValueWith(SDValue(LD, 1), Ch);
2203 "Indexed VP strided load during type legalization!");
2205 "Unexpected indexed variable-length load offset");
2212 EVT LoMemVT, HiMemVT;
2213 bool HiIsEmpty =
false;
2214 std::tie(LoMemVT, HiMemVT) =
2220 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2223 GetSplitVector(Mask, LoMask, HiMask);
2229 std::tie(LoEVL, HiEVL) =
2267 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2278 ReplaceValueWith(
SDValue(SLD, 1), Ch);
  assert(Offset.isUndef() && "Unexpected indexed masked load offset");
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
  SDValue PassThruLo, PassThruHi;
    GetSplitVector(PassThru, PassThruLo, PassThruHi);
    std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
  ReplaceValueWith(SDValue(MLD, 1), Ch);
  if (auto *MSC = dyn_cast<MaskedGatherSDNode>(N)) {
    return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
  auto *VPSC = cast<VPGatherSDNode>(N);
  return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};

  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  if (SplitSETCC && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
  EVT LoMemVT, HiMemVT;
  if (getTypeAction(Ops.Index.getValueType()) ==
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, dl);
  if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) {
    SDValue PassThru = MGT->getPassThru();
    SDValue PassThruLo, PassThruHi;
      GetSplitVector(PassThru, PassThruLo, PassThruHi);
      std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
    SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Ops.Scale};
                              OpsLo, MMO, IndexTy, ExtType);
    SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Ops.Scale};
                              OpsHi, MMO, IndexTy, ExtType);
    auto *VPGT = cast<VPGatherSDNode>(N);
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
    SDValue OpsLo[] = {Ch, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
                           MMO, VPGT->getIndexType());
    SDValue OpsHi[] = {Ch, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
                           MMO, VPGT->getIndexType());
  ReplaceValueWith(SDValue(N, 1), Ch);
  EVT VecVT = N->getValueType(0);
  bool HasCustomLowering = false;
    HasCustomLowering = true;
  SDValue Passthru = N->getOperand(2);
  if (!HasCustomLowering) {
  std::tie(LoMask, HiMask) = SplitMask(Mask);
      MF, cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex());
  Chain = DAG.getStore(Chain, DL, Lo, StackPtr, PtrInfo);
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  if (getTypeAction(N->getOperand(0).getValueType()) ==
    GetSplitVector(N->getOperand(0), LL, LH);
  if (getTypeAction(N->getOperand(1).getValueType()) ==
    GetSplitVector(N->getOperand(1), RL, RH);
    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
    assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2), MaskLo,
    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2), MaskHi,
  EVT InVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), Lo, Hi);
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() <= 2) {
    Lo = DAG.getNode(Opcode, dl, LoVT, Lo, N->getOperand(1), Flags);
    Hi = DAG.getNode(Opcode, dl, HiVT, Hi, N->getOperand(1), Flags);
  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);

  EVT InVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), Lo, Hi);
  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
  unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
  unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
void DAGTypeLegalizer::SplitVecRes_UnaryOpWithTwoResults(SDNode *N,
  EVT InVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), Lo, Hi);
  Lo = DAG.getNode(N->getOpcode(), dl, {LoVT, LoVT1}, Lo);
  Hi = DAG.getNode(N->getOpcode(), dl, {HiVT, HiVT1}, Hi);
  Lo->setFlags(N->getFlags());
  Hi->setFlags(N->getFlags());
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  EVT SrcVT = N->getOperand(0).getValueType();
  EVT DestVT = N->getValueType(0);
  EVT SplitLoVT, SplitHiVT;
  LLVM_DEBUG(dbgs() << "Split vector extend via incremental extend:";
             N->dump(&DAG); dbgs() << "\n");
  if (!N->isVPOpcode()) {
        DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0));
        DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0),
                    N->getOperand(1), N->getOperand(2));
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
  Lo = DAG.getNode(N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
  Hi = DAG.getNode(N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
  SplitVecRes_UnaryOp(N, Lo, Hi);
  GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
  GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
    return N.getResNo() == 0 &&
  auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &DL](SDValue &Input1,
           "Expected build vector node.");
    for (unsigned I = 0; I < NewElts; ++I) {
        Ops[I] = Input2.getOperand(Idx - NewElts);
        Ops[I] = Input1.getOperand(Idx);
    return DAG.getBuildVector(NewVT, DL, Ops);
  auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT, this, NewElts,
    for (unsigned Idx = 0; Idx < std::size(Inputs); ++Idx) {
      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode());
    for (auto &P : ShufflesIdxs) {
      if (P.second.size() < 2)
      for (int &Idx : Mask) {
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
            dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
        int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
        Idx = MaskElt % NewElts +
              P.second[Shuffle->getOperand(MaskElt / NewElts) == P.first.first
      Inputs[P.second[0]] = P.first.first;
      Inputs[P.second[1]] = P.first.second;
      ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear();
    for (int &Idx : Mask) {
      unsigned SrcRegIdx = Idx / NewElts;
      if (Inputs[SrcRegIdx].isUndef()) {
          Inputs[SrcRegIdx].getNumOperands() == 2 &&
          !Inputs[SrcRegIdx].getOperand(1).isUndef() &&
        UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
    if (UsedSubVector.count() > 1) {
      for (unsigned I = 0; I < std::size(Inputs); ++I) {
        if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1))
        if (Pairs.empty() || Pairs.back().size() == 2)
        if (UsedSubVector.test(2 * I)) {
          Pairs.back().emplace_back(I, 0);
          assert(UsedSubVector.test(2 * I + 1) &&
                 "Expected to be used one of the subvectors.");
          Pairs.back().emplace_back(I, 1);
      if (!Pairs.empty() && Pairs.front().size() > 1) {
        for (int &Idx : Mask) {
          unsigned SrcRegIdx = Idx / NewElts;
              Pairs, [SrcRegIdx](ArrayRef<std::pair<unsigned, int>> Idxs) {
                return Idxs.front().first == SrcRegIdx ||
                       Idxs.back().first == SrcRegIdx;
          if (It == Pairs.end())
          Idx = It->front().first * NewElts + (Idx % NewElts) % (NewElts / 2) +
                (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
        for (ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
          Inputs[Idxs.front().first] = DAG.getNode(
              Inputs[Idxs.front().first].getValueType(),
              Inputs[Idxs.front().first].getOperand(Idxs.front().second),
              Inputs[Idxs.back().first].getOperand(Idxs.back().second));
    for (unsigned I = 0; I < std::size(Inputs); ++I) {
      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode());
      if (Shuffle->getOperand(0).getValueType() != NewVT)
      if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
          !Shuffle->isSplat()) {
      } else if (!Inputs[I].hasOneUse() &&
                 !Shuffle->getOperand(1).isUndef()) {
        for (int &Idx : Mask) {
          unsigned SrcRegIdx = Idx / NewElts;
          int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
          int OpIdx = MaskElt / NewElts;
        for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
          if (Shuffle->getOperand(OpIdx).isUndef())
          auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
          if (It == std::end(Inputs))
          int FoundOp = std::distance(std::begin(Inputs), It);
          for (int &Idx : Mask) {
            unsigned SrcRegIdx = Idx / NewElts;
            int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
            int MaskIdx = MaskElt / NewElts;
            if (OpIdx == MaskIdx)
              Idx = MaskElt % NewElts + FoundOp * NewElts;
          Op = (OpIdx + 1) % 2;
        for (int &Idx : Mask) {
          unsigned SrcRegIdx = Idx / NewElts;
          int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
          int OpIdx = MaskElt / NewElts;
          Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
  TryPeekThroughShufflesInputs(OrigMask);
  auto &&MakeUniqueInputs = [&Inputs, &IsConstant,
    for (const auto &I : Inputs) {
        UniqueConstantInputs.insert(I);
      else if (!I.isUndef())
    if (UniqueInputs.size() != std::size(Inputs)) {
      auto &&UniqueVec = UniqueInputs.takeVector();
      auto &&UniqueConstantVec = UniqueConstantInputs.takeVector();
      unsigned ConstNum = UniqueConstantVec.size();
      for (int &Idx : Mask) {
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
        const auto It = find(UniqueConstantVec, Inputs[SrcRegIdx]);
        if (It != UniqueConstantVec.end()) {
              NewElts * std::distance(UniqueConstantVec.begin(), It);
          assert(Idx >= 0 && "Expected defined mask idx.");
        const auto RegIt = find(UniqueVec, Inputs[SrcRegIdx]);
        assert(RegIt != UniqueVec.end() && "Cannot find non-const value.");
            NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
        assert(Idx >= 0 && "Expected defined mask idx.");
      copy(UniqueConstantVec, std::begin(Inputs));
      copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
  MakeUniqueInputs(OrigMask);
  copy(Inputs, std::begin(OrigInputs));
    unsigned FirstMaskIdx = High * NewElts;
    assert(!Output && "Expected default initialized initial value.");
    TryPeekThroughShufflesInputs(Mask);
    MakeUniqueInputs(Mask);
    copy(Inputs, std::begin(TmpInputs));
    bool SecondIteration = false;
    auto &&AccumulateResults = [&UsedIdx, &SecondIteration](unsigned Idx) {
      if (UsedIdx >= 0 && static_cast<unsigned>(UsedIdx) == Idx)
        SecondIteration = true;
      return SecondIteration;
        Mask, std::size(Inputs), std::size(Inputs),
        [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
        [&Output, &DAG = DAG, NewVT, &DL, &Inputs,
            Output = BuildVector(Inputs[Idx], Inputs[Idx], Mask);
            Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx],
                                          DAG.getUNDEF(NewVT), Mask);
          Inputs[Idx] = Output;
        [&AccumulateResults, &Output, &DAG = DAG, NewVT, &DL, &Inputs,
            unsigned Idx2, bool) {
          if (AccumulateResults(Idx1)) {
              Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
              Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1],
                                            Inputs[Idx2], Mask);
              Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
              Output = DAG.getVectorShuffle(NewVT, DL, TmpInputs[Idx1],
                                            TmpInputs[Idx2], Mask);
          Inputs[Idx1] = Output;
  copy(OrigInputs, std::begin(Inputs));
  EVT OVT = N->getValueType(0);
  const Align Alignment =
      DAG.getDataLayout().getABITypeAlign(NVT.getTypeForEVT(*DAG.getContext()));
  Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, SV, Alignment.value());
  Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, SV, Alignment.value());
  Chain = Hi.getValue(1);
  ReplaceValueWith(SDValue(N, 1), Chain);
  EVT DstVTLo, DstVTHi;
  std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(N->getValueType(0));
  EVT SrcVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), SrcLo, SrcHi);
    std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(N, 0);
  Lo = DAG.getNode(N->getOpcode(), dl, DstVTLo, SrcLo, N->getOperand(1));
  Hi = DAG.getNode(N->getOpcode(), dl, DstVTHi, SrcHi, N->getOperand(1));

  GetSplitVector(N->getOperand(0), InLo, InHi);
  std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL);
  EVT VT = N->getValueType(0);
  Align Alignment = DAG.getReducedAlign(VT, false);
  auto &MF = DAG.getMachineFunction();
                               DAG.getConstant(1, DL, PtrVT));
                               DAG.getConstant(EltWidth, DL, PtrVT));
  SDValue Stride = DAG.getConstant(-(int64_t)EltWidth, DL, PtrVT);
  SDValue TrueMask = DAG.getBoolConstant(true, DL, Mask.getValueType(), VT);
  SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(), DL, Val, StorePtr,
                                        DAG.getUNDEF(PtrVT), Stride, TrueMask,
  SDValue Load = DAG.getLoadVP(VT, DL, Store, StackPtr, Mask, EVL, LoadMMO);
  std::tie(Lo, Hi) = DAG.SplitVector(Load, DL);
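// VECTOR_DEINTERLEAVE / VECTOR_INTERLEAVE produce two vector results, so both
// operands are split and the results are recorded with SetSplitVector instead
// of being returned through Lo/Hi.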
void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) {
  SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
                           DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
                           DAG.getVTList(VT, VT), Op1Lo, Op1Hi);

void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(SDNode *N) {
  SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
                  DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
                  DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
  SetSplitVector(SDValue(N, 0), Res[0].getValue(0), Res[0].getValue(1));
  SetSplitVector(SDValue(N, 1), Res[1].getValue(0), Res[1].getValue(1));
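// Operand splitting: only an operand of this node requires splitting, so
// dispatch to the SplitVecOp_* handler and, if it yields a replacement value,
// install it with ReplaceValueWith.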
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
  switch (N->getOpcode()) {
    dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
  case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
  case ISD::VP_TRUNCATE:
    Res = SplitVecOp_TruncateHelper(N);
  case ISD::VP_FP_ROUND:
    Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
    Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(N), OpNo);
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(N), OpNo);
    Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
  case ISD::VP_SCATTER:
    Res = SplitVecOp_Scatter(cast<MemSDNode>(N), OpNo);
  case ISD::VP_GATHER:
    Res = SplitVecOp_Gather(cast<MemSDNode>(N), OpNo);
    Res = SplitVecOp_VSELECT(N, OpNo);
    Res = SplitVecOp_VECTOR_COMPRESS(N, OpNo);
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_UINT_TO_FP:
    if (N->getValueType(0).bitsLT(
            N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType()))
      Res = SplitVecOp_TruncateHelper(N);
      Res = SplitVecOp_UnaryOp(N);
    Res = SplitVecOp_FP_TO_XINT_SAT(N);
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
    Res = SplitVecOp_UnaryOp(N);
    Res = SplitVecOp_FPOpDifferentTypes(N);
    Res = SplitVecOp_CMP(N);
    Res = SplitVecOp_FAKE_USE(N);
    Res = SplitVecOp_ExtVecInRegOp(N);
    Res = SplitVecOp_VECREDUCE(N, OpNo);
    Res = SplitVecOp_VECREDUCE_SEQ(N);
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = SplitVecOp_VP_REDUCE(N, OpNo);
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = SplitVecOp_VP_CttzElements(N);
    Res = SplitVecOp_VECTOR_HISTOGRAM(N);

  if (!Res.getNode())
    return false;
  if (N->isStrictFPOpcode())
           "Invalid operand expansion");
           "Invalid operand expansion");
  ReplaceValueWith(SDValue(N, 0), Res);
SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
  assert(OpNo == 0 && "Illegal operand must be mask");
  assert(Mask.getValueType().isVector() && "VSELECT without a vector mask?");
  GetSplitVector(N->getOperand(0), Lo, Hi);
  assert(Lo.getValueType() == Hi.getValueType() &&
         "Lo and Hi have differing types");
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
  assert(LoOpVT == HiOpVT && "Asymmetric vector split?");
  SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
  std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL);
  std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL);
  std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_COMPRESS(SDNode *N, unsigned OpNo) {
  assert(OpNo == 1 && "Illegal operand must be mask");
  SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi);
  EVT VecVT = N->getValueType(0);

SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) {
  EVT ResVT = N->getValueType(0);
  SDValue VecOp = N->getOperand(OpNo);
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
  return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, N->getFlags());

  EVT ResVT = N->getValueType(0);
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
  return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);
SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo) {
  assert(N->isVPOpcode() && "Expected VP opcode");
  assert(OpNo == 1 && "Can only split reduce vector operand");
  unsigned Opc = N->getOpcode();
  EVT ResVT = N->getValueType(0);
  SDValue VecOp = N->getOperand(OpNo);
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(N->getOperand(3), VecVT, dl);
      DAG.getNode(Opc, dl, ResVT, {N->getOperand(0), Lo, MaskLo, EVLLo}, Flags);
  return DAG.getNode(Opc, dl, ResVT, {ResLo, Hi, MaskHi, EVLHi}, Flags);
  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi);
  EVT InVT = Lo.getValueType();
  if (N->isStrictFPOpcode()) {
    Lo = DAG.getNode(N->getOpcode(), dl, {OutVT, MVT::Other},
                     {N->getOperand(0), Lo});
    Hi = DAG.getNode(N->getOpcode(), dl, {OutVT, MVT::Other},
                     {N->getOperand(0), Hi});
    ReplaceValueWith(SDValue(N, 1), Ch);
  } else if (N->getNumOperands() == 3) {
    assert(N->isVPOpcode() && "Expected VP opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo, MaskLo, EVLLo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi, MaskHi, EVLHi);
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi);
  GetSplitVector(N->getOperand(1), Lo, Hi);

  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(0), Lo, Hi);
  auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
  Lo = BitConvertToInteger(Lo);
  Hi = BitConvertToInteger(Hi);
  if (DAG.getDataLayout().isBigEndian())

  assert(OpNo == 1 && "Invalid OpNo; can only split SubVec.");
  EVT ResVT = N->getValueType(0);
  GetSplitVector(SubVec, Lo, Hi);
  uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
      DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
  return SecondInsertion;
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  EVT SubVT = N->getValueType(0);
  GetSplitVector(N->getOperand(0), Lo, Hi);
  uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
  if (IdxVal < LoEltsMin) {
           "Extracted subvector crosses vector split!");
      N->getOperand(0).getValueType().isScalableVector())
                       DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
         "Extracting scalable subvector from fixed-width unsupported");
         "subvector from a scalable predicate vector");
  Align SmallestAlign = DAG.getReducedAlign(VecVT, false);
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
      SubVT, dl, Store, StackPtr,
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  GetSplitVector(Vec, Lo, Hi);
  uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
  if (IdxVal < LoElts)
                    DAG.getConstant(IdxVal - LoElts, SDLoc(N),
                                    Idx.getValueType())), 0);
  if (CustomLowerNode(N, N->getValueType(0), true))
    return DAG.getAnyExtOrTrunc(NewExtract, dl, N->getValueType(0));
  Align SmallestAlign = DAG.getReducedAlign(VecVT, false);
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
  assert(N->getValueType(0).bitsGE(EltVT) && "Illegal EXTRACT_VECTOR_ELT.");
  return DAG.getExtLoad(
  SplitVecRes_ExtVecInRegOp(N, Lo, Hi);

  SplitVecRes_Gather(N, Lo, Hi);
  ReplaceValueWith(SDValue(N, 0), Res);
  assert(N->isUnindexed() && "Indexed vp_store of vector?");
  assert(Offset.isUndef() && "Unexpected VP store offset");
  SDValue EVL = N->getVectorLength();
  Align Alignment = N->getOriginalAlign();
    GetSplitVector(Data, DataLo, DataHi);
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, Data.getValueType(), DL);
  Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO,
                      N->getAddressingMode(), N->isTruncatingStore(),
                      N->isCompressingStore());
                        N->isCompressingStore());
    MPI = N->getPointerInfo().getWithOffset(
    MMO = DAG.getMachineFunction().getMachineMemOperand(
        Alignment, N->getAAInfo(), N->getRanges());
    Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT, MMO,
                        N->getAddressingMode(), N->isTruncatingStore(),
                        N->isCompressingStore());
  assert(N->isUnindexed() && "Indexed vp_strided_store of a vector?");
  assert(N->getOffset().isUndef() && "Unexpected VP strided store offset");
    GetSplitVector(Data, LoData, HiData);
    std::tie(LoData, HiData) = DAG.SplitVector(Data, DL);
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
    SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask);
  else if (getTypeAction(Mask.getValueType()) ==
    GetSplitVector(Mask, LoMask, HiMask);
    std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
  std::tie(LoEVL, HiEVL) =
      DAG.SplitEVL(N->getVectorLength(), Data.getValueType(), DL);
      N->getChain(), DL, LoData, N->getBasePtr(), N->getOffset(),
      N->getStride(), LoMask, LoEVL, LoMemVT, N->getMemOperand(),
      N->getAddressingMode(), N->isTruncatingStore(), N->isCompressingStore());
  EVT PtrVT = N->getBasePtr().getValueType();
      DAG.getSExtOrTrunc(N->getStride(), DL, PtrVT));
  Align Alignment = N->getOriginalAlign();
      Alignment, N->getAAInfo(), N->getRanges());
      N->getChain(), DL, HiData, Ptr, N->getOffset(), N->getStride(), HiMask,
      HiEVL, HiMemVT, MMO, N->getAddressingMode(), N->isTruncatingStore(),
      N->isCompressingStore());
  assert(N->isUnindexed() && "Indexed masked store of vector?");
  assert(Offset.isUndef() && "Unexpected indexed masked store offset");
  Align Alignment = N->getOriginalAlign();
    GetSplitVector(Data, DataLo, DataHi);
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);
  Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO,
                          N->getAddressingMode(), N->isTruncatingStore(),
                          N->isCompressingStore());
                            N->isCompressingStore());
    MPI = N->getPointerInfo().getWithOffset(
    MMO = DAG.getMachineFunction().getMachineMemOperand(
        Alignment, N->getAAInfo(), N->getRanges());
    Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO,
                            N->getAddressingMode(), N->isTruncatingStore(),
                            N->isCompressingStore());
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
    return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
  auto *VPSC = cast<VPScatterSDNode>(N);
  return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
    GetSplitVector(Ops.Data, DataLo, DataHi);
    std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data, DL);
  if (OpNo == 1 && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, DL);
  if (getTypeAction(Ops.Index.getValueType()) ==
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, DL);
  if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
    SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale};
        DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                             MSC->getIndexType(), MSC->isTruncatingStore());
    SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Ops.Scale};
    return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi,
                                MMO, MSC->getIndexType(),
                                MSC->isTruncatingStore());
  auto *VPSC = cast<VPScatterSDNode>(N);
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(), DL);
  SDValue OpsLo[] = {Ch, DataLo, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
  Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                        VPSC->getIndexType());
  SDValue OpsHi[] = {Lo, DataHi, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
  return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi, MMO,
                          VPSC->getIndexType());
  assert(N->isUnindexed() && "Indexed store of vector?");
  assert(OpNo == 1 && "Can only split the stored value");
  bool isTruncating = N->isTruncatingStore();
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  GetSplitVector(N->getOperand(1), Lo, Hi);
  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
    Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT,
                           Alignment, MMOFlags, AAInfo);
    Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags,
  IncrementPointer(N, LoMemVT, MPI, Ptr);
    Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, MPI,
                           HiMemVT, Alignment, MMOFlags, AAInfo);
    Hi = DAG.getStore(Ch, DL, Hi, Ptr, MPI, Alignment, MMOFlags, AAInfo);
  EVT EltVT = N->getValueType(0).getVectorElementType();
  for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
                                 DAG.getVectorIdxConstant(i, DL)));
  return DAG.getBuildVector(N->getValueType(0), DL, Elts);
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SDValue InVec = N->getOperand(OpNo);
  EVT OutVT = N->getValueType(0);
  EVT LoOutVT, HiOutVT;
  std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
  assert(LoOutVT == HiOutVT && "Unequal split?");
  if (isTypeLegal(LoOutVT) ||
      InElementSize <= OutElementSize * 2)
    return SplitVecOp_UnaryOp(N);
    return SplitVecOp_UnaryOp(N);
  GetSplitVector(InVec, InLoVec, InHiVec);
  EVT HalfElementVT = IsFloat ?
    EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
  if (N->isStrictFPOpcode()) {
    HalfLo = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InLoVec});
    HalfHi = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InHiVec});
    HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
    HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);
  if (N->isStrictFPOpcode()) {
         DAG.getTargetConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()))});
        DAG.getTargetConstant(
  unsigned Opc = N->getOpcode();
  assert(N->getValueType(0).isVector() &&
         N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
         "Operand types must be vectors");
  SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
  GetSplitVector(N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
  GetSplitVector(N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);
  } else if (isStrict) {
    LoRes = DAG.getNode(Opc, DL, DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Lo0, Lo1, N->getOperand(3));
    HiRes = DAG.getNode(Opc, DL, DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Hi0, Hi1, N->getOperand(3));
    ReplaceValueWith(SDValue(N, 1), NewChain);
    assert(Opc == ISD::VP_SETCC && "Expected VP_SETCC opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
    LoRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Lo0, Lo1,
                        N->getOperand(2), MaskLo, EVLLo);
    HiRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Hi0, Hi1,
                        N->getOperand(2), MaskHi, EVLHi);
  EVT OpVT = N->getOperand(0).getValueType();
  return DAG.getNode(ExtendCode, DL, N->getValueType(0), Con);
  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi);
  EVT InVT = Lo.getValueType();
  if (N->isStrictFPOpcode()) {
    Lo = DAG.getNode(N->getOpcode(), DL, {OutVT, MVT::Other},
                     {N->getOperand(0), Lo, N->getOperand(2)});
    Hi = DAG.getNode(N->getOpcode(), DL, {OutVT, MVT::Other},
                     {N->getOperand(0), Hi, N->getOperand(2)});
                       Lo.getValue(1), Hi.getValue(1));
    ReplaceValueWith(SDValue(N, 1), NewChain);
  } else if (N->getOpcode() == ISD::VP_FP_ROUND) {
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), DL);
    Lo = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Lo, MaskLo, EVLLo);
    Hi = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Hi, MaskHi, EVLHi);
SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(SDNode *N) {
  EVT LHSLoVT, LHSHiVT;
  std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
  if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
    return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());
  std::tie(LHSLo, LHSHi) =
      DAG.SplitVector(N->getOperand(0), DL, LHSLoVT, LHSHiVT);
  std::tie(RHSLo, RHSHi) = DAG.SplitVector(N->getOperand(1), DL);
  SDValue Lo = DAG.getNode(N->getOpcode(), DL, LHSLoVT, LHSLo, RHSLo);
  SDValue Hi = DAG.getNode(N->getOpcode(), DL, LHSHiVT, LHSHi, RHSHi);

  SDValue LHSLo, LHSHi, RHSLo, RHSHi;
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
  EVT ResVT = N->getValueType(0);
  SDValue Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
  SDValue Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);
  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(0), Lo, Hi);
  EVT InVT = Lo.getValueType();
  Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, Lo, N->getOperand(1));
  Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, Hi, N->getOperand(1));

  EVT ResVT = N->getValueType(0);
  GetSplitVector(VecOp, Lo, Hi);
  auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1));
  auto [EVLLo, EVLHi] =
  SDValue VLo = DAG.getZExtOrTrunc(EVLLo, DL, ResVT);
      DAG.getSetCC(DL, getSetCCResultType(ResVT), ResLo, VLo, ISD::SETNE);
  return DAG.getSelect(DL, ResVT, ResLoNotEVL, ResLo,
                       DAG.getNode(ISD::ADD, DL, ResVT, VLo, ResHi));
SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_HISTOGRAM(SDNode *N) {
  SDValue IndexLo, IndexHi, MaskLo, MaskHi;
  std::tie(IndexLo, IndexHi) = DAG.SplitVector(HG->getIndex(), DL);
  std::tie(MaskLo, MaskHi) = DAG.SplitVector(HG->getMask(), DL);
  SDValue Lo = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL,
                                      OpsLo, MMO, IndexType);
  SDValue OpsHi[] = {Lo, Inc, MaskHi, Ptr, IndexHi, Scale, IntID};
  return DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL, OpsHi,
void DAGTypeLegalizer::ReplaceOtherWidenResults(SDNode *N, SDNode *WidenNode,
                                                unsigned WidenResNo) {
  unsigned NumResults = N->getNumValues();
  for (unsigned ResNo = 0; ResNo < NumResults; ResNo++) {
    if (ResNo == WidenResNo)
      continue;
    EVT ResVT = N->getValueType(ResNo);
    // ...
    SDValue ResVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT,
                                 SDValue(WidenNode, ResNo),
                                 DAG.getVectorIdxConstant(0, DL));
    ReplaceValueWith(SDValue(N, ResNo), ResVal);
  }
}
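// When only one result of a multi-result node is widened, the remaining
// results of the widened replacement still feed users that expect the
// original, narrower types; each such result is therefore re-extracted at
// index 0 and wired back in with ReplaceValueWith.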
void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
  LLVM_DEBUG(dbgs() << "Widen node result " << ResNo << ": "; N->dump(&DAG));
  // ...
  if (CustomWidenLowerNode(N, N->getValueType(ResNo)))
    return;
  // ...
  auto unrollExpandedOp = [&]() {
    // ...
    EVT VT = N->getValueType(0);
    // ...
      if (N->getNumValues() > 1)
        ReplaceOtherWidenResults(N, Res.getNode(), ResNo);
    // ...
  };

  switch (N->getOpcode()) {
  default:
    // ...
    dbgs() << "WidenVectorResult #" << ResNo << ": ";
    // ...
  // ...
    Res = WidenVecRes_ADDRSPACECAST(N); break;
  // ...
    Res = WidenVecRes_INSERT_SUBVECTOR(N); break;
  // ...
  case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
  // ...
  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecRes_ScalarOp(N); break;
  // ...
  case ISD::VP_SELECT:
    Res = WidenVecRes_Select(N); break;
  // ...
  case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
  case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
  // ...
    Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N)); break;
  // ...
    Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(N)); break;
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N)); break;
  // ...
    Res = WidenVecRes_VECTOR_COMPRESS(N); break;
  // ...
    Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(N)); break;
  // ...
    Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(N)); break;
  case ISD::VP_GATHER:
    Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(N)); break;
  // ...
    Res = WidenVecRes_VECTOR_REVERSE(N); break;
  // ...
  case ISD::OR:
  case ISD::VP_OR:
  // ...
  case ISD::VP_FMINNUM:
  case ISD::VP_FMAXNUM:
  case ISD::VP_FMINIMUM:
  case ISD::VP_FMAXIMUM:
  // ...
  case ISD::VP_FCOPYSIGN:
    Res = WidenVecRes_Binary(N); break;
  // ...
    Res = WidenVecRes_CMP(N); break;
  // ...
    if (unrollExpandedOp())
      break;
    // ...
    Res = WidenVecRes_BinaryCanTrap(N); break;
  // ...
    Res = WidenVecRes_BinaryWithExtraScalarOp(N); break;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    Res = WidenVecRes_StrictFP(N); break;
  // ...
    Res = WidenVecRes_OverflowOp(N, ResNo); break;
  // ...
    Res = WidenVecRes_FCOPYSIGN(N); break;
  // ...
    Res = WidenVecRes_UnarySameEltsWithScalarArg(N); break;
  // ...
    if (!unrollExpandedOp())
      Res = WidenVecRes_ExpOp(N);
    break;
  // ...
    Res = WidenVecRes_EXTEND_VECTOR_INREG(N); break;
  // ...
  case ISD::VP_FP_EXTEND:
  case ISD::VP_FP_ROUND:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_TRUNCATE:
  case ISD::VP_UINT_TO_FP:
  case ISD::VP_ZERO_EXTEND:
    Res = WidenVecRes_Convert(N); break;
  // ...
    Res = WidenVecRes_FP_TO_XINT_SAT(N); break;
  // ...
  case ISD::VP_LLRINT:
    Res = WidenVecRes_XROUND(N); break;
  // ...
    if (unrollExpandedOp())
      break;
    // ...
  case ISD::VP_BITREVERSE:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::VP_FFLOOR:
  case ISD::VP_FNEARBYINT:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
  // ...
    Res = WidenVecRes_Unary(N); break;
  // ...
    Res = WidenVecRes_Ternary(N); break;
  // ...
    if (!unrollExpandedOp())
      Res = WidenVecRes_UnaryOpWithTwoResults(N, ResNo);
    break;
  }

  // ...
  SetWidenedVector(SDValue(N, ResNo), Res);
}
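// unrollExpandedOp() covers opcodes the target would expand to scalar calls
// anyway: instead of widening (and doing work on the undef padding lanes), the
// node is unrolled over its original elements, and ReplaceOtherWidenResults
// patches up any additional results of the node.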
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  SDValue InOp3 = GetWidenedVector(N->getOperand(2));
  if (N->getNumOperands() == 3)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);

  assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  // ...
  return DAG.getNode(N->getOpcode(), dl, WidenVT,
                     {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
}
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  if (N->getNumOperands() == 2)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2,
                       N->getFlags());

  assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  // ...
  return DAG.getNode(N->getOpcode(), dl, WidenVT,
                     {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags());
}
  EVT OpVT = LHS.getValueType();
  // ...
  LHS = GetWidenedVector(LHS);
  RHS = GetWidenedVector(RHS);
  OpVT = LHS.getValueType();
  // ...
  return DAG.getNode(N->getOpcode(), dl, WidenResVT, LHS, RHS);
}
SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) {
  // ...
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  // ...
  return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
                     /* ... */);
}
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI,
                                 SmallVectorImpl<SDValue> &ConcatOps,
                                 unsigned ConcatEnd, EVT VT, EVT MaxVT,
                                 EVT WidenVT) {
  // ...
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
    // ...
      return ConcatOps[0];
  }

  SDLoc dl(ConcatOps[0]);
  // ...
  while (ConcatOps[ConcatEnd - 1].getValueType() != MaxVT) {
    int Idx = ConcatEnd - 1;
    VT = ConcatOps[Idx--].getValueType();
    // ...
    unsigned NumToInsert = ConcatEnd - Idx - 1;
    for (unsigned i = 0, OpIdx = Idx + 1; i < NumToInsert; i++, OpIdx++) {
      // ...
    }
    ConcatOps[Idx + 1] = VecOp;
    ConcatEnd = Idx + 2;
    // ...
    unsigned RealVals = ConcatEnd - Idx - 1;
    unsigned SubConcatEnd = 0;
    unsigned SubConcatIdx = Idx + 1;
    while (SubConcatEnd < RealVals)
      SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
    while (SubConcatEnd < OpsToConcat)
      SubConcatOps[SubConcatEnd++] = undefVec;
    // ...
                     NextVT, SubConcatOps);
    ConcatEnd = SubConcatIdx + 1;
  }

  // ...
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
    // ...
      return ConcatOps[0];
  }

  // ...
  if (NumOps != ConcatEnd) {
    // ...
    for (unsigned j = ConcatEnd; j < NumOps; ++j)
      ConcatOps[j] = UndefVal;
  }
  // ...
}
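// CollectOpsToWiden takes the partial results accumulated in
// ConcatOps[0..ConcatEnd) - possibly of mixed, progressively narrower vector
// types - merges runs of narrow values into MaxVT-sized pieces, pads the tail
// with UNDEF where required, and concatenates everything into a single
// WidenVT value.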
  unsigned Opcode = N->getOpcode();
  // ...
    NumElts = NumElts / 2;
  // ...
  if (NumElts != 1 && !TLI.canOpTrap(N->getOpcode(), VT)) {
    // The operation cannot trap for this type, so widen as normal.
    SDValue InOp1 = GetWidenedVector(N->getOperand(0));
    SDValue InOp2 = GetWidenedVector(N->getOperand(1));
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
  }

  // ...
    SDValue InOp1 = GetWidenedVector(N->getOperand(0));
    SDValue InOp2 = GetWidenedVector(N->getOperand(1));
    SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
    // ...
        N->getValueType(0).getVectorElementCount());
    return DAG.getNode(*VPOpcode, dl, WidenVT, InOp1, InOp2, Mask, EVL,
                       /* ... */);

  // ...
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
  // ...
  unsigned ConcatEnd = 0;
  // ...
  while (CurNumElts != 0) {
    while (CurNumElts >= NumElts) {
      // ...
          DAG.getVectorIdxConstant(Idx, dl));
      // ...
          DAG.getVectorIdxConstant(Idx, dl));
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
      // ...
      CurNumElts -= NumElts;
    }
    // ...
    NumElts = NumElts / 2;
    // ...
    for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
      // ...
          InOp1, DAG.getVectorIdxConstant(Idx, dl));
      // ...
          InOp2, DAG.getVectorIdxConstant(Idx, dl));
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
                                           /* ... */);
    }
  }
  // ...
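// For ops that may trap (integer division being the classic case), widening
// must not execute the operation on undefined padding lanes. The code above
// therefore keeps the simple widened form only when the target reports the op
// cannot trap for this type, prefers a VP form with an explicit vector length
// when one is available, and otherwise splits the work into progressively
// smaller legal sub-vectors (down to scalars) covering only the original
// elements, collecting the pieces for CollectOpsToWiden.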
5045 switch (
N->getOpcode()) {
5048 return WidenVecRes_STRICT_FSETCC(
N);
5055 return WidenVecRes_Convert_StrictFP(
N);
5061 unsigned NumOpers =
N->getNumOperands();
5062 unsigned Opcode =
N->getOpcode();
5069 NumElts = NumElts / 2;
5080 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
5084 unsigned ConcatEnd = 0;
5091 for (
unsigned i = 1; i < NumOpers; ++i) {
5097 Oper = GetWidenedVector(Oper);
5103 DAG.getUNDEF(WideOpVT), Oper,
5104 DAG.getVectorIdxConstant(0, dl));
5116 while (CurNumElts != 0) {
5117 while (CurNumElts >= NumElts) {
5120 for (
unsigned i = 0; i < NumOpers; ++i) {
5123 EVT OpVT =
Op.getValueType();
5129 DAG.getVectorIdxConstant(
Idx, dl));
5135 EVT OperVT[] = {VT, MVT::Other};
5137 ConcatOps[ConcatEnd++] = Oper;
5140 CurNumElts -= NumElts;
5143 NumElts = NumElts / 2;
5148 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
5151 for (
unsigned i = 0; i < NumOpers; ++i) {
5154 EVT OpVT =
Op.getValueType();
5158 DAG.getVectorIdxConstant(
Idx, dl));
5163 EVT WidenVT[] = {WidenEltVT, MVT::Other};
5165 ConcatOps[ConcatEnd++] = Oper;
5174 if (Chains.
size() == 1)
5175 NewChain = Chains[0];
5178 ReplaceValueWith(
SDValue(
N, 1), NewChain);
SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(SDNode *N, unsigned ResNo) {
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT WideResVT, WideOvVT;
  // ...
    WideLHS = GetWidenedVector(N->getOperand(0));
    WideRHS = GetWidenedVector(N->getOperand(1));
  // ...
        N->getOperand(0), Zero);
  // ...
        N->getOperand(1), Zero);
  // ...
  SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
  SDNode *WideNode =
      DAG.getNode(N->getOpcode(), DL, WideVTs, WideLHS, WideRHS).getNode();

  // ...
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  // ...
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  // ...
  return SDValue(WideNode, ResNo);
}
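// Only the requested result number is widened here; the node's other result
// (the value or the overflow flag) is legalized on its own, so it is handed
// back through ReplaceValueWith rather than SetWidenedVector.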
5243 unsigned Opcode =
N->getOpcode();
5252 InOp = ZExtPromotedInteger(InOp);
5263 InOp = GetWidenedVector(
N->getOperand(0));
5266 if (InVTEC == WidenEC) {
5267 if (
N->getNumOperands() == 1)
5268 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Flags);
5269 if (
N->getNumOperands() == 3) {
5270 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5273 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
5275 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
5298 unsigned NumConcat =
5303 if (
N->getNumOperands() == 1)
5304 return DAG.getNode(Opcode,
DL, WidenVT, InVec, Flags);
5305 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
5310 DAG.getVectorIdxConstant(0,
DL));
5312 if (
N->getNumOperands() == 1)
5313 return DAG.getNode(Opcode,
DL, WidenVT, InVal, Flags);
5314 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
5323 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5324 for (
unsigned i=0; i < MinElts; ++i) {
5326 DAG.getVectorIdxConstant(i,
DL));
5327 if (
N->getNumOperands() == 1)
5328 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val, Flags);
5330 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
5333 return DAG.getBuildVector(WidenVT,
DL, Ops);
5342 EVT SrcVT = Src.getValueType();
5346 Src = GetWidenedVector(Src);
5347 SrcVT = Src.getValueType();
5354 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
5363 EVT SrcVT = Src.getValueType();
5367 Src = GetWidenedVector(Src);
5368 SrcVT = Src.getValueType();
5375 if (
N->getNumOperands() == 1)
5376 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
5378 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5379 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5383 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
5386SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
5397 unsigned Opcode =
N->getOpcode();
5403 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
5408 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5409 for (
unsigned i=0; i < MinElts; ++i) {
5411 DAG.getVectorIdxConstant(i,
DL));
5412 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
5416 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5418 return DAG.getBuildVector(WidenVT,
DL, Ops);
5421SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
5422 unsigned Opcode =
N->getOpcode();
5435 InOp = GetWidenedVector(InOp);
5442 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5449 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
5451 DAG.getVectorIdxConstant(i,
DL));
5468 while (Ops.
size() != WidenNumElts)
5471 return DAG.getBuildVector(WidenVT,
DL, Ops);
5477 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5478 return WidenVecRes_BinaryCanTrap(
N);
5488SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5489 SDValue FpValue =
N->getOperand(0);
5493 SDValue Arg = GetWidenedVector(FpValue);
5494 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5500 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5502 EVT ExpVT =
RHS.getValueType();
5507 ExpOp = ModifyToType(RHS, WideExpVT);
5510 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5516 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5517 if (
N->getNumOperands() == 1)
5518 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5520 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5521 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5525 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5526 {InOp,
Mask,
N->getOperand(2)});
5532 cast<VTSDNode>(
N->getOperand(1))->getVT()
5533 .getVectorElementType(),
5535 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5536 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5537 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5540SDValue DAGTypeLegalizer::WidenVecRes_UnaryOpWithTwoResults(
SDNode *
N,
5542 EVT VT0 =
N->getValueType(0);
5543 EVT VT1 =
N->getValueType(1);
5547 "expected both results to be vectors of matching element count");
5550 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5559 DAG.getNode(
N->getOpcode(),
SDLoc(
N), {WidenVT0, WidenVT1}, InOp)
5562 ReplaceOtherWidenResults(
N, WidenNode, ResNo);
5563 return SDValue(WidenNode, ResNo);
5566SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5567 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5568 return GetWidenedVector(WidenVec);
5573 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5574 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
5576 return DAG.getAddrSpaceCast(
SDLoc(
N), WidenVT, InOp,
5577 AddrSpaceCastN->getSrcAddressSpace(),
5578 AddrSpaceCastN->getDestAddressSpace());
5584 EVT VT =
N->getValueType(0);
5588 switch (getTypeAction(InVT)) {
5602 SDValue NInOp = GetPromotedInteger(InOp);
5604 if (WidenVT.
bitsEq(NInVT)) {
5607 if (DAG.getDataLayout().isBigEndian()) {
5612 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5631 InOp = GetWidenedVector(InOp);
5633 if (WidenVT.
bitsEq(InVT))
5643 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5648 unsigned NewNumParts = WidenSize / InSize;
5661 EVT OrigInVT =
N->getOperand(0).getValueType();
5674 if (WidenSize % InSize == 0) {
5681 DAG.ExtractVectorElements(InOp, Ops);
5682 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5694 return CreateStackStoreLoad(InOp, WidenVT);
5700 EVT VT =
N->getValueType(0);
5704 EVT EltVT =
N->getOperand(0).getValueType();
5711 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5712 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5714 return DAG.getBuildVector(WidenVT, dl, NewOps);
5718 EVT InVT =
N->getOperand(0).getValueType();
5721 unsigned NumOperands =
N->getNumOperands();
5723 bool InputWidened =
false;
5727 if (WidenNumElts % NumInElts == 0) {
5729 unsigned NumConcat = WidenNumElts / NumInElts;
5730 SDValue UndefVal = DAG.getUNDEF(InVT);
5732 for (
unsigned i=0; i < NumOperands; ++i)
5733 Ops[i] =
N->getOperand(i);
5734 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5739 InputWidened =
true;
5743 for (i=1; i < NumOperands; ++i)
5744 if (!
N->getOperand(i).isUndef())
5747 if (i == NumOperands)
5750 return GetWidenedVector(
N->getOperand(0));
5752 if (NumOperands == 2) {
5754 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5760 for (
unsigned i = 0; i < NumInElts; ++i) {
5762 MaskOps[i + NumInElts] = i + WidenNumElts;
5764 return DAG.getVectorShuffle(WidenVT, dl,
5765 GetWidenedVector(
N->getOperand(0)),
5766 GetWidenedVector(
N->getOperand(1)),
5773 "Cannot use build vectors to widen CONCAT_VECTOR result");
5781 for (
unsigned i=0; i < NumOperands; ++i) {
5784 InOp = GetWidenedVector(InOp);
5785 for (
unsigned j = 0;
j < NumInElts; ++
j)
5787 DAG.getVectorIdxConstant(j, dl));
5789 SDValue UndefVal = DAG.getUNDEF(EltVT);
5790 for (;
Idx < WidenNumElts; ++
Idx)
5791 Ops[
Idx] = UndefVal;
5792 return DAG.getBuildVector(WidenVT, dl, Ops);
5795SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5796 EVT VT =
N->getValueType(0);
5798 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5805SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5806 EVT VT =
N->getValueType(0);
5813 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5815 InOp = GetWidenedVector(InOp);
5821 if (IdxVal == 0 && InVT == WidenVT)
5828 assert(IdxVal % VTNumElts == 0 &&
5829 "Expected Idx to be a multiple of subvector minimum vector length");
5830 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5843 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5844 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5845 "down type's element count");
5852 for (;
I < VTNumElts / GCD; ++
I)
5855 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5856 for (;
I < WidenNumElts / GCD; ++
I)
5863 "EXTRACT_SUBVECTOR for scalable vectors");
5870 for (i = 0; i < VTNumElts; ++i)
5872 DAG.getVectorIdxConstant(IdxVal + i, dl));
5874 SDValue UndefVal = DAG.getUNDEF(EltVT);
5875 for (; i < WidenNumElts; ++i)
5877 return DAG.getBuildVector(WidenVT, dl, Ops);
5888SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5889 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5892 N->getOperand(1),
N->getOperand(2));
5905 if (!
LD->getMemoryVT().isByteSized()) {
5909 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5918 EVT LdVT =
LD->getMemoryVT();
5931 LD->getChain(),
LD->getBasePtr(),
LD->getOffset(), Mask,
5932 EVL,
LD->getMemoryVT(),
LD->getMemOperand());
5944 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5946 Result = GenWidenVectorLoads(LdChain, LD);
5953 if (LdChain.
size() == 1)
5954 NewChain = LdChain[0];
5960 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5971 SDValue EVL =
N->getVectorLength();
5978 "Unable to widen binary VP op");
5979 Mask = GetWidenedVector(Mask);
5980 assert(
Mask.getValueType().getVectorElementCount() ==
5983 "Unable to widen vector load");
5986 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5987 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5988 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
6002 "Unable to widen VP strided load");
6003 Mask = GetWidenedVector(Mask);
6006 assert(
Mask.getValueType().getVectorElementCount() ==
6008 "Data and mask vectors should have the same number of elements");
6010 SDValue Res = DAG.getStridedLoadVP(
6011 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
6012 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
6013 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
6014 N->isExpandingLoad());
6022SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_COMPRESS(
SDNode *
N) {
6025 SDValue Passthru =
N->getOperand(2);
6029 Mask.getValueType().getVectorElementType(),
6032 SDValue WideVec = ModifyToType(Vec, WideVecVT);
6033 SDValue WideMask = ModifyToType(Mask, WideMaskVT,
true);
6034 SDValue WidePassthru = ModifyToType(Passthru, WideVecVT);
6036 WideMask, WidePassthru);
6043 EVT MaskVT =
Mask.getValueType();
6044 SDValue PassThru = GetWidenedVector(
N->getPassThru());
6052 Mask = ModifyToType(Mask, WideMaskVT,
true);
6054 SDValue Res = DAG.getMaskedLoad(
6055 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
6056 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
6057 ExtType,
N->isExpandingLoad());
6068 EVT MaskVT =
Mask.getValueType();
6069 SDValue PassThru = GetWidenedVector(
N->getPassThru());
6078 Mask = ModifyToType(Mask, WideMaskVT,
true);
6083 Index.getValueType().getScalarType(),
6085 Index = ModifyToType(Index, WideIndexVT);
6091 N->getMemoryVT().getScalarType(), NumElts);
6092 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
6093 WideMemVT, dl, Ops,
N->getMemOperand(),
6094 N->getIndexType(),
N->getExtensionType());
6111 N->getMemoryVT().getScalarType(), WideEC);
6112 Mask = GetWidenedMask(Mask, WideEC);
6115 Mask,
N->getVectorLength()};
6116 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
6117 dl, Ops,
N->getMemOperand(),
N->getIndexType());
6127 if (
N->isVPOpcode())
6128 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0),
6129 N->getOperand(1),
N->getOperand(2));
6130 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
6158 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
6159 return N->getOperand(OpNo).getValueType();
6167 N =
N.getOperand(0);
6169 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
6170 if (!
N->getOperand(i)->isUndef())
6172 N =
N.getOperand(0);
6176 N =
N.getOperand(0);
6178 N =
N.getOperand(0);
6205 { MaskVT, MVT::Other }, Ops);
6206 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
6216 if (MaskScalarBits < ToMaskScalBits) {
6220 }
else if (MaskScalarBits > ToMaskScalBits) {
6226 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
6228 "Mask should have the right element size by now.");
6231 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
6233 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
6238 EVT SubVT =
Mask->getValueType(0);
6244 assert((
Mask->getValueType(0) == ToMaskVT) &&
6245 "A mask of ToMaskVT should have been produced by now.");
6266 EVT CondVT =
Cond->getValueType(0);
6270 EVT VSelVT =
N->getValueType(0);
6282 EVT FinalVT = VSelVT;
6294 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
6312 EVT ToMaskVT = VSelVT;
6319 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6335 if (ScalarBits0 != ScalarBits1) {
6336 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
6337 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
6349 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
6350 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
6354 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6367 unsigned Opcode =
N->getOpcode();
6369 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
6370 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6371 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6373 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
6379 Cond1 = GetWidenedVector(Cond1);
6387 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
6388 SDValue Res = ModifyToType(SplitSelect, WidenVT);
6393 Cond1 = ModifyToType(Cond1, CondWidenVT);
6396 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6397 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6399 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
6400 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
6402 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
6406 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
6407 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
6410 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
6415 return DAG.getUNDEF(WidenVT);
6419 EVT VT =
N->getValueType(0);
6426 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
6427 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
6431 for (
unsigned i = 0; i != NumElts; ++i) {
6432 int Idx =
N->getMaskElt(i);
6433 if (
Idx < (
int)NumElts)
6436 NewMask[i] =
Idx - NumElts + WidenNumElts;
6438 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
6442 EVT VT =
N->getValueType(0);
6447 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
6453 unsigned IdxVal = WidenNumElts - VTNumElts;
6466 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
6469 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
6470 "down type's element count");
6473 for (; i < VTNumElts / GCD; ++i)
6476 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
6477 for (; i < WidenNumElts / GCD; ++i)
6486 std::iota(
Mask.begin(),
Mask.begin() + VTNumElts, IdxVal);
6488 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
6493 assert(
N->getValueType(0).isVector() &&
6494 N->getOperand(0).getValueType().isVector() &&
6495 "Operands must be vectors");
6509 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
6510 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
6517 InOp1 = GetWidenedVector(InOp1);
6518 InOp2 = GetWidenedVector(InOp2);
6520 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
6521 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
6528 "Input not widened to expected type!");
6530 if (
N->getOpcode() == ISD::VP_SETCC) {
6533 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6534 N->getOperand(2), Mask,
N->getOperand(4));
6541 assert(
N->getValueType(0).isVector() &&
6542 N->getOperand(1).getValueType().isVector() &&
6543 "Operands must be vectors");
6544 EVT VT =
N->getValueType(0);
6555 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6560 for (
unsigned i = 0; i != NumElts; ++i) {
6562 DAG.getVectorIdxConstant(i, dl));
6564 DAG.getVectorIdxConstant(i, dl));
6566 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6567 {Chain, LHSElem, RHSElem, CC});
6568 Chains[i] = Scalars[i].getValue(1);
6569 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6570 DAG.getBoolConstant(
true, dl, EltVT, VT),
6571 DAG.getBoolConstant(
false, dl, EltVT, VT));
6575 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6577 return DAG.getBuildVector(WidenVT, dl, Scalars);
bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
  LLVM_DEBUG(dbgs() << "Widen node operand " << OpNo << ": "; N->dump(&DAG));
  // ...
  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
    return false;
  // ...
  switch (N->getOpcode()) {
  default:
    // ...
    dbgs() << "WidenVectorOperand op #" << OpNo << ": ";
    // ...
  // ...
    Res = WidenVecOp_FAKE_USE(N); break;
  // ...
  case ISD::STORE: Res = WidenVecOp_STORE(N); break;
  case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(N, OpNo); break;
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = WidenVecOp_VP_STRIDED_STORE(N, OpNo); break;
  // ...
    Res = WidenVecOp_EXTEND_VECTOR_INREG(N); break;
  case ISD::MSTORE: Res = WidenVecOp_MSTORE(N, OpNo); break;
  case ISD::MGATHER: Res = WidenVecOp_MGATHER(N, OpNo); break;
  // ...
  case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(N, OpNo); break;
  case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
  // ...
    Res = WidenVecOp_UnrollVectorOp(N); break;
  // ...
    Res = WidenVecOp_EXTEND(N); break;
  // ...
    Res = WidenVecOp_CMP(N); break;
  // ...
    Res = WidenVecOp_Convert(N); break;
  // ...
    Res = WidenVecOp_FP_TO_XINT_SAT(N); break;
  // ...
  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecOp_VP_SPLAT(N, OpNo); break;
  // ...
    Res = WidenVecOp_VECREDUCE(N); break;
  // ...
    Res = WidenVecOp_VECREDUCE_SEQ(N); break;
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = WidenVecOp_VP_REDUCE(N); break;
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = WidenVecOp_VP_CttzElements(N); break;
  }

  // If Res is null, the sub-method took care of registering the result.
  if (!Res.getNode())
    return false;
  // ...
  if (N->isStrictFPOpcode())
    assert(/* ... */ &&
           "Invalid operand expansion");
  else
    assert(/* ... */ &&
           "Invalid operand expansion");

  ReplaceValueWith(SDValue(N, 0), Res);
  return false;
}
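// Operand widening must produce a replacement whose result type still matches
// the original node: each WidenVecOp_* helper widens the vector operand(s),
// performs the operation, and then narrows or extracts back to the original
// type before the node is replaced.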
6740 EVT VT =
N->getValueType(0);
6745 "Unexpected type action");
6746 InOp = GetWidenedVector(InOp);
6749 "Input wasn't widened!");
6760 FixedEltVT == InEltVT) {
6762 "Not enough elements in the fixed type for the operand!");
6764 "We can't have the same type as we started with!");
6767 DAG.getUNDEF(FixedVT), InOp,
6768 DAG.getVectorIdxConstant(0,
DL));
6771 DAG.getVectorIdxConstant(0,
DL));
6780 return WidenVecOp_Convert(
N);
6785 switch (
N->getOpcode()) {
6800 EVT OpVT =
N->getOperand(0).getValueType();
6801 EVT ResVT =
N->getValueType(0);
6809 DAG.getVectorIdxConstant(0, dl));
6811 DAG.getVectorIdxConstant(0, dl));
6817 LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
6818 RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);
6820 return DAG.getNode(
N->getOpcode(), dl, ResVT, LHS, RHS);
6827 return DAG.UnrollVectorOp(
N);
6832 EVT ResultVT =
N->getValueType(0);
6834 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6843 {WideArg,
Test},
N->getFlags());
6850 DAG.getVectorIdxConstant(0,
DL));
6852 EVT OpVT =
N->getOperand(0).getValueType();
6855 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6860 EVT VT =
N->getValueType(0);
6863 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6866 "Unexpected type action");
6867 InOp = GetWidenedVector(InOp);
6869 unsigned Opcode =
N->getOpcode();
6875 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6877 if (
N->isStrictFPOpcode()) {
6879 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6880 {
N->getOperand(0), InOp,
N->getOperand(2) });
6882 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6883 {
N->getOperand(0), InOp });
6889 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6891 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6894 DAG.getVectorIdxConstant(0, dl));
6902 if (
N->isStrictFPOpcode()) {
6905 for (
unsigned i=0; i < NumElts; ++i) {
6907 DAG.getVectorIdxConstant(i, dl));
6908 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6912 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6914 for (
unsigned i = 0; i < NumElts; ++i)
6915 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6917 InOp, DAG.getVectorIdxConstant(i, dl)));
6920 return DAG.getBuildVector(VT, dl, Ops);
6924 EVT DstVT =
N->getValueType(0);
6925 SDValue Src = GetWidenedVector(
N->getOperand(0));
6926 EVT SrcVT = Src.getValueType();
6935 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6938 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6942 return DAG.UnrollVectorOp(
N);
6946 EVT VT =
N->getValueType(0);
6947 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6955 if (!VT.
isVector() && VT != MVT::x86mmx &&
6962 DAG.getVectorIdxConstant(0, dl));
6976 .divideCoefficientBy(EltSize);
6981 DAG.getVectorIdxConstant(0, dl));
6986 return CreateStackStoreLoad(InOp, VT);
6994 SDValue WidenedOp = GetWidenedVector(
N->getOperand(1));
7000 EVT VT =
N->getValueType(0);
7002 EVT InVT =
N->getOperand(0).getValueType();
7007 unsigned NumOperands =
N->getNumOperands();
7010 for (i = 1; i < NumOperands; ++i)
7011 if (!
N->getOperand(i).isUndef())
7014 if (i == NumOperands)
7015 return GetWidenedVector(
N->getOperand(0));
7025 for (
unsigned i=0; i < NumOperands; ++i) {
7029 "Unexpected type action");
7030 InOp = GetWidenedVector(InOp);
7031 for (
unsigned j = 0;
j < NumInElts; ++
j)
7033 DAG.getVectorIdxConstant(j, dl));
7035 return DAG.getBuildVector(VT, dl, Ops);
7038SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
7039 EVT VT =
N->getValueType(0);
7044 SubVec = GetWidenedVector(SubVec);
7050 bool IndicesValid =
false;
7053 IndicesValid =
true;
7057 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
7058 Attribute::VScaleRange);
7063 IndicesValid =
true;
7069 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
7074 "INSERT_SUBVECTOR");
7077SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
7078 SDValue InOp = GetWidenedVector(
N->getOperand(0));
7080 N->getValueType(0), InOp,
N->getOperand(1));
7083SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
7084 SDValue InOp = GetWidenedVector(
N->getOperand(0));
7086 N->getValueType(0), InOp,
N->getOperand(1));
7089SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
7090 SDValue InOp = GetWidenedVector(
N->getOperand(0));
7091 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
7099 if (!
ST->getMemoryVT().getScalarType().isByteSized())
7102 if (
ST->isTruncatingStore())
7121 StVal = GetWidenedVector(StVal);
7125 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
7126 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
7127 EVL, StVT,
ST->getMemOperand(),
7128 ST->getAddressingMode());
7132 if (GenWidenVectorStores(StChain, ST)) {
7133 if (StChain.
size() == 1)
7142SDValue DAGTypeLegalizer::WidenVecOp_VP_SPLAT(
SDNode *
N,
unsigned OpNo) {
7143 assert(OpNo == 1 &&
"Can widen only mask operand of vp_splat");
7144 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0),
7145 N->getOperand(0), GetWidenedVector(
N->getOperand(1)),
7149SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
7150 assert((OpNo == 1 || OpNo == 3) &&
7151 "Can widen only data or mask operand of vp_store");
7159 StVal = GetWidenedVector(StVal);
7165 "Unable to widen VP store");
7166 Mask = GetWidenedVector(Mask);
7168 Mask = GetWidenedVector(Mask);
7174 "Unable to widen VP store");
7175 StVal = GetWidenedVector(StVal);
7178 assert(
Mask.getValueType().getVectorElementCount() ==
7180 "Mask and data vectors should have the same number of elements");
7181 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
7182 ST->getOffset(), Mask,
ST->getVectorLength(),
7183 ST->getMemoryVT(),
ST->getMemOperand(),
7184 ST->getAddressingMode(),
ST->isTruncatingStore(),
7185 ST->isCompressingStore());
7190 assert((OpNo == 1 || OpNo == 4) &&
7191 "Can widen only data or mask operand of vp_strided_store");
7200 "Unable to widen VP strided store");
7204 "Unable to widen VP strided store");
7206 StVal = GetWidenedVector(StVal);
7207 Mask = GetWidenedVector(Mask);
7210 Mask.getValueType().getVectorElementCount() &&
7211 "Data and mask vectors should have the same number of elements");
7213 return DAG.getStridedStoreVP(
7220SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
7221 assert((OpNo == 1 || OpNo == 4) &&
7222 "Can widen only data or mask operand of mstore");
7225 EVT MaskVT =
Mask.getValueType();
7231 StVal = GetWidenedVector(StVal);
7238 Mask = ModifyToType(Mask, WideMaskVT,
true);
7242 Mask = ModifyToType(Mask, WideMaskVT,
true);
7248 StVal = ModifyToType(StVal, WideVT);
7251 assert(
Mask.getValueType().getVectorNumElements() ==
7253 "Mask and data vectors should have the same number of elements");
7260SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
7261 assert(OpNo == 4 &&
"Can widen only the index of mgather");
7262 auto *MG = cast<MaskedGatherSDNode>(
N);
7263 SDValue DataOp = MG->getPassThru();
7265 SDValue Scale = MG->getScale();
7273 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
7274 MG->getMemOperand(), MG->getIndexType(),
7275 MG->getExtensionType());
7281SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
7290 DataOp = GetWidenedVector(DataOp);
7294 EVT IndexVT =
Index.getValueType();
7297 Index = ModifyToType(Index, WideIndexVT);
7300 EVT MaskVT =
Mask.getValueType();
7303 Mask = ModifyToType(Mask, WideMaskVT,
true);
7308 }
else if (OpNo == 4) {
7310 Index = GetWidenedVector(Index);
7316 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
7321SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
7330 DataOp = GetWidenedVector(DataOp);
7331 Index = GetWidenedVector(Index);
7333 Mask = GetWidenedMask(Mask, WideEC);
7336 }
else if (OpNo == 3) {
7338 Index = GetWidenedVector(Index);
7345 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
7350 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
7351 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
7353 EVT VT =
N->getValueType(0);
7368 SVT, InOp0, InOp1,
N->getOperand(2));
7375 DAG.getVectorIdxConstant(0, dl));
7377 EVT OpVT =
N->getOperand(0).getValueType();
7380 return DAG.getNode(ExtendCode, dl, VT,
CC);
7390 EVT VT =
N->getValueType(0);
7392 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
7399 for (
unsigned i = 0; i != NumElts; ++i) {
7401 DAG.getVectorIdxConstant(i, dl));
7403 DAG.getVectorIdxConstant(i, dl));
7405 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
7406 {Chain, LHSElem, RHSElem, CC});
7407 Chains[i] = Scalars[i].getValue(1);
7408 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
7409 DAG.getBoolConstant(
true, dl, EltVT, VT),
7410 DAG.getBoolConstant(
false, dl, EltVT, VT));
7414 ReplaceValueWith(
SDValue(
N, 1), NewChain);
7416 return DAG.getBuildVector(VT, dl, Scalars);
7440 SDValue Op = GetWidenedVector(
N->getOperand(0));
7441 EVT VT =
N->getValueType(0);
7442 EVT OrigVT =
N->getOperand(0).getValueType();
7443 EVT WideVT =
Op.getValueType();
7447 unsigned Opc =
N->getOpcode();
7449 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7450 assert(NeutralElem &&
"Neutral element must exist");
7464 assert(Start.getValueType() == VT);
7467 SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
7474 unsigned GCD = std::gcd(OrigElts, WideElts);
7477 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7478 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7480 DAG.getVectorIdxConstant(
Idx, dl));
7481 return DAG.getNode(Opc, dl, VT,
Op, Flags);
7484 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7486 DAG.getVectorIdxConstant(
Idx, dl));
7488 return DAG.getNode(Opc, dl, VT,
Op, Flags);
7497 EVT VT =
N->getValueType(0);
7499 EVT WideVT =
Op.getValueType();
7503 unsigned Opc =
N->getOpcode();
7505 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7518 SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
7525 unsigned GCD = std::gcd(OrigElts, WideElts);
7528 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7529 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7531 DAG.getVectorIdxConstant(
Idx, dl));
7532 return DAG.getNode(Opc, dl, VT, AccOp,
Op, Flags);
7535 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7537 DAG.getVectorIdxConstant(
Idx, dl));
7539 return DAG.getNode(Opc, dl, VT, AccOp,
Op, Flags);
7543 assert(
N->isVPOpcode() &&
"Expected VP opcode");
7546 SDValue Op = GetWidenedVector(
N->getOperand(1));
7548 Op.getValueType().getVectorElementCount());
7550 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
7551 {N->getOperand(0), Op, Mask, N->getOperand(3)},
7559 EVT VT =
N->getValueType(0);
7570 DAG.getVectorIdxConstant(0,
DL));
7580 return DAG.getNode(
N->getOpcode(),
DL,
N->getValueType(0),
7581 {Source, Mask, N->getOperand(2)},
N->getFlags());
static std::optional<EVT> findMemType(SelectionDAG &DAG,
                                      const TargetLowering &TLI, unsigned Width,
                                      EVT WidenVT, unsigned Align = 0,
                                      unsigned WidenEx = 0) {
  // ...
  unsigned AlignInBits = Align * 8;
  // ...
  EVT RetVT = WidenEltVT;
  // ...
  if (Width == WidenEltWidth)
    // ...
  // ...
      (WidenWidth % MemVTWidth) == 0 &&
      // ...
      (MemVTWidth <= Width ||
       (Align != 0 && MemVTWidth <= AlignInBits &&
        MemVTWidth <= Width + WidenEx))) {
    if (MemVTWidth == WidenWidth)
      // ...
  }
  // ...
      (WidenWidth % MemVTWidth) == 0 &&
      // ...
      (MemVTWidth <= Width ||
       (Align != 0 && MemVTWidth <= AlignInBits &&
        MemVTWidth <= Width + WidenEx))) {
    // ...
  }
  // ...
  return std::nullopt;
}
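// findMemType looks for the widest legal memory type (vector or scalar) that
// evenly divides the widened width and still fits within the number of valid
// bits being transferred (Width, optionally extended by WidenEx when the
// alignment guarantees the extra bytes are accessible). std::nullopt means no
// suitable type was found and the callers fall back to other strategies.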
7667 unsigned Start,
unsigned End) {
7668 SDLoc dl(LdOps[Start]);
7669 EVT LdTy = LdOps[Start].getValueType();
7677 for (
unsigned i = Start + 1; i !=
End; ++i) {
7678 EVT NewLdTy = LdOps[i].getValueType();
7679 if (NewLdTy != LdTy) {
7700 EVT LdVT =
LD->getMemoryVT();
7714 TypeSize WidthDiff = WidenWidth - LdWidth;
7721 std::optional<EVT> FirstVT =
7722 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7729 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7734 std::optional<EVT> NewVT = FirstVT;
7736 TypeSize NewVTWidth = FirstVTWidth;
7738 RemainingWidth -= NewVTWidth;
7745 NewVTWidth = NewVT->getSizeInBits();
7751 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7752 LD->getOriginalAlign(), MMOFlags, AAInfo);
7756 if (MemVTs.
empty()) {
7758 if (!FirstVT->isVector()) {
7765 if (FirstVT == WidenVT)
7770 unsigned NumConcat =
7773 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7774 ConcatOps[0] = LdOp;
7775 for (
unsigned i = 1; i != NumConcat; ++i)
7776 ConcatOps[i] = UndefVal;
7788 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7791 for (
EVT MemVT : MemVTs) {
7792 Align NewAlign = ScaledOffset == 0
7793 ?
LD->getOriginalAlign()
7796 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7800 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7815 EVT LdTy = LdOps[i].getValueType();
7818 for (--i; i >= 0; --i) {
7819 LdTy = LdOps[i].getValueType();
7826 ConcatOps[--
Idx] = LdOps[i];
7827 for (--i; i >= 0; --i) {
7828 EVT NewLdTy = LdOps[i].getValueType();
7829 if (NewLdTy != LdTy) {
7840 WidenOps[j] = ConcatOps[
Idx+j];
7841 for (;
j != NumOps; ++
j)
7842 WidenOps[j] = DAG.getUNDEF(LdTy);
7849 ConcatOps[--
Idx] = LdOps[i];
7860 SDValue UndefVal = DAG.getUNDEF(LdTy);
7863 for (; i !=
End-
Idx; ++i)
7864 WidenOps[i] = ConcatOps[
Idx+i];
7865 for (; i != NumOps; ++i)
7866 WidenOps[i] = UndefVal;
7878 EVT LdVT =
LD->getMemoryVT();
7891 "not yet supported");
7902 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7903 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7909 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7910 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7911 LD->getOriginalAlign(), MMOFlags, AAInfo);
7916 SDValue UndefVal = DAG.getUNDEF(EltVT);
7917 for (; i != WidenNumElts; ++i)
7920 return DAG.getBuildVector(WidenVT, dl, Ops);
7932 SDValue ValOp = GetWidenedVector(
ST->getValue());
7935 EVT StVT =
ST->getMemoryVT();
7943 "Mismatch between store and value types");
7957 std::optional<EVT> NewVT =
7962 TypeSize NewVTWidth = NewVT->getSizeInBits();
7965 StWidth -= NewVTWidth;
7966 MemVTs.
back().second++;
7970 for (
const auto &Pair : MemVTs) {
7971 EVT NewVT = Pair.first;
7972 unsigned Count = Pair.second;
7978 Align NewAlign = ScaledOffset == 0
7979 ?
ST->getOriginalAlign()
7982 DAG.getVectorIdxConstant(
Idx, dl));
7983 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7988 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
8000 DAG.getVectorIdxConstant(
Idx++, dl));
8002 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
8006 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT,
                                       bool FillWithZeroes) {
  // ...
  assert(/* ... */ && "input and widen element type must match");
  assert(/* ... */ && "cannot modify scalable vectors in this way");
  // ...
  SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT)
                                   : DAG.getUNDEF(InVT);
  // ...
    for (unsigned i = 1; i != NumConcat; ++i)
      // ...
  // ...
        DAG.getVectorIdxConstant(0, dl));
  // ...
  assert(/* ... */ && "Scalable vectors should have been handled already.");
  // ...
  unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
  // ...
        DAG.getVectorIdxConstant(Idx, dl));
  // ...
  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;

  SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
  if (!FillWithZeroes)
    return Widened;
  // ...
  assert(/* ... */ &&
         "We expect to never want to FillWithZeroes for non-integral types.");
  // ...
  MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
  MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));

  return DAG.getNode(ISD::AND, dl, NVT, Widened,
                     DAG.getBuildVector(NVT, dl, MaskOps));
}
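// ModifyToType is the generic "make this value look like NVT" helper used by
// the masked/VP widening paths: it concatenates or extracts sub-vectors (or
// rebuilds the value element by element) until it matches NVT's element count,
// padding with UNDEF, or with zeros when FillWithZeroes is set (as is done for
// mask operands so the extra lanes read as inactive).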
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static unsigned getExtendForIntVecReduction(SDNode *N)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getInc() const
const SDValue & getScale() const
const SDValue & getMask() const
const SDValue & getIntID() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
ISD::MemIndexType getIndexType() const
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in types wider than i1.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple values of a smaller type (return 'Expand').
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
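As a sketch of the mapping one would expect between a target's boolean content and the extension used when a boolean is widened (assumed names and a standalone model, not LLVM's code):
#include <cassert>

enum class BoolContent { UndefinedHighBits, ZeroOrOne, ZeroOrNegativeOne };
enum class ExtKind { AnyExtend, ZeroExtend, SignExtend };

// Pick the extension that preserves the target's true/false encoding.
static ExtKind extendForContent(BoolContent C) {
  switch (C) {
  case BoolContent::ZeroOrOne:         return ExtKind::ZeroExtend; // true encoded as 1
  case BoolContent::ZeroOrNegativeOne: return ExtKind::SignExtend; // true encoded as -1
  case BoolContent::UndefinedHighBits: return ExtKind::AnyExtend;  // high bits don't matter
  }
  return ExtKind::AnyExtend;
}

int main() {
  assert(extendForContent(BoolContent::ZeroOrNegativeOne) == ExtKind::SignExtend);
  return 0;
}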
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type VecVT.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
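For intuition only, a self-contained sketch of what scalarizing a vector load means at the memory level (not the DAG-building helper itself): each lane becomes its own scalar load at base + i * sizeof(element).
#include <cassert>
#include <cstring>
#include <vector>

template <typename T>
static std::vector<T> scalarizedLoad(const unsigned char *Base, size_t NumElts) {
  std::vector<T> Lanes(NumElts);
  for (size_t I = 0; I < NumElts; ++I)
    std::memcpy(&Lanes[I], Base + I * sizeof(T), sizeof(T)); // one scalar load per lane
  return Lanes;
}

int main() {
  int Data[4] = {1, 2, 3, 4};
  auto Lanes = scalarizedLoad<int>(reinterpret_cast<unsigned char *>(Data), 4);
  assert(Lanes[2] == 3);
  return 0;
}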
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
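A hedged standalone model of that increment, assuming compressed memory means only the active lanes occupy storage: the address advances by popcount(Mask) * element size, otherwise by the full vector footprint.
#include <bitset>
#include <cassert>
#include <cstdint>

static uint64_t incrementAddress(uint64_t Addr, std::bitset<8> Mask, uint64_t EltBytes,
                                 uint64_t NumElts, bool IsCompressed) {
  uint64_t Advance = IsCompressed ? Mask.count() * EltBytes // only active lanes stored
                                  : NumElts * EltBytes;     // whole vector width
  return Addr + Advance;
}

int main() {
  assert(incrementAddress(0x1000, 0b00001101, 4, 8, /*IsCompressed=*/true) == 0x100C);
  assert(incrementAddress(0x1000, 0b00001101, 4, 8, /*IsCompressed=*/false) == 0x1020);
  return 0;
}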
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at the base address VecPtr.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc, or post-dec.
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent a VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent a VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of the scalar value RHS.
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches our own.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches our own.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) is always even.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as for normal integer types.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
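To make the isKnownLT/LE/GT/GE family above concrete, here is a toy (MinVal, Scalable) model under the assumption that a scalable quantity means MinVal * vscale with an unknown vscale >= 1; a comparison is only "known" when it holds for every possible vscale.
#include <cassert>
#include <cstdint>

struct Quantity {
  uint64_t MinVal;
  bool Scalable;
};

// Known-at-compile-time LHS <= RHS. If LHS is fixed or RHS is scalable, comparing
// the minimum values is enough (vscale >= 1 only grows the scalable side); a
// scalable LHS against a fixed RHS cannot be decided.
static bool isKnownLE(Quantity LHS, Quantity RHS) {
  if (!LHS.Scalable || RHS.Scalable)
    return LHS.MinVal <= RHS.MinVal;
  return false;
}

int main() {
  assert(isKnownLE({4, false}, {4, true}));  // 4 <= 4*vscale for every vscale >= 1
  assert(isKnownLE({4, true}, {8, true}));   // 4*vscale <= 8*vscale always
  assert(!isKnownLE({4, true}, {8, false})); // 4*vscale vs 8: depends on vscale
  return 0;
}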
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand that prevents memory accesses to the masked-off lanes.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same width and scale.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the min and max representable values.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value.
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm.maximum semantics.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded back with the other type.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width and scale.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2], add 1, and halve the result by shifting it one bit right.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector type; the other elements are undefined.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same type.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsigned values starting from 0 with a step of IMM.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the 2N-bit product.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the comparison of ops #0 and #1.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low lanes of an integer vector.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a larger integer register.
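The SHL/SRA idiom it describes can be illustrated standalone (assuming two's-complement arithmetic right shifts):
#include <cassert>
#include <cstdint>

// Sign-extend the low FromBits of a 32-bit value: shift left to put the sign bit
// at the top, then arithmetic-shift right to smear it back down.
static int32_t signExtendInReg(int32_t V, unsigned FromBits) {
  const unsigned Shift = 32 - FromBits;
  return static_cast<int32_t>(static_cast<uint32_t>(V) << Shift) >> Shift;
}

int main() {
  assert(signExtendInReg(0xFF, 8) == -1);  // low byte 0xFF is -1 as i8
  assert(signExtendInReg(0x7F, 8) == 127); // positive values are unchanged
  return 0;
}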
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled into reverse order (RESULT[i] = VECTOR[ElementCount - 1 - i]).
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the min and max representable values.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additional mask operand that prevents memory accesses to the masked-off lanes.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination type.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1], halving the result by shifting it one bit right.
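For intuition, the widened-add-then-shift behaviour of these averaging nodes matches the familiar overflow-free identities for unsigned values (a standalone illustration, not target code):
#include <cassert>
#include <cstdint>

static uint32_t avgFloorU(uint32_t A, uint32_t B) { return (A & B) + ((A ^ B) >> 1); }
static uint32_t avgCeilU(uint32_t A, uint32_t B)  { return (A | B) - ((A ^ B) >> 1); }

int main() {
  assert(avgFloorU(5, 8) == 6);                            // floor((5 + 8) / 2)
  assert(avgCeilU(5, 8) == 7);                             // ceil((5 + 8) / 2)
  assert(avgCeilU(UINT32_MAX, UINT32_MAX) == UINT32_MAX);  // no intermediate overflow
  return 0;
}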
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF), otherwise it returns VAL.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2).
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination type.
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively places vector elements based on Mask, packing the active elements of Vec to the front of the result and filling the remaining lanes from Passthru.
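A small standalone model of the semantics just described (illustrative only, not the expansion code):
#include <cassert>
#include <vector>

static std::vector<int> vectorCompress(const std::vector<int> &Vec,
                                       const std::vector<bool> &Mask,
                                       const std::vector<int> &Passthru) {
  std::vector<int> Result = Passthru;
  size_t Out = 0;
  for (size_t I = 0; I < Vec.size(); ++I)
    if (Mask[I])
      Result[Out++] = Vec[I]; // each active element lands in the next free slot
  return Result;
}

int main() {
  auto R = vectorCompress({1, 2, 3, 4}, {true, false, true, false}, {9, 9, 9, 9});
  assert((R == std::vector<int>{1, 3, 9, 9}));
  return 0;
}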
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer with saturation semantics.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same type.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as signed/unsigned integers.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calculating addresses.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
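Such power-of-two checks reduce to a single-set-bit test; a standalone equivalent of the check (not the library's implementation) is:
#include <cassert>
#include <cstdint>

// V is a power of two (> 0) exactly when it has a single set bit,
// i.e. clearing the lowest set bit leaves zero.
static constexpr bool isPow2(uint64_t V) { return V != 0 && (V & (V - 1)) == 0; }

int main() {
  assert(isPow2(64));
  assert(!isPow2(0) && !isPow2(12));
  return 0;
}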
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned, bool)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
EVT changeElementType(EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type, which is changed to EltVT.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight), returning an integer EVT with that bit width.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type, which is changed to EltVT.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.