#define DEBUG_TYPE "legalize-types"
// ...
void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
  // ...
  switch (N->getOpcode()) {
  // ...
    dbgs() << "ScalarizeVectorResult #" << ResNo << ": ";
  // ...
  case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(N); break;
  // ...
  case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N)); break;
  // ...
  case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
  case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
  // ...
    R = ScalarizeVecRes_VecInregOp(N);
  // ...
    R = ScalarizeVecRes_UnaryOp(N);
  // ...
    R = ScalarizeVecRes_ADDRSPACECAST(N);
  // ...
    R = ScalarizeVecRes_UnaryOpWithTwoResults(N, ResNo);
  // ...
    R = ScalarizeVecRes_BinOp(N);
  // ...
    R = ScalarizeVecRes_CMP(N);
  // ...
    R = ScalarizeVecRes_TernaryOp(N);
  // ...
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    R = ScalarizeVecRes_StrictFPOp(N);
  // ...
    R = ScalarizeVecRes_FP_TO_XINT_SAT(N);
  // ...
    R = ScalarizeVecRes_OverflowOp(N, ResNo);
  // ...
    R = ScalarizeVecRes_FIX(N);
  // ...
  SetScalarizedVector(SDValue(N, ResNo), R);
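// [Added note, not in the upstream source] Result scalarization handles nodes
// whose vector result has a single element: the operation is re-issued on the
// element type and recorded with SetScalarizedVector for later users of the v1
// value. Illustrative sketch: (fadd v1f32 A, B) becomes (fadd f32 a0, b0).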
  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
  return DAG.getNode(N->getOpcode(), SDLoc(N),
                     LHS.getValueType(), LHS, RHS, N->getFlags());
// ...
  if (getTypeAction(LHS.getValueType()) ==
      TargetLowering::TypeScalarizeVector) {
    LHS = GetScalarizedVector(LHS);
    RHS = GetScalarizedVector(RHS);
  } else {
    EVT VT = LHS.getValueType().getVectorElementType();
    // ...
  }
  return DAG.getNode(N->getOpcode(), SDLoc(N),
                     N->getValueType(0).getVectorElementType(), LHS, RHS);
// ...
  SDValue Op0 = GetScalarizedVector(N->getOperand(0));
  SDValue Op1 = GetScalarizedVector(N->getOperand(1));
  SDValue Op2 = GetScalarizedVector(N->getOperand(2));
// ...
  SDValue Op0 = GetScalarizedVector(N->getOperand(0));
  SDValue Op1 = GetScalarizedVector(N->getOperand(1));
// ...
SDValue
DAGTypeLegalizer::ScalarizeVecRes_UnaryOpWithTwoResults(SDNode *N,
                                                        unsigned ResNo) {
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
  // ...
  EVT VT0 = N->getValueType(0);
  EVT VT1 = N->getValueType(1);
  // ...
                  {VT0.getScalarType(), VT1.getScalarType()}, Elt)
  // ...
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  // ...
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
  // ...
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  // ...
  return SDValue(ScalarNode, ResNo);
}
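// [Added note, not in the upstream source] Nodes such as FFREXP produce two
// vector results; only result ResNo is scalarized directly here. The sibling
// result is either recorded as a scalarized vector or rebuilt as a vector and
// replaced, so both results of the original node stay consistent.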
  EVT VT = N->getValueType(0).getVectorElementType();
  unsigned NumOpers = N->getNumOperands();
  // ...
  EVT ValueVTs[] = {VT, MVT::Other};
  // ...
  for (unsigned i = 1; i < NumOpers; ++i) {
    // ...
      Oper = GetScalarizedVector(Oper);
    // ...
  }
  // ...
                               Opers, N->getFlags());
// ...
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  // ...
    ScalarLHS = GetScalarizedVector(N->getOperand(0));
    ScalarRHS = GetScalarizedVector(N->getOperand(1));
  // ...
    ScalarLHS = ElemsLHS[0];
    ScalarRHS = ElemsRHS[0];
  // ...
  SDNode *ScalarNode = DAG.getNode(
      N->getOpcode(), DL, ScalarVTs, ScalarLHS, ScalarRHS).getNode();
  // ...
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  // ...
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
  // ...
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  // ...
  return SDValue(ScalarNode, ResNo);
}
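// [Added note, not in the upstream source] Overflow operations (e.g. UADDO)
// carry a value result and an overflow-flag result. The scalarized node keeps
// both result numbers, and whichever result was not requested (OtherNo) is
// either recorded as scalarized or replaced outright, mirroring the two-result
// handling above.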
  SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
  return GetScalarizedVector(Op);
// ...
  if (Op.getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() == 1 &&
      !isSimpleLegalType(Op.getValueType()))
    Op = GetScalarizedVector(Op);
  EVT NewVT = N->getValueType(0).getVectorElementType();
// ...
SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) {
  EVT EltVT = N->getValueType(0).getVectorElementType();
  // ...
SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
  // ...
                     N->getValueType(0).getVectorElementType(),
                     N->getOperand(0), N->getOperand(1));
// ...
  EVT OpVT = Op.getValueType();
  // ...
    Op = GetScalarizedVector(Op);
  // ...
                     N->getValueType(0).getVectorElementType(), Op,
// ...
  SDValue Op = GetScalarizedVector(N->getOperand(0));
// ...
SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
  // ...
  EVT EltVT = N->getValueType(0).getVectorElementType();
  if (Op.getValueType() != EltVT)
  // ...
  assert(N->isUnindexed() && "Indexed vector load?");
  // ...
      N->getValueType(0).getVectorElementType(), SDLoc(N), N->getChain(),
      N->getBasePtr(), DAG.getUNDEF(N->getBasePtr().getValueType()),
      N->getPointerInfo(), N->getMemoryVT().getVectorElementType(),
      N->getOriginalAlign(), N->getMemOperand()->getFlags(), N->getAAInfo());
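// [Added note, not in the upstream source] A load of a one-element vector is
// re-emitted as a scalar load of the element type, reusing the original chain,
// base pointer, alignment, memory-operand flags, and alias information.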
  EVT DestVT = N->getValueType(0).getVectorElementType();
  // ...
  EVT OpVT = Op.getValueType();
  // ...
    Op = GetScalarizedVector(Op);
// ...
  EVT EltVT = N->getValueType(0).getVectorElementType();
  // ...
  SDValue LHS = GetScalarizedVector(N->getOperand(0));
// ...
  EVT OpVT = Op.getValueType();
  // ...
  EVT EltVT = N->getValueType(0).getVectorElementType();
  // ...
    Op = GetScalarizedVector(Op);
  // ...
  switch (N->getOpcode()) {
// ...
SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(SDNode *N) {
  EVT DestVT = N->getValueType(0).getVectorElementType();
  // ...
  EVT OpVT = Op.getValueType();
  // ...
    Op = GetScalarizedVector(Op);
  // ...
  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
  unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
  unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
// ...
SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
  // ...
  EVT EltVT = N->getValueType(0).getVectorElementType();
// ...
  EVT OpVT = Cond.getValueType();
  // ...
  SDValue LHS = GetScalarizedVector(N->getOperand(1));
  // ...
  EVT OpVT = Cond->getOperand(0).getValueType();
  // ...
  EVT CondVT = Cond.getValueType();
  // ...
  if (ScalarBool != VecBool) {
    switch (ScalarBool) {
    // ...
  auto BoolVT = getSetCCResultType(CondVT);
  if (BoolVT.bitsLT(CondVT))
  // ...
                     GetScalarizedVector(N->getOperand(2)));
// ...
  SDValue LHS = GetScalarizedVector(N->getOperand(1));
  // ...
                     LHS.getValueType(), N->getOperand(0), LHS,
                     GetScalarizedVector(N->getOperand(2)));
// ...
  SDValue LHS = GetScalarizedVector(N->getOperand(2));
  // ...
                     N->getOperand(0), N->getOperand(1),
                     LHS, GetScalarizedVector(N->getOperand(3)),
// ...
  return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
  // ...
  SDValue Arg = N->getOperand(2).getOperand(0);
  // ...
    return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
  unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
  return GetScalarizedVector(N->getOperand(Op));
// ...
SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N) {
  // ...
  EVT SrcVT = Src.getValueType();
  // ...
    Src = GetScalarizedVector(Src);
  // ...
  EVT DstVT = N->getValueType(0).getVectorElementType();
  return DAG.getNode(N->getOpcode(), dl, DstVT, Src, N->getOperand(1));
// ...
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  // ...
  EVT OpVT = LHS.getValueType();
  EVT NVT = N->getValueType(0).getVectorElementType();
  // ...
    LHS = GetScalarizedVector(LHS);
    RHS = GetScalarizedVector(RHS);
  // ...
  return DAG.getNode(ExtendCode, DL, NVT, Res);
// ...
  EVT ResultVT = N->getValueType(0).getVectorElementType();
  // ...
    Arg = GetScalarizedVector(Arg);
  // ...
  return DAG.getNode(ExtendCode, DL, ResultVT, Res);
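// [Added note, not in the upstream source] For scalarized SETCC and IS_FPCLASS
// the scalar comparison yields an i1-style result, which is then extended
// (ExtendCode) to the vector's element type so the replacement value matches
// what users of the original v1 result expect.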
bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
  // ...
  switch (N->getOpcode()) {
  // ...
    dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
  // ...
    Res = ScalarizeVecOp_BITCAST(N);
  // ...
    Res = ScalarizeVecOp_FAKE_USE(N);
  // ...
    Res = ScalarizeVecOp_UnaryOp(N);
  // ...
    Res = ScalarizeVecOp_UnaryOp_StrictFP(N);
  // ...
    Res = ScalarizeVecOp_CONCAT_VECTORS(N);
  // ...
    Res = ScalarizeVecOp_INSERT_SUBVECTOR(N, OpNo);
  // ...
    Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
  // ...
    Res = ScalarizeVecOp_VSELECT(N);
  // ...
    Res = ScalarizeVecOp_VSETCC(N);
  // ...
    Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
  // ...
    Res = ScalarizeVecOp_STRICT_FP_ROUND(N, OpNo);
  // ...
    Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
  // ...
    Res = ScalarizeVecOp_STRICT_FP_EXTEND(N);
  // ...
    Res = ScalarizeVecOp_FP_EXTEND(N);
  // ...
    Res = ScalarizeVecOp_VECREDUCE(N);
  // ...
    Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
  // ...
    Res = ScalarizeVecOp_CMP(N);
  // ...
  if (!Res.getNode())
    return false;
  // ...
         "Invalid operand expansion");
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);
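// [Added note, not in the upstream source] Operand scalarization handles nodes
// whose result type is fine but whose vector *operand* became a single
// element. If no replacement was produced (Res is null) the caller keeps the
// original node; otherwise the node is replaced via ReplaceValueWith.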
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
  // ...
                     N->getValueType(0), Elt);
// ...
  assert(N->getOperand(1).getValueType().getVectorNumElements() == 1 &&
         "Fake Use: Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
// ...
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
  // ...
                     N->getValueType(0).getScalarType(), Elt);
// ...
SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(SDNode *N) {
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  // ...
                        { N->getValueType(0).getScalarType(), MVT::Other },
                        { N->getOperand(0), Elt });
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);
// ...
SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
  // ...
  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    Ops[i] = GetScalarizedVector(N->getOperand(i));
// ...
SDValue DAGTypeLegalizer::ScalarizeVecOp_INSERT_SUBVECTOR(SDNode *N,
  // ...
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  SDValue ContainingVec = N->getOperand(0);
// ...
SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue Res = GetScalarizedVector(N->getOperand(0));
// ...
  SDValue ScalarCond = GetScalarizedVector(N->getOperand(0));
  EVT VT = N->getValueType(0);
// ...
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  assert(N->getValueType(0) == MVT::v1i1 && "Expected v1i1 type");
  // ...
  EVT VT = N->getValueType(0);
  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
  // ...
  EVT OpVT = N->getOperand(0).getValueType();
  // ...
  Res = DAG.getNode(ExtendCode, DL, NVT, Res);
// ...
  assert(N->isUnindexed() && "Indexed store of one-element vector?");
  assert(OpNo == 1 && "Do not know how to scalarize this operand!");
  // ...
  if (N->isTruncatingStore())
    // ...
        N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
        N->getBasePtr(), N->getPointerInfo(),
        N->getMemoryVT().getVectorElementType(), N->getOriginalAlign(),
        N->getMemOperand()->getFlags(), N->getAAInfo());
  // ...
  return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
                      N->getBasePtr(), N->getPointerInfo(),
                      N->getOriginalAlign(), N->getMemOperand()->getFlags(),
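// [Added note, not in the upstream source] A store of a one-element vector is
// emitted as a scalar store of the single element: a truncating store keeps the
// narrower memory element type, and both forms reuse the original base pointer,
// alignment, memory-operand flags, and alias info.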
SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
  assert(OpNo == 0 && "Wrong operand for scalarization!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
  // ...
                     N->getValueType(0).getVectorElementType(), Elt,
// ...
SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N,
  // ...
  assert(OpNo == 1 && "Wrong operand for scalarization!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  // ...
                        { N->getOperand(0), Elt, N->getOperand(2) });
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);
// ...
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
  // ...
                     N->getValueType(0).getVectorElementType(), Elt);
// ...
SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N) {
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  // ...
                        {N->getOperand(0), Elt});
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);
// ...
  SDValue Res = GetScalarizedVector(N->getOperand(0));
// ...
SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
  // ...
  SDValue Op = GetScalarizedVector(VecOp);
  // ...
                     AccOp, Op, N->getFlags());
// ...
  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
  // ...
  EVT ResVT = N->getValueType(0).getVectorElementType();
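// [Added note, not in the upstream source] The next group implements vector
// splitting: a vector type that is too wide for the target is divided into Lo
// and Hi halves and each half is legalized independently. Illustrative sketch:
// an (add v8i32 A, B) becomes two (add v4i32) nodes on the extracted halves.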
void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
  // ...
  if (CustomLowerNode(N, N->getValueType(ResNo), true))
  // ...
  switch (N->getOpcode()) {
  // ...
    dbgs() << "SplitVectorResult #" << ResNo << ": ";
  // ...
  case ISD::VP_SELECT: SplitRes_Select(N, Lo, Hi); break;
  // ...
  case ISD::EXPERIMENTAL_VP_SPLAT: SplitVecRes_VP_SPLAT(N, Lo, Hi); break;
  // ...
    SplitVecRes_ScalarOp(N, Lo, Hi);
  // ...
    SplitVecRes_STEP_VECTOR(N, Lo, Hi);
  // ...
    SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
  // ...
    SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(N), Lo, Hi);
  // ...
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N), Lo, Hi);
  // ...
    SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(N), Lo, Hi);
  // ...
  case ISD::VP_GATHER:
    SplitVecRes_Gather(cast<MemSDNode>(N), Lo, Hi, true);
  // ...
    SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi);
  // ...
    SplitVecRes_SETCC(N, Lo, Hi);
  // ...
    SplitVecRes_VECTOR_REVERSE(N, Lo, Hi);
  // ...
    SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
  // ...
    SplitVecRes_VECTOR_SPLICE(N, Lo, Hi);
  // ...
    SplitVecRes_VECTOR_DEINTERLEAVE(N);
  // ...
    SplitVecRes_VECTOR_INTERLEAVE(N);
  // ...
    SplitVecRes_VAARG(N, Lo, Hi);
  // ...
    SplitVecRes_ExtVecInRegOp(N, Lo, Hi);
  // ...
  case ISD::VP_BITREVERSE:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::VP_FFLOOR:
  case ISD::VP_FNEARBYINT:
  case ISD::VP_FP_EXTEND:
  case ISD::VP_FP_ROUND:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::VP_LLRINT:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_TRUNCATE:
  case ISD::VP_UINT_TO_FP:
    SplitVecRes_UnaryOp(N, Lo, Hi);
  // ...
    SplitVecRes_ADDRSPACECAST(N, Lo, Hi);
  // ...
    SplitVecRes_UnaryOpWithTwoResults(N, ResNo, Lo, Hi);
  // ...
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_ZERO_EXTEND:
    SplitVecRes_ExtendOp(N, Lo, Hi);
  // ...
  case ISD::VP_FMINNUM:
  case ISD::VP_FMAXNUM:
  case ISD::VP_FMINIMUM:
  case ISD::VP_FMAXIMUM:
  // ...
  case ISD::OR: case ISD::VP_OR:
  // ...
  case ISD::VP_FCOPYSIGN:
    SplitVecRes_BinOp(N, Lo, Hi);
  // ...
    SplitVecRes_TernaryOp(N, Lo, Hi);
  // ...
    SplitVecRes_CMP(N, Lo, Hi);
  // ...
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    SplitVecRes_StrictFPOp(N, Lo, Hi);
  // ...
    SplitVecRes_FP_TO_XINT_SAT(N, Lo, Hi);
  // ...
    SplitVecRes_OverflowOp(N, ResNo, Lo, Hi);
  // ...
    SplitVecRes_FIX(N, Lo, Hi);
  // ...
  case ISD::EXPERIMENTAL_VP_REVERSE:
    SplitVecRes_VP_REVERSE(N, Lo, Hi);
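// [Added note, not in the upstream source] Each SplitVecRes_* helper fills in
// the Lo and Hi halves of the illegal result; the dispatch above only selects
// the helper. VP (vector-predicated) opcodes are routed to the same helpers,
// which additionally split the mask and the explicit vector length (EVL).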
void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,
  // ...
                        DL, Ptr.getValueType(),
                        APInt(Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
  // ...
    *ScaledOffset += IncrementSize;
  // ...
  MPI = N->getPointerInfo().getWithOffset(IncrementSize);
// ...
std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask) {
  return SplitMask(Mask, SDLoc(Mask));
}

std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask,
  // ...
  EVT MaskVT = Mask.getValueType();
  // ...
    GetSplitVector(Mask, MaskLo, MaskHi);
  // ...
  return std::make_pair(MaskLo, MaskHi);
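// [Added note, not in the upstream source] SplitMask returns the Lo/Hi halves
// of a vector mask, reusing GetSplitVector when the mask value has itself been
// marked for splitting; IncrementPointer advances a memory operation's pointer
// and MachinePointerInfo past the low half so the high half can be accessed.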
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  // ...
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
  // ...
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() == 2) {
  // ...
  assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  // ...
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
  // ...
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(3), N->getValueType(0), dl);
  // ...
                   {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
  // ...
                   {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
// ...
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  // ...
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
  // ...
  GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi);
  // ...
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() == 3) {
  // ...
  assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  // ...
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
  // ...
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(4), N->getValueType(0), dl);
  // ...
                   {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
  // ...
                   {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
// ...
  SDValue LHSLo, LHSHi, RHSLo, RHSHi;
  // ...
    GetSplitVector(LHS, LHSLo, LHSHi);
    GetSplitVector(RHS, RHSLo, RHSHi);
  // ...
    std::tie(LHSLo, LHSHi) = DAG.SplitVector(LHS, dl);
    std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, dl);
  // ...
  EVT SplitResVT = N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
  Lo = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
  Hi = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  // ...
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
  // ...
  unsigned Opcode = N->getOpcode();
// ...
  switch (getTypeAction(InVT)) {
  // ...
    GetExpandedOp(InOp, Lo, Hi);
  // ...
    GetSplitVector(InOp, Lo, Hi);
  // ...
  SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);
// ...
  assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS");
  // ...
  unsigned NumSubvectors = N->getNumOperands() / 2;
  if (NumSubvectors == 1) {
    Lo = N->getOperand(0);
    Hi = N->getOperand(1);
// ...
void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
  // ...
    GetSplitVector(Vec, Lo, Hi);
    // ...
    EVT LoVT = Lo.getValueType();
    // ...
    unsigned IdxVal = Idx->getAsZExtVal();
    if (IdxVal + SubElems <= LoElems) {
    // ...
               IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
  // ...
  Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
  // ...
  auto *Load = cast<LoadSDNode>(Lo);
  // ...
  IncrementPointer(Load, LoVT, MPI, StackPtr);
  // ...
  Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
// ...
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  // ...
  EVT RHSVT = RHS.getValueType();
  // ...
    GetSplitVector(RHS, RHSLo, RHSHi);
// ...
  SDValue FpValue = N->getOperand(0);
  // ...
  GetSplitVector(FpValue, ArgLo, ArgHi);
// ...
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  // ...
  std::tie(LoVT, HiVT) =
// ...
  unsigned Opcode = N->getOpcode();
  // ...
  GetSplitVector(N0, InLo, InHi);
  // ...
  EVT OutLoVT, OutHiVT;
  // ...
  assert((2 * OutNumElements) <= InNumElements &&
         "Illegal extend vector in reg split");
  // ...
  for (unsigned i = 0; i != OutNumElements; ++i)
    SplitHi[i] = i + OutNumElements;
  // ...
  Lo = DAG.getNode(Opcode, dl, OutLoVT, InLo);
  Hi = DAG.getNode(Opcode, dl, OutHiVT, InHi);
// ...
  unsigned NumOps = N->getNumOperands();
  // ...
  for (unsigned i = 1; i < NumOps; ++i) {
    // ...
    EVT InVT = Op.getValueType();
    // ...
      GetSplitVector(Op, OpLo, OpHi);
  // ...
  EVT LoValueVTs[] = {LoVT, MVT::Other};
  EVT HiValueVTs[] = {HiVT, MVT::Other};
  // ...
                      Lo.getValue(1), Hi.getValue(1));
  // ...
  ReplaceValueWith(SDValue(N, 1), Chain);
// ...
SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(SDNode *N, unsigned ResNE) {
  // ...
  EVT VT = N->getValueType(0);
  // ...
  else if (NE > ResNE)
  // ...
  EVT ChainVTs[] = {EltVT, MVT::Other};
  // ...
  for (i = 0; i != NE; ++i) {
    // ...
    for (unsigned j = 1, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
    // ...
    Scalar.getNode()->setFlags(N->getFlags());
  // ...
  for (; i < ResNE; ++i)
  // ...
  ReplaceValueWith(SDValue(N, 1), Chain);
void DAGTypeLegalizer::SplitVecRes_OverflowOp(SDNode *N, unsigned ResNo,
  // ...
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
  // ...
  SDValue LoLHS, HiLHS, LoRHS, HiRHS;
  // ...
    GetSplitVector(N->getOperand(0), LoLHS, HiLHS);
    GetSplitVector(N->getOperand(1), LoRHS, HiRHS);
  // ...
  unsigned Opcode = N->getOpcode();
  // ...
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  // ...
    SetSplitVector(SDValue(N, OtherNo),
  // ...
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
// ...
void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
  // ...
  GetSplitVector(Vec, Lo, Hi);
  // ...
    unsigned IdxVal = CIdx->getZExtValue();
    unsigned LoNumElts = Lo.getValueType().getVectorMinNumElements();
    if (IdxVal < LoNumElts) {
      // ...
                       Lo.getValueType(), Lo, Elt, Idx);
  // ...
  Lo = DAG.getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
  // ...
  auto Load = cast<LoadSDNode>(Lo);
  // ...
  IncrementPointer(Load, LoVT, MPI, StackPtr);
  // ...
  Hi = DAG.getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
  // ...
  if (LoVT != Lo.getValueType())
  // ...
  if (HiVT != Hi.getValueType())
// ...
  assert(N->getValueType(0).isScalableVector() &&
         "Only scalable vectors are supported for STEP_VECTOR");
// ...
  Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0));
  // ...
  auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1));
  auto [EVLLo, EVLHi] = DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
  Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0), MaskLo, EVLLo);
  Hi = DAG.getNode(N->getOpcode(), dl, HiVT, N->getOperand(0), MaskHi, EVLHi);
// ...
  EVT MemoryVT = LD->getMemoryVT();
  // ...
  EVT LoMemVT, HiMemVT;
  // ...
    ReplaceValueWith(SDValue(LD, 1), NewChain);
  // ...
                   LD->getPointerInfo(), LoMemVT, LD->getOriginalAlign(),
  // ...
  IncrementPointer(LD, LoMemVT, MPI, Ptr);
  // ...
                   HiMemVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
  // ...
  ReplaceValueWith(SDValue(LD, 1), Ch);
  assert(LD->isUnindexed() && "Indexed VP load during type legalization!");
  // ...
  assert(Offset.isUndef() && "Unexpected indexed variable-length load offset");
  Align Alignment = LD->getOriginalAlign();
  // ...
  EVT MemoryVT = LD->getMemoryVT();
  // ...
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
  // ...
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  // ...
    GetSplitVector(Mask, MaskLo, MaskHi);
  // ...
    std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
  // ...
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, LD->getValueType(0), dl);
  // ...
                     MaskLo, EVLLo, LoMemVT, MMO, LD->isExpandingLoad());
  // ...
                     LD->isExpandingLoad());
  // ...
    MPI = LD->getPointerInfo().getWithOffset(
  // ...
        Alignment, LD->getAAInfo(), LD->getRanges());
  // ...
                     Offset, MaskHi, EVLHi, HiMemVT, MMO,
                     LD->isExpandingLoad());
  // ...
  ReplaceValueWith(SDValue(LD, 1), Ch);
// ...
         "Indexed VP strided load during type legalization!");
  // ...
         "Unexpected indexed variable-length load offset");
  // ...
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
  // ...
    SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask);
  // ...
    GetSplitVector(Mask, LoMask, HiMask);
  // ...
  std::tie(LoEVL, HiEVL) =
  // ...
      SLD->getStride(), HiMask, HiEVL, HiMemVT, MMO,
  // ...
  ReplaceValueWith(SDValue(SLD, 1), Ch);
// ...
  assert(Offset.isUndef() && "Unexpected indexed masked load offset");
  // ...
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  // ...
    GetSplitVector(Mask, MaskLo, MaskHi);
  // ...
    std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
  // ...
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
  // ...
  SDValue PassThruLo, PassThruHi;
  // ...
    GetSplitVector(PassThru, PassThruLo, PassThruHi);
  // ...
    std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
  // ...
  ReplaceValueWith(SDValue(MLD, 1), Ch);
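// [Added note, not in the upstream source] For VP, strided, and masked loads
// the mask, EVL, and pass-through values are split alongside the result; the
// high half reads from the pointer advanced past the low half, and the chain
// result of the original load is replaced so users see a single merged chain.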
  if (auto *MSC = dyn_cast<MaskedGatherSDNode>(N)) {
    return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
  // ...
  auto *VPSC = cast<VPGatherSDNode>(N);
  return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
// ...
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  // ...
  if (SplitSETCC && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
  // ...
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
  // ...
  EVT LoMemVT, HiMemVT;
  // ...
  if (getTypeAction(Ops.Index.getValueType()) ==
  // ...
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
  // ...
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, dl);
  // ...
  if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) {
    SDValue PassThru = MGT->getPassThru();
    SDValue PassThruLo, PassThruHi;
    // ...
      GetSplitVector(PassThru, PassThruLo, PassThruHi);
    // ...
      std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
    // ...
    SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Ops.Scale};
    // ...
                              OpsLo, MMO, IndexTy, ExtType);
    // ...
    SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Ops.Scale};
    // ...
                              OpsHi, MMO, IndexTy, ExtType);
  // ...
    auto *VPGT = cast<VPGatherSDNode>(N);
    // ...
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
    // ...
    SDValue OpsLo[] = {Ch, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
    // ...
                          MMO, VPGT->getIndexType());
    // ...
    SDValue OpsHi[] = {Ch, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
    // ...
                          MMO, VPGT->getIndexType());
  // ...
  ReplaceValueWith(SDValue(N, 1), Ch);
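// [Added note, not in the upstream source] Gathers (masked and VP forms) are
// split by splitting the mask, index vector, pass-through, and EVL; the two
// half-width gathers share the original base pointer, scale, and memory
// operand, and the chain result is replaced as for the load cases above.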
  EVT VecVT = N->getValueType(0);
  // ...
  bool HasCustomLowering = false;
  // ...
    HasCustomLowering = true;
  // ...
  SDValue Passthru = N->getOperand(2);
  if (!HasCustomLowering) {
  // ...
  std::tie(LoMask, HiMask) = SplitMask(Mask);
  // ...
      MF, cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex());
  // ...
  Chain = DAG.getStore(Chain, DL, Lo, StackPtr, PtrInfo);
// ...
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  // ...
  if (getTypeAction(N->getOperand(0).getValueType()) ==
  // ...
    GetSplitVector(N->getOperand(0), LL, LH);
  // ...
  if (getTypeAction(N->getOperand(1).getValueType()) ==
  // ...
    GetSplitVector(N->getOperand(1), RL, RH);
  // ...
    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
  // ...
    assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2), MaskLo,
    // ...
    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2), MaskHi,
// ...
  EVT InVT = N->getOperand(0).getValueType();
  // ...
    GetSplitVector(N->getOperand(0), Lo, Hi);
  // ...
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() <= 2) {
    // ...
    Lo = DAG.getNode(Opcode, dl, LoVT, Lo, N->getOperand(1), Flags);
    Hi = DAG.getNode(Opcode, dl, HiVT, Hi, N->getOperand(1), Flags);
  // ...
  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  // ...
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
  // ...
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
// ...
  EVT InVT = N->getOperand(0).getValueType();
  // ...
  GetSplitVector(N->getOperand(0), Lo, Hi);
  // ...
  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
  unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
  unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
void DAGTypeLegalizer::SplitVecRes_UnaryOpWithTwoResults(SDNode *N,
  // ...
  EVT InVT = N->getOperand(0).getValueType();
  // ...
  GetSplitVector(N->getOperand(0), Lo, Hi);
  // ...
  Lo = DAG.getNode(N->getOpcode(), dl, {LoVT, LoVT1}, Lo);
  Hi = DAG.getNode(N->getOpcode(), dl, {HiVT, HiVT1}, Hi);
  Lo->setFlags(N->getFlags());
  Hi->setFlags(N->getFlags());
  // ...
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  // ...
  ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
// ...
  EVT SrcVT = N->getOperand(0).getValueType();
  EVT DestVT = N->getValueType(0);
  // ...
  EVT SplitLoVT, SplitHiVT;
  // ...
    LLVM_DEBUG(dbgs() << "Split vector extend via incremental extend:";
               N->dump(&DAG); dbgs() << "\n");
    if (!N->isVPOpcode()) {
      // ...
          DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0));
      // ...
          DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0),
                      N->getOperand(1), N->getOperand(2));
    // ...
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    // ...
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
    Lo = DAG.getNode(N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
    Hi = DAG.getNode(N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
  // ...
  SplitVecRes_UnaryOp(N, Lo, Hi);
  GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
  GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
  // ...
    return N.getResNo() == 0 &&
  // ...
  auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &DL](SDValue &Input1,
    // ...
           "Expected build vector node.");
    // ...
    for (unsigned I = 0; I < NewElts; ++I) {
      // ...
        Ops[I] = Input2.getOperand(Idx - NewElts);
      // ...
        Ops[I] = Input1.getOperand(Idx);
    // ...
    return DAG.getBuildVector(NewVT, DL, Ops);
  // ...
  auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT, this, NewElts,
    // ...
    for (unsigned Idx = 0; Idx < std::size(Inputs); ++Idx) {
      // ...
      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode());
    // ...
    for (auto &P : ShufflesIdxs) {
      if (P.second.size() < 2)
      // ...
      for (int &Idx : Mask) {
        // ...
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
        // ...
            dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
        // ...
        int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
        // ...
        Idx = MaskElt % NewElts +
              P.second[Shuffle->getOperand(MaskElt / NewElts) == P.first.first
      // ...
      Inputs[P.second[0]] = P.first.first;
      Inputs[P.second[1]] = P.first.second;
      // ...
      ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear();
    // ...
    for (int &Idx : Mask) {
      // ...
      unsigned SrcRegIdx = Idx / NewElts;
      if (Inputs[SrcRegIdx].isUndef()) {
      // ...
          Inputs[SrcRegIdx].getNumOperands() == 2 &&
          !Inputs[SrcRegIdx].getOperand(1).isUndef() &&
      // ...
        UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
    // ...
    if (UsedSubVector.count() > 1) {
      // ...
      for (unsigned I = 0; I < std::size(Inputs); ++I) {
        if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1))
        // ...
        if (Pairs.empty() || Pairs.back().size() == 2)
        // ...
        if (UsedSubVector.test(2 * I)) {
          Pairs.back().emplace_back(I, 0);
        // ...
          assert(UsedSubVector.test(2 * I + 1) &&
                 "Expected to be used one of the subvectors.");
          Pairs.back().emplace_back(I, 1);
      // ...
      if (!Pairs.empty() && Pairs.front().size() > 1) {
        // ...
        for (int &Idx : Mask) {
          // ...
          unsigned SrcRegIdx = Idx / NewElts;
          // ...
              Pairs, [SrcRegIdx](ArrayRef<std::pair<unsigned, int>> Idxs) {
                return Idxs.front().first == SrcRegIdx ||
                       Idxs.back().first == SrcRegIdx;
          // ...
          if (It == Pairs.end())
          // ...
          Idx = It->front().first * NewElts + (Idx % NewElts) % (NewElts / 2) +
                (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
        // ...
        for (ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
          Inputs[Idxs.front().first] = DAG.getNode(
              // ...
              Inputs[Idxs.front().first].getValueType(),
              Inputs[Idxs.front().first].getOperand(Idxs.front().second),
              Inputs[Idxs.back().first].getOperand(Idxs.back().second));
    // ...
    for (unsigned I = 0; I < std::size(Inputs); ++I) {
      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode());
      // ...
      if (Shuffle->getOperand(0).getValueType() != NewVT)
      // ...
      if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
          !Shuffle->isSplat()) {
      } else if (!Inputs[I].hasOneUse() &&
                 !Shuffle->getOperand(1).isUndef()) {
        // ...
        for (int &Idx : Mask) {
          // ...
          unsigned SrcRegIdx = Idx / NewElts;
          // ...
          int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
          // ...
          int OpIdx = MaskElt / NewElts;
        // ...
        for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
          if (Shuffle->getOperand(OpIdx).isUndef())
          // ...
          auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
          if (It == std::end(Inputs))
          // ...
          int FoundOp = std::distance(std::begin(Inputs), It);
          // ...
          for (int &Idx : Mask) {
            // ...
            unsigned SrcRegIdx = Idx / NewElts;
            // ...
            int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
            // ...
            int MaskIdx = MaskElt / NewElts;
            if (OpIdx == MaskIdx)
              Idx = MaskElt % NewElts + FoundOp * NewElts;
          // ...
          Op = (OpIdx + 1) % 2;
        // ...
        for (int &Idx : Mask) {
          // ...
          unsigned SrcRegIdx = Idx / NewElts;
          // ...
          int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
          int OpIdx = MaskElt / NewElts;
          // ...
          Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
  // ...
  TryPeekThroughShufflesInputs(OrigMask);
  // ...
  auto &&MakeUniqueInputs = [&Inputs, &IsConstant,
    // ...
    for (const auto &I : Inputs) {
      // ...
        UniqueConstantInputs.insert(I);
      else if (!I.isUndef())
    // ...
    if (UniqueInputs.size() != std::size(Inputs)) {
      auto &&UniqueVec = UniqueInputs.takeVector();
      auto &&UniqueConstantVec = UniqueConstantInputs.takeVector();
      unsigned ConstNum = UniqueConstantVec.size();
      for (int &Idx : Mask) {
        // ...
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
        // ...
        const auto It = find(UniqueConstantVec, Inputs[SrcRegIdx]);
        if (It != UniqueConstantVec.end()) {
          // ...
                NewElts * std::distance(UniqueConstantVec.begin(), It);
          assert(Idx >= 0 && "Expected defined mask idx.");
        // ...
        const auto RegIt = find(UniqueVec, Inputs[SrcRegIdx]);
        assert(RegIt != UniqueVec.end() && "Cannot find non-const value.");
        // ...
              NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
        assert(Idx >= 0 && "Expected defined mask idx.");
      // ...
      copy(UniqueConstantVec, std::begin(Inputs));
      copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
  // ...
  MakeUniqueInputs(OrigMask);
  // ...
  copy(Inputs, std::begin(OrigInputs));
  // ...
    unsigned FirstMaskIdx = High * NewElts;
    // ...
    assert(!Output && "Expected default initialized initial value.");
    TryPeekThroughShufflesInputs(Mask);
    MakeUniqueInputs(Mask);
    // ...
    copy(Inputs, std::begin(TmpInputs));
    // ...
    bool SecondIteration = false;
    auto &&AccumulateResults = [&UsedIdx, &SecondIteration](unsigned Idx) {
      // ...
      if (UsedIdx >= 0 && static_cast<unsigned>(UsedIdx) == Idx)
        SecondIteration = true;
      return SecondIteration;
    // ...
        Mask, std::size(Inputs), std::size(Inputs),
        // ...
        [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
        [&Output, &DAG = DAG, NewVT, &DL, &Inputs,
          // ...
            Output = BuildVector(Inputs[Idx], Inputs[Idx], Mask);
          // ...
            Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx],
                                          DAG.getUNDEF(NewVT), Mask);
          Inputs[Idx] = Output;
        // ...
        [&AccumulateResults, &Output, &DAG = DAG, NewVT, &DL, &Inputs,
          // ...
          if (AccumulateResults(Idx1)) {
            // ...
              Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
            // ...
              Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1],
                                            Inputs[Idx2], Mask);
          // ...
              Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
            // ...
              Output = DAG.getVectorShuffle(NewVT, DL, TmpInputs[Idx1],
                                            TmpInputs[Idx2], Mask);
          Inputs[Idx1] = Output;
    // ...
    copy(OrigInputs, std::begin(Inputs));
  EVT OVT = N->getValueType(0);
  // ...
  const Align Alignment =
      DAG.getDataLayout().getABITypeAlign(NVT.getTypeForEVT(*DAG.getContext()));
  // ...
  Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, SV, Alignment.value());
  Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, SV, Alignment.value());
  Chain = Hi.getValue(1);
  // ...
  ReplaceValueWith(SDValue(N, 1), Chain);
// ...
  EVT DstVTLo, DstVTHi;
  std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(N->getValueType(0));
  // ...
  EVT SrcVT = N->getOperand(0).getValueType();
  // ...
    GetSplitVector(N->getOperand(0), SrcLo, SrcHi);
  // ...
    std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(N, 0);
  // ...
  Lo = DAG.getNode(N->getOpcode(), dl, DstVTLo, SrcLo, N->getOperand(1));
  Hi = DAG.getNode(N->getOpcode(), dl, DstVTHi, SrcHi, N->getOperand(1));
// ...
  GetSplitVector(N->getOperand(0), InLo, InHi);
  // ...
  std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL);
// ...
  EVT VT = N->getValueType(0);
  // ...
  Align Alignment = DAG.getReducedAlign(VT, false);
  // ...
  auto &MF = DAG.getMachineFunction();
  // ...
                         DAG.getConstant(1, DL, PtrVT));
  // ...
                         DAG.getConstant(EltWidth, DL, PtrVT));
  // ...
  SDValue Stride = DAG.getConstant(-(int64_t)EltWidth, DL, PtrVT);
  // ...
  SDValue TrueMask = DAG.getBoolConstant(true, DL, Mask.getValueType(), VT);
  SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(), DL, Val, StorePtr,
                                        DAG.getUNDEF(PtrVT), Stride, TrueMask,
  // ...
  SDValue Load = DAG.getLoadVP(VT, DL, Store, StackPtr, Mask, EVL, LoadMMO);
  // ...
  std::tie(Lo, Hi) = DAG.SplitVector(Load, DL);
// ...
void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) {
  // ...
  SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
  // ...
                              DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
  // ...
                              DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
// ...
void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(SDNode *N) {
  SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
  // ...
                   DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
  // ...
                   DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
  // ...
  SetSplitVector(SDValue(N, 0), Res[0].getValue(0), Res[0].getValue(1));
  SetSplitVector(SDValue(N, 1), Res[1].getValue(0), Res[1].getValue(1));
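// [Added note, not in the upstream source] Deinterleave and interleave nodes
// have two vector results, so splitting produces two half-width interleave/
// deinterleave nodes and registers each pair of halves with SetSplitVector
// rather than returning a single Lo/Hi pair.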
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
  // ...
  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
  // ...
  switch (N->getOpcode()) {
  // ...
    dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
  // ...
  case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
  // ...
  case ISD::VP_TRUNCATE:
    Res = SplitVecOp_TruncateHelper(N);
  // ...
  case ISD::VP_FP_ROUND:
  // ...
    Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
  // ...
    Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(N), OpNo);
  // ...
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(N), OpNo);
  // ...
    Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
  // ...
  case ISD::VP_SCATTER:
    Res = SplitVecOp_Scatter(cast<MemSDNode>(N), OpNo);
  // ...
  case ISD::VP_GATHER:
    Res = SplitVecOp_Gather(cast<MemSDNode>(N), OpNo);
  // ...
    Res = SplitVecOp_VSELECT(N, OpNo);
  // ...
    Res = SplitVecOp_VECTOR_COMPRESS(N, OpNo);
  // ...
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_UINT_TO_FP:
    if (N->getValueType(0).bitsLT(
            N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType()))
      Res = SplitVecOp_TruncateHelper(N);
    // ...
      Res = SplitVecOp_UnaryOp(N);
  // ...
    Res = SplitVecOp_FP_TO_XINT_SAT(N);
  // ...
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  // ...
    Res = SplitVecOp_UnaryOp(N);
  // ...
    Res = SplitVecOp_FPOpDifferentTypes(N);
  // ...
    Res = SplitVecOp_CMP(N);
  // ...
    Res = SplitVecOp_FAKE_USE(N);
  // ...
    Res = SplitVecOp_ExtVecInRegOp(N);
  // ...
    Res = SplitVecOp_VECREDUCE(N, OpNo);
  // ...
    Res = SplitVecOp_VECREDUCE_SEQ(N);
  // ...
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = SplitVecOp_VP_REDUCE(N, OpNo);
  // ...
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = SplitVecOp_VP_CttzElements(N);
  // ...
    Res = SplitVecOp_VECTOR_HISTOGRAM(N);
  // ...
  if (!Res.getNode())
    return false;
  // ...
  if (N->isStrictFPOpcode())
    // ...
           "Invalid operand expansion");
  // ...
           "Invalid operand expansion");
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);
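// [Added note, not in the upstream source] Operand splitting covers nodes whose
// result type is already legal but whose vector operand must be split, e.g. the
// stored value of a store, the source of a reduction or truncation, or the mask
// of a VSELECT; the handlers below recombine the two halves into one result.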
SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
  // ...
  assert(OpNo == 0 && "Illegal operand must be mask");
  // ...
  assert(Mask.getValueType().isVector() && "VSELECT without a vector mask?");
  // ...
  GetSplitVector(N->getOperand(0), Lo, Hi);
  assert(Lo.getValueType() == Hi.getValueType() &&
         "Lo and Hi have differing types");
  // ...
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
  assert(LoOpVT == HiOpVT && "Asymmetric vector split?");
  // ...
  SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
  std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL);
  std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL);
  std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
// ...
SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_COMPRESS(SDNode *N, unsigned OpNo) {
  // ...
  assert(OpNo == 1 && "Illegal operand must be mask");
  // ...
  SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi);
  // ...
  EVT VecVT = N->getValueType(0);
// ...
SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) {
  EVT ResVT = N->getValueType(0);
  // ...
  SDValue VecOp = N->getOperand(OpNo);
  // ...
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  // ...
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
  // ...
  return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, N->getFlags());
// ...
  EVT ResVT = N->getValueType(0);
  // ...
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  // ...
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
  // ...
  return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);
// ...
SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo) {
  assert(N->isVPOpcode() && "Expected VP opcode");
  assert(OpNo == 1 && "Can only split reduce vector operand");
  // ...
  unsigned Opc = N->getOpcode();
  EVT ResVT = N->getValueType(0);
  // ...
  SDValue VecOp = N->getOperand(OpNo);
  // ...
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  // ...
  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
  // ...
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(N->getOperand(3), VecVT, dl);
  // ...
      DAG.getNode(Opc, dl, ResVT, {N->getOperand(0), Lo, MaskLo, EVLLo}, Flags);
  return DAG.getNode(Opc, dl, ResVT, {ResLo, Hi, MaskHi, EVLHi}, Flags);
  EVT ResVT = N->getValueType(0);
  // ...
  GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi);
  EVT InVT = Lo.getValueType();
  // ...
  if (N->isStrictFPOpcode()) {
    Lo = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other },
                     { N->getOperand(0), Lo });
    Hi = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other },
                     { N->getOperand(0), Hi });
    // ...
    ReplaceValueWith(SDValue(N, 1), Ch);
  } else if (N->getNumOperands() == 3) {
    assert(N->isVPOpcode() && "Expected VP opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo, MaskLo, EVLLo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi, MaskHi, EVLHi);
  // ...
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi);
// ...
  GetSplitVector(N->getOperand(1), Lo, Hi);
// ...
  EVT ResVT = N->getValueType(0);
  // ...
  GetSplitVector(N->getOperand(0), Lo, Hi);
  // ...
  auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
  // ...
  Lo = BitConvertToInteger(Lo);
  Hi = BitConvertToInteger(Hi);
  // ...
  if (DAG.getDataLayout().isBigEndian())
// ...
  assert(OpNo == 1 && "Invalid OpNo; can only split SubVec.");
  // ...
  EVT ResVT = N->getValueType(0);
  // ...
  GetSplitVector(SubVec, Lo, Hi);
  // ...
  uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
  // ...
                  DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
  // ...
  return SecondInsertion;
// ...
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  // ...
  EVT SubVT = N->getValueType(0);
  // ...
  GetSplitVector(N->getOperand(0), Lo, Hi);
  // ...
  uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
  // ...
  if (IdxVal < LoEltsMin) {
    // ...
           "Extracted subvector crosses vector split!");
  // ...
             N->getOperand(0).getValueType().isScalableVector())
    // ...
                       DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
  // ...
         "Extracting scalable subvector from fixed-width unsupported");
  // ...
         "subvector from a scalable predicate vector");
  // ...
  Align SmallestAlign = DAG.getReducedAlign(VecVT, false);
  // ...
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  // ...
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
  // ...
      SubVT, dl, Store, StackPtr,
// ...
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  // ...
  GetSplitVector(Vec, Lo, Hi);
  // ...
  uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
  // ...
  if (IdxVal < LoElts)
    // ...
                        DAG.getConstant(IdxVal - LoElts, SDLoc(N),
                                        Idx.getValueType())), 0);
  // ...
  if (CustomLowerNode(N, N->getValueType(0), true))
  // ...
    return DAG.getAnyExtOrTrunc(NewExtract, dl, N->getValueType(0));
  // ...
  Align SmallestAlign = DAG.getReducedAlign(VecVT, false);
  // ...
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  // ...
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
  // ...
  assert(N->getValueType(0).bitsGE(EltVT) && "Illegal EXTRACT_VECTOR_ELT.");
  // ...
  return DAG.getExtLoad(
// ...
  SplitVecRes_ExtVecInRegOp(N, Lo, Hi);
// ...
  SplitVecRes_Gather(N, Lo, Hi);
  // ...
  ReplaceValueWith(SDValue(N, 0), Res);
  assert(N->isUnindexed() && "Indexed vp_store of vector?");
  // ...
  assert(Offset.isUndef() && "Unexpected VP store offset");
  // ...
  SDValue EVL = N->getVectorLength();
  // ...
  Align Alignment = N->getOriginalAlign();
  // ...
    GetSplitVector(Data, DataLo, DataHi);
  // ...
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
  // ...
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  // ...
    GetSplitVector(Mask, MaskLo, MaskHi);
  // ...
    std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
  // ...
  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);
  // ...
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, Data.getValueType(), DL);
  // ...
  Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO,
                      N->getAddressingMode(), N->isTruncatingStore(),
                      N->isCompressingStore());
  // ...
                      N->isCompressingStore());
  // ...
    MPI = N->getPointerInfo().getWithOffset(
  // ...
  MMO = DAG.getMachineFunction().getMachineMemOperand(
      // ...
      Alignment, N->getAAInfo(), N->getRanges());
  // ...
  Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT, MMO,
                      N->getAddressingMode(), N->isTruncatingStore(),
                      N->isCompressingStore());
// ...
  assert(N->isUnindexed() && "Indexed vp_strided_store of a vector?");
  assert(N->getOffset().isUndef() && "Unexpected VP strided store offset");
  // ...
    GetSplitVector(Data, LoData, HiData);
  // ...
    std::tie(LoData, HiData) = DAG.SplitVector(Data, DL);
  // ...
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
  // ...
    SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask);
  else if (getTypeAction(Mask.getValueType()) ==
    // ...
    GetSplitVector(Mask, LoMask, HiMask);
  // ...
    std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
  // ...
  std::tie(LoEVL, HiEVL) =
      DAG.SplitEVL(N->getVectorLength(), Data.getValueType(), DL);
  // ...
      N->getChain(), DL, LoData, N->getBasePtr(), N->getOffset(),
      N->getStride(), LoMask, LoEVL, LoMemVT, N->getMemOperand(),
      N->getAddressingMode(), N->isTruncatingStore(), N->isCompressingStore());
  // ...
  EVT PtrVT = N->getBasePtr().getValueType();
  // ...
                    DAG.getSExtOrTrunc(N->getStride(), DL, PtrVT));
  // ...
  Align Alignment = N->getOriginalAlign();
  // ...
      Alignment, N->getAAInfo(), N->getRanges());
  // ...
      N->getChain(), DL, HiData, Ptr, N->getOffset(), N->getStride(), HiMask,
      HiEVL, HiMemVT, MMO, N->getAddressingMode(), N->isTruncatingStore(),
      N->isCompressingStore());
  assert(N->isUnindexed() && "Indexed masked store of vector?");
  // ...
  assert(Offset.isUndef() && "Unexpected indexed masked store offset");
  // ...
  Align Alignment = N->getOriginalAlign();
  // ...
    GetSplitVector(Data, DataLo, DataHi);
  // ...
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
  // ...
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  // ...
    GetSplitVector(Mask, MaskLo, MaskHi);
  // ...
    std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
  // ...
  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);
  // ...
  Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO,
                          N->getAddressingMode(), N->isTruncatingStore(),
                          N->isCompressingStore());
  // ...
                          N->isCompressingStore());
  // ...
    MPI = N->getPointerInfo().getWithOffset(
  // ...
  MMO = DAG.getMachineFunction().getMachineMemOperand(
      // ...
      Alignment, N->getAAInfo(), N->getRanges());
  // ...
  Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO,
                          N->getAddressingMode(), N->isTruncatingStore(),
                          N->isCompressingStore());
// ...
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  // ...
  if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
    return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
  // ...
  auto *VPSC = cast<VPScatterSDNode>(N);
  return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
  // ...
  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
  // ...
    GetSplitVector(Ops.Data, DataLo, DataHi);
  // ...
    std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data, DL);
  // ...
  if (OpNo == 1 && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
  // ...
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, DL);
  // ...
  if (getTypeAction(Ops.Index.getValueType()) ==
    // ...
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
  // ...
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, DL);
  // ...
  if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
    SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale};
    // ...
        DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                             MSC->getIndexType(), MSC->isTruncatingStore());
    // ...
    SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Ops.Scale};
    return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi,
                                MMO, MSC->getIndexType(),
                                MSC->isTruncatingStore());
  // ...
  auto *VPSC = cast<VPScatterSDNode>(N);
  // ...
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(), DL);
  // ...
  SDValue OpsLo[] = {Ch, DataLo, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
  Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                        VPSC->getIndexType());
  // ...
  SDValue OpsHi[] = {Lo, DataHi, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
  return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi, MMO,
                          VPSC->getIndexType());
  assert(N->isUnindexed() && "Indexed store of vector?");
  assert(OpNo == 1 && "Can only split the stored value");
  // ...
  bool isTruncating = N->isTruncatingStore();
  // ...
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  // ...
  GetSplitVector(N->getOperand(1), Lo, Hi);
  // ...
  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
  // ...
    Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT,
                           Alignment, MMOFlags, AAInfo);
  // ...
    Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags,
  // ...
  IncrementPointer(N, LoMemVT, MPI, Ptr);
  // ...
    Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, MPI,
                           HiMemVT, Alignment, MMOFlags, AAInfo);
  // ...
    Hi = DAG.getStore(Ch, DL, Hi, Ptr, MPI, Alignment, MMOFlags, AAInfo);
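// [Added note, not in the upstream source] Splitting a wide store issues the
// low-half store at the original address, advances the pointer and
// MachinePointerInfo with IncrementPointer, then issues the high-half store;
// truncating stores keep their narrower Lo/Hi memory value types.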
  EVT EltVT = N->getValueType(0).getVectorElementType();
  // ...
  for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
    // ...
                              DAG.getVectorIdxConstant(i, DL)));
  // ...
  return DAG.getBuildVector(N->getValueType(0), DL, Elts);
// ...
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SDValue InVec = N->getOperand(OpNo);
  // ...
  EVT OutVT = N->getValueType(0);
  // ...
  EVT LoOutVT, HiOutVT;
  std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
  assert(LoOutVT == HiOutVT && "Unequal split?");
  // ...
  if (isTypeLegal(LoOutVT) ||
      InElementSize <= OutElementSize * 2)
    return SplitVecOp_UnaryOp(N);
  // ...
    return SplitVecOp_UnaryOp(N);
  // ...
  GetSplitVector(InVec, InLoVec, InHiVec);
  // ...
  EVT HalfElementVT = IsFloat ?
      // ...
      EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
  // ...
  if (N->isStrictFPOpcode()) {
    HalfLo = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InLoVec});
    HalfHi = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InHiVec});
  // ...
    HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
    HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);
  // ...
  if (N->isStrictFPOpcode()) {
    // ...
         DAG.getTargetConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()))});
  // ...
      DAG.getTargetConstant(
// ...
  unsigned Opc = N->getOpcode();
  // ...
  assert(N->getValueType(0).isVector() &&
         N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
         "Operand types must be vectors");
  // ...
  SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
  // ...
  GetSplitVector(N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
  GetSplitVector(N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);
  // ...
  } else if (isStrict) {
    LoRes = DAG.getNode(Opc, DL, DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Lo0, Lo1, N->getOperand(3));
    HiRes = DAG.getNode(Opc, DL, DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Hi0, Hi1, N->getOperand(3));
    // ...
    ReplaceValueWith(SDValue(N, 1), NewChain);
  // ...
    assert(Opc == ISD::VP_SETCC && "Expected VP_SETCC opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
    LoRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Lo0, Lo1,
                        N->getOperand(2), MaskLo, EVLLo);
    HiRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Hi0, Hi1,
                        N->getOperand(2), MaskHi, EVLHi);
  // ...
  EVT OpVT = N->getOperand(0).getValueType();
  // ...
  return DAG.getNode(ExtendCode, DL, N->getValueType(0), Con);
4290 EVT ResVT =
N->getValueType(0);
4293 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
4294 EVT InVT =
Lo.getValueType();
4299 if (
N->isStrictFPOpcode()) {
4300 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4301 { N->getOperand(0), Lo, N->getOperand(2) });
4302 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4303 { N->getOperand(0), Hi, N->getOperand(2) });
4307 Lo.getValue(1),
Hi.getValue(1));
4308 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4309 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4310 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4311 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4312 std::tie(EVLLo, EVLHi) =
4313 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4314 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4315 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
4329SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4332 EVT LHSLoVT, LHSHiVT;
4333 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
4335 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4336 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
4339 std::tie(LHSLo, LHSHi) =
4340 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4343 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4345 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4346 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4355 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
4356 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
4357 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
4359 EVT ResVT =
N->getValueType(0);
4364 SDValue Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
4365 SDValue Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);
4371 EVT ResVT =
N->getValueType(0);
4374 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4375 EVT InVT =
Lo.getValueType();
4381 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4382 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
4389 EVT ResVT =
N->getValueType(0);
4393 GetSplitVector(VecOp,
Lo,
Hi);
4395 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
4396 auto [EVLLo, EVLHi] =
4398 SDValue VLo = DAG.getZExtOrTrunc(EVLLo,
DL, ResVT);
4404 DAG.getSetCC(
DL, getSetCCResultType(ResVT), ResLo, VLo,
ISD::SETNE);
4406 return DAG.getSelect(
DL, ResVT, ResLoNotEVL, ResLo,
4407 DAG.getNode(
ISD::ADD,
DL, ResVT, VLo, ResHi));
SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_HISTOGRAM(SDNode *N) {
  SDValue IndexLo, IndexHi, MaskLo, MaskHi;
  std::tie(IndexLo, IndexHi) = DAG.SplitVector(HG->getIndex(), DL);
  std::tie(MaskLo, MaskHi) = DAG.SplitVector(HG->getMask(), DL);
  SDValue Lo = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL,
                                      OpsLo, MMO, IndexType);
  SDValue OpsHi[] = {Lo, Inc, MaskHi, Ptr, IndexHi, Scale, IntID};
  return DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL, OpsHi,
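// For a multi-result node whose result WidenResNo has been widened, produce
// values of the original types for the remaining results and replace their
// uses.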
void DAGTypeLegalizer::ReplaceOtherWidenResults(SDNode *N, SDNode *WidenNode,
                                                unsigned WidenResNo) {
  unsigned NumResults = N->getNumValues();
  for (unsigned ResNo = 0; ResNo < NumResults; ResNo++) {
    if (ResNo == WidenResNo)
    EVT ResVT = N->getValueType(ResNo);
                         DAG.getVectorIdxConstant(0, DL));
    ReplaceValueWith(SDValue(N, ResNo), ResVal);
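// Entry point for result widening: dispatch on the opcode to a WidenVecRes_*
// helper and record the widened value for SDValue(N, ResNo).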
void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
  LLVM_DEBUG(dbgs() << "Widen node result " << ResNo << ": "; N->dump(&DAG));
  if (CustomWidenLowerNode(N, N->getValueType(ResNo)))
  auto unrollExpandedOp = [&]() {
    EVT VT = N->getValueType(0);
    if (N->getNumValues() > 1)
      ReplaceOtherWidenResults(N, Res.getNode(), ResNo);
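  // Dispatch to the matching WidenVecRes_* helper; opcodes the target expands
  // are instead unrolled through the unrollExpandedOp() lambda above.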
  switch (N->getOpcode()) {
    dbgs() << "WidenVectorResult #" << ResNo << ": ";
    Res = WidenVecRes_ADDRSPACECAST(N);
    Res = WidenVecRes_INSERT_SUBVECTOR(N);
  case ISD::LOAD:              Res = WidenVecRes_LOAD(N); break;
  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecRes_ScalarOp(N);
  case ISD::VP_SELECT:
    Res = WidenVecRes_Select(N);
  case ISD::SETCC:             Res = WidenVecRes_SETCC(N); break;
  case ISD::UNDEF:             Res = WidenVecRes_UNDEF(N); break;
    Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
    Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(N));
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N));
    Res = WidenVecRes_VECTOR_COMPRESS(N);
    Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(N));
    Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(N));
  case ISD::VP_GATHER:
    Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(N));
    Res = WidenVecRes_VECTOR_REVERSE(N);
  case ISD::OR:
  case ISD::VP_OR:
  case ISD::VP_FMINNUM:
  case ISD::VP_FMAXNUM:
  case ISD::VP_FMINIMUM:
  case ISD::VP_FMAXIMUM:
  case ISD::VP_FCOPYSIGN:
    Res = WidenVecRes_Binary(N);
    Res = WidenVecRes_CMP(N);
    if (unrollExpandedOp())
    Res = WidenVecRes_BinaryCanTrap(N);
    Res = WidenVecRes_BinaryWithExtraScalarOp(N);
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    Res = WidenVecRes_StrictFP(N);
    Res = WidenVecRes_OverflowOp(N, ResNo);
    Res = WidenVecRes_FCOPYSIGN(N);
    Res = WidenVecRes_UnarySameEltsWithScalarArg(N);
    if (!unrollExpandedOp())
      Res = WidenVecRes_ExpOp(N);
    Res = WidenVecRes_EXTEND_VECTOR_INREG(N);
  case ISD::VP_FP_EXTEND:
  case ISD::VP_FP_ROUND:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_TRUNCATE:
  case ISD::VP_UINT_TO_FP:
  case ISD::VP_ZERO_EXTEND:
    Res = WidenVecRes_Convert(N);
    Res = WidenVecRes_FP_TO_XINT_SAT(N);
  case ISD::VP_LLRINT:
    Res = WidenVecRes_XROUND(N);
    if (unrollExpandedOp())
  case ISD::VP_BITREVERSE:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::VP_FFLOOR:
  case ISD::VP_FNEARBYINT:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
    Res = WidenVecRes_Unary(N);
    Res = WidenVecRes_Ternary(N);
    if (!unrollExpandedOp())
      Res = WidenVecRes_UnaryOpWithTwoResults(N, ResNo);
  SetWidenedVector(SDValue(N, ResNo), Res);
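// Widen a three-operand node: widen every vector operand and rebuild the node
// in the wider type; VP forms additionally carry a mask and an explicit
// vector length (operand 4).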
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  SDValue InOp3 = GetWidenedVector(N->getOperand(2));
  if (N->getNumOperands() == 3)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
  assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  return DAG.getNode(N->getOpcode(), dl, WidenVT,
                     {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
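// Widen a plain binary node. VP forms carry a mask and an explicit vector
// length as their third and fourth operands.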
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  if (N->getNumOperands() == 2)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2,
  assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  return DAG.getNode(N->getOpcode(), dl, WidenVT,
                     {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags());

  EVT OpVT = LHS.getValueType();
  LHS = GetWidenedVector(LHS);
  RHS = GetWidenedVector(RHS);
  OpVT = LHS.getValueType();
  return DAG.getNode(N->getOpcode(), dl, WidenResVT, LHS, RHS);

SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) {
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
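// Concatenate the partial results collected in ConcatOps[0..ConcatEnd) back
// into a single value of the widened type, padding with UNDEF where the
// pieces do not fill the wider vector exactly.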
                                 unsigned ConcatEnd, EVT VT, EVT MaxVT,
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
      return ConcatOps[0];
  SDLoc dl(ConcatOps[0]);
  while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
    int Idx = ConcatEnd - 1;
    VT = ConcatOps[Idx--].getValueType();
    unsigned NumToInsert = ConcatEnd - Idx - 1;
    for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
    ConcatOps[Idx+1] = VecOp;
    ConcatEnd = Idx + 2;
    unsigned RealVals = ConcatEnd - Idx - 1;
    unsigned SubConcatEnd = 0;
    unsigned SubConcatIdx = Idx + 1;
    while (SubConcatEnd < RealVals)
      SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
    while (SubConcatEnd < OpsToConcat)
      SubConcatOps[SubConcatEnd++] = undefVec;
                                            NextVT, SubConcatOps);
    ConcatEnd = SubConcatIdx + 1;
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
      return ConcatOps[0];
  if (NumOps != ConcatEnd) {
    for (unsigned j = ConcatEnd; j < NumOps; ++j)
      ConcatOps[j] = UndefVal;
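// Widen a binary operation that can trap (e.g. integer division). The extra
// lanes of a widened vector must not fault, so when the target says the op
// can trap the work is done on progressively smaller legal pieces instead of
// the full widened vector.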
  unsigned Opcode = N->getOpcode();
  NumElts = NumElts / 2;
  if (NumElts != 1 && !TLI.canOpTrap(N->getOpcode(), VT)) {
    SDValue InOp1 = GetWidenedVector(N->getOperand(0));
    SDValue InOp2 = GetWidenedVector(N->getOperand(1));
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);

  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
                            N->getValueType(0).getVectorElementCount());
  return DAG.getNode(*VPOpcode, dl, WidenVT, InOp1, InOp2, Mask, EVL,

  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
  unsigned ConcatEnd = 0;
  while (CurNumElts != 0) {
    while (CurNumElts >= NumElts) {
                         DAG.getVectorIdxConstant(Idx, dl));
                         DAG.getVectorIdxConstant(Idx, dl));
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
      CurNumElts -= NumElts;
    NumElts = NumElts / 2;
    for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
                         InOp1, DAG.getVectorIdxConstant(Idx, dl));
                         InOp2, DAG.getVectorIdxConstant(Idx, dl));
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
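// Widen a constrained (strict) FP operation. To avoid spurious FP exceptions
// on the padding lanes, the operation is performed on smaller legal pieces
// and the per-piece chains are merged back into the node's chain result.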
  switch (N->getOpcode()) {
    return WidenVecRes_STRICT_FSETCC(N);
    return WidenVecRes_Convert_StrictFP(N);

  unsigned NumOpers = N->getNumOperands();
  unsigned Opcode = N->getOpcode();
  NumElts = NumElts / 2;
  unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
  unsigned ConcatEnd = 0;
  for (unsigned i = 1; i < NumOpers; ++i) {
      Oper = GetWidenedVector(Oper);
                         DAG.getUNDEF(WideOpVT), Oper,
                         DAG.getVectorIdxConstant(0, dl));
  while (CurNumElts != 0) {
    while (CurNumElts >= NumElts) {
      for (unsigned i = 0; i < NumOpers; ++i) {
        EVT OpVT = Op.getValueType();
                          DAG.getVectorIdxConstant(Idx, dl));
      EVT OperVT[] = {VT, MVT::Other};
      ConcatOps[ConcatEnd++] = Oper;
      CurNumElts -= NumElts;
    NumElts = NumElts / 2;
    for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
      for (unsigned i = 0; i < NumOpers; ++i) {
        EVT OpVT = Op.getValueType();
                          DAG.getVectorIdxConstant(Idx, dl));
      EVT WidenVT[] = {WidenEltVT, MVT::Other};
      ConcatOps[ConcatEnd++] = Oper;
  if (Chains.size() == 1)
    NewChain = Chains[0];
  ReplaceValueWith(SDValue(N, 1), NewChain);
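// Widen an arithmetic-with-overflow node: widen both inputs, rebuild the
// two-result node, and hand back a value of the original type for whichever
// result was not the one being widened.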
SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(SDNode *N, unsigned ResNo) {
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT WideResVT, WideOvVT;
    WideLHS = GetWidenedVector(N->getOperand(0));
    WideRHS = GetWidenedVector(N->getOperand(1));
        N->getOperand(0), Zero);
        N->getOperand(1), Zero);
  SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
  SDNode *WideNode = DAG.getNode(
      N->getOpcode(), DL, WideVTs, WideLHS, WideRHS).getNode();
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  return SDValue(WideNode, ResNo);
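// Widen a conversion (truncate/extend/int<->fp, including the VP forms). When
// the widened source and destination element counts line up, convert
// directly; otherwise convert element by element and rebuild the result with
// a BUILD_VECTOR.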
  unsigned Opcode = N->getOpcode();
    InOp = ZExtPromotedInteger(InOp);
    InOp = GetWidenedVector(N->getOperand(0));
    if (InVTEC == WidenEC) {
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InOp, Flags);
      if (N->getNumOperands() == 3) {
        assert(N->isVPOpcode() && "Expected VP opcode");
        return DAG.getNode(Opcode, DL, WidenVT, InOp, Mask, N->getOperand(2));
      return DAG.getNode(Opcode, DL, WidenVT, InOp, N->getOperand(1), Flags);
    unsigned NumConcat =
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InVec, Flags);
      return DAG.getNode(Opcode, DL, WidenVT, InVec, N->getOperand(1), Flags);
                          DAG.getVectorIdxConstant(0, DL));
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InVal, Flags);
      return DAG.getNode(Opcode, DL, WidenVT, InVal, N->getOperand(1), Flags);
  unsigned MinElts = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < MinElts; ++i) {
                      DAG.getVectorIdxConstant(i, DL));
    if (N->getNumOperands() == 1)
      Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, Flags);
      Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, N->getOperand(1), Flags);
  return DAG.getBuildVector(WidenVT, DL, Ops);
  EVT SrcVT = Src.getValueType();
    Src = GetWidenedVector(Src);
    SrcVT = Src.getValueType();
  return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, N->getOperand(1));

  EVT SrcVT = Src.getValueType();
    Src = GetWidenedVector(Src);
    SrcVT = Src.getValueType();
  if (N->getNumOperands() == 1)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, Src);
  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, Mask, N->getOperand(2));

SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(SDNode *N) {
  unsigned Opcode = N->getOpcode();
  std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
  unsigned MinElts = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < MinElts; ++i) {
                      DAG.getVectorIdxConstant(i, DL));
    Ops[i] = DAG.getNode(Opcode, DL, EltVTs, NewOps);
  ReplaceValueWith(SDValue(N, 1), NewChain);
  return DAG.getBuildVector(WidenVT, DL, Ops);
SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(SDNode *N) {
  unsigned Opcode = N->getOpcode();
    InOp = GetWidenedVector(InOp);
    return DAG.getNode(Opcode, DL, WidenVT, InOp);
  for (unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i != e; ++i) {
                      DAG.getVectorIdxConstant(i, DL));
  while (Ops.size() != WidenNumElts)
  return DAG.getBuildVector(WidenVT, DL, Ops);

  if (N->getOperand(0).getValueType() == N->getOperand(1).getValueType())
    return WidenVecRes_BinaryCanTrap(N);

SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(SDNode *N) {
  SDValue FpValue = N->getOperand(0);
  SDValue Arg = GetWidenedVector(FpValue);
  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, {Arg, N->getOperand(1)},

  SDValue InOp = GetWidenedVector(N->getOperand(0));
  EVT ExpVT = RHS.getValueType();
    ExpOp = ModifyToType(RHS, WideExpVT);
  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, ExpOp);

  SDValue InOp = GetWidenedVector(N->getOperand(0));
  if (N->getNumOperands() == 1)
    return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, N->getFlags());
  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");
  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT,
                     {InOp, Mask, N->getOperand(2)});

      cast<VTSDNode>(N->getOperand(1))->getVT().getVectorElementType(),
  SDValue WidenLHS = GetWidenedVector(N->getOperand(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N),
                     WidenVT, WidenLHS, DAG.getValueType(ExtVT));
SDValue DAGTypeLegalizer::WidenVecRes_UnaryOpWithTwoResults(SDNode *N,
  EVT VT0 = N->getValueType(0);
  EVT VT1 = N->getValueType(1);
         "expected both results to be vectors of matching element count");
  SDValue InOp = GetWidenedVector(N->getOperand(0));
      DAG.getNode(N->getOpcode(), SDLoc(N), {WidenVT0, WidenVT1}, InOp)
  ReplaceOtherWidenResults(N, WidenNode, ResNo);
  return SDValue(WidenNode, ResNo);

SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo) {
  SDValue WidenVec = DisintegrateMERGE_VALUES(N, ResNo);
  return GetWidenedVector(WidenVec);

  SDValue InOp = GetWidenedVector(N->getOperand(0));
  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
  return DAG.getAddrSpaceCast(SDLoc(N), WidenVT, InOp,
                              AddrSpaceCastN->getSrcAddressSpace(),
                              AddrSpaceCastN->getDestAddressSpace());

  EVT VT = N->getValueType(0);
  switch (getTypeAction(InVT)) {
    SDValue NInOp = GetPromotedInteger(InOp);
    if (WidenVT.bitsEq(NInVT)) {
      if (DAG.getDataLayout().isBigEndian()) {
                           DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
    InOp = GetWidenedVector(InOp);
    if (WidenVT.bitsEq(InVT))
  if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
    unsigned NewNumParts = WidenSize / InSize;
  EVT OrigInVT = N->getOperand(0).getValueType();
  if (WidenSize % InSize == 0) {
      DAG.ExtractVectorElements(InOp, Ops);
      Ops.append(WidenSize / InScalarSize - Ops.size(),
  return CreateStackStoreLoad(InOp, WidenVT);

  EVT VT = N->getValueType(0);
  EVT EltVT = N->getOperand(0).getValueType();
  assert(WidenNumElts >= NumElts && "Shrinking vector instead of widening!");
  NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
  return DAG.getBuildVector(WidenVT, dl, NewOps);
  EVT InVT = N->getOperand(0).getValueType();
  unsigned NumOperands = N->getNumOperands();
  bool InputWidened = false;
    if (WidenNumElts % NumInElts == 0) {
      unsigned NumConcat = WidenNumElts / NumInElts;
      SDValue UndefVal = DAG.getUNDEF(InVT);
      for (unsigned i = 0; i < NumOperands; ++i)
        Ops[i] = N->getOperand(i);
      for (unsigned i = NumOperands; i != NumConcat; ++i)
    InputWidened = true;
      for (i = 1; i < NumOperands; ++i)
        if (!N->getOperand(i).isUndef())
      if (i == NumOperands)
        return GetWidenedVector(N->getOperand(0));
      if (NumOperands == 2) {
               "Cannot use vector shuffles to widen CONCAT_VECTOR result");
        for (unsigned i = 0; i < NumInElts; ++i) {
          MaskOps[i + NumInElts] = i + WidenNumElts;
        return DAG.getVectorShuffle(WidenVT, dl,
                                    GetWidenedVector(N->getOperand(0)),
                                    GetWidenedVector(N->getOperand(1)),
         "Cannot use build vectors to widen CONCAT_VECTOR result");
  for (unsigned i = 0; i < NumOperands; ++i) {
      InOp = GetWidenedVector(InOp);
    for (unsigned j = 0; j < NumInElts; ++j)
                           DAG.getVectorIdxConstant(j, dl));
  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;
  return DAG.getBuildVector(WidenVT, dl, Ops);

SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));

SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  auto InOpTypeAction = getTypeAction(InOp.getValueType());
    InOp = GetWidenedVector(InOp);
  if (IdxVal == 0 && InVT == WidenVT)
  assert(IdxVal % VTNumElts == 0 &&
         "Expected Idx to be a multiple of subvector minimum vector length");
  if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
    unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
    assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken "
                                  "down type's element count");
    for (; I < VTNumElts / GCD; ++I)
                              DAG.getVectorIdxConstant(IdxVal + I * GCD, dl)));
    for (; I < WidenNumElts / GCD; ++I)
         "EXTRACT_SUBVECTOR for scalable vectors");
  for (i = 0; i < VTNumElts; ++i)
                         DAG.getVectorIdxConstant(IdxVal + i, dl));
  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; i < WidenNumElts; ++i)
  return DAG.getBuildVector(WidenVT, dl, Ops);

SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
                     N->getOperand(1), N->getOperand(2));
  if (!LD->getMemoryVT().isByteSized()) {
    ReplaceValueWith(SDValue(LD, 1), NewChain);
  EVT LdVT = LD->getMemoryVT();
        LD->getChain(), LD->getBasePtr(), LD->getOffset(), Mask,
        EVL, LD->getMemoryVT(), LD->getMemOperand());
    Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
    Result = GenWidenVectorLoads(LdChain, LD);
  if (LdChain.size() == 1)
    NewChain = LdChain[0];
  ReplaceValueWith(SDValue(N, 1), NewChain);
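// VP loads are widened by widening the result type and the mask while keeping
// the original explicit vector length, so the padding lanes are never touched
// in memory.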
  SDValue EVL = N->getVectorLength();
         "Unable to widen binary VP op");
  Mask = GetWidenedVector(Mask);
  assert(Mask.getValueType().getVectorElementCount() ==
         "Unable to widen vector load");
      DAG.getLoadVP(N->getAddressingMode(), ExtType, WidenVT, dl, N->getChain(),
                    N->getBasePtr(), N->getOffset(), Mask, EVL,
                    N->getMemoryVT(), N->getMemOperand(), N->isExpandingLoad());

         "Unable to widen VP strided load");
  Mask = GetWidenedVector(Mask);
  assert(Mask.getValueType().getVectorElementCount() ==
         "Data and mask vectors should have the same number of elements");
  SDValue Res = DAG.getStridedLoadVP(
      N->getAddressingMode(), N->getExtensionType(), WidenVT, DL, N->getChain(),
      N->getBasePtr(), N->getOffset(), N->getStride(), Mask,
      N->getVectorLength(), N->getMemoryVT(), N->getMemOperand(),
      N->isExpandingLoad());

SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_COMPRESS(SDNode *N) {
  SDValue Passthru = N->getOperand(2);
                                    Mask.getValueType().getVectorElementType(),
  SDValue WideVec = ModifyToType(Vec, WideVecVT);
  SDValue WideMask = ModifyToType(Mask, WideMaskVT, true);
  SDValue WidePassthru = ModifyToType(Passthru, WideVecVT);
                     WideMask, WidePassthru);

  EVT MaskVT = Mask.getValueType();
  SDValue PassThru = GetWidenedVector(N->getPassThru());
  Mask = ModifyToType(Mask, WideMaskVT, true);
  SDValue Res = DAG.getMaskedLoad(
      WidenVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
      PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
      ExtType, N->isExpandingLoad());

  EVT MaskVT = Mask.getValueType();
  SDValue PassThru = GetWidenedVector(N->getPassThru());
  Mask = ModifyToType(Mask, WideMaskVT, true);
                                     Index.getValueType().getScalarType(),
  Index = ModifyToType(Index, WideIndexVT);
                                    N->getMemoryVT().getScalarType(), NumElts);
  SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
                                    WideMemVT, dl, Ops, N->getMemOperand(),
                                    N->getIndexType(), N->getExtensionType());

                                    N->getMemoryVT().getScalarType(), WideEC);
  Mask = GetWidenedMask(Mask, WideEC);
                   Mask, N->getVectorLength()};
  SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
                                dl, Ops, N->getMemOperand(), N->getIndexType());
  if (N->isVPOpcode())
    return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0),
                       N->getOperand(1), N->getOperand(2));
  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0));

  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  return N->getOperand(OpNo).getValueType();

    N = N.getOperand(0);
    for (unsigned i = 1; i < N->getNumOperands(); ++i)
      if (!N->getOperand(i)->isUndef())
    N = N.getOperand(0);
    N = N.getOperand(0);
    N = N.getOperand(0);

                            { MaskVT, MVT::Other }, Ops);
  ReplaceValueWith(InMask.getValue(1), Mask.getValue(1));
  if (MaskScalarBits < ToMaskScalBits) {
  } else if (MaskScalarBits > ToMaskScalBits) {
  assert(Mask->getValueType(0).getScalarSizeInBits() ==
         "Mask should have the right element size by now.");
  unsigned CurrMaskNumEls = Mask->getValueType(0).getVectorNumElements();
    SDValue ZeroIdx = DAG.getVectorIdxConstant(0, SDLoc(Mask));
    EVT SubVT = Mask->getValueType(0);
  assert((Mask->getValueType(0) == ToMaskVT) &&
         "A mask of ToMaskVT should have been produced by now.");
  EVT CondVT = Cond->getValueType(0);
  EVT VSelVT = N->getValueType(0);
  EVT FinalVT = VSelVT;
  EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
  EVT ToMaskVT = VSelVT;
    Mask = convertMask(Cond, MaskVT, ToMaskVT);
    if (ScalarBits0 != ScalarBits1) {
      EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
      EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
    SETCC0 = convertMask(SETCC0, VT0, MaskVT);
    SETCC1 = convertMask(SETCC1, VT1, MaskVT);
    Mask = convertMask(Cond, MaskVT, ToMaskVT);

  unsigned Opcode = N->getOpcode();
  if (SDValue WideCond = WidenVSELECTMask(N)) {
    SDValue InOp1 = GetWidenedVector(N->getOperand(1));
    SDValue InOp2 = GetWidenedVector(N->getOperand(2));
    return DAG.getNode(Opcode, SDLoc(N), WidenVT, WideCond, InOp1, InOp2);
    Cond1 = GetWidenedVector(Cond1);
      SDValue SplitSelect = SplitVecOp_VSELECT(N, 0);
      SDValue Res = ModifyToType(SplitSelect, WidenVT);
      Cond1 = ModifyToType(Cond1, CondWidenVT);
  SDValue InOp1 = GetWidenedVector(N->getOperand(1));
  SDValue InOp2 = GetWidenedVector(N->getOperand(2));
  if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
    return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2,
  return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2);

  SDValue InOp1 = GetWidenedVector(N->getOperand(2));
  SDValue InOp2 = GetWidenedVector(N->getOperand(3));
                     N->getOperand(1), InOp1, InOp2, N->getOperand(4));

  return DAG.getUNDEF(WidenVT);

  EVT VT = N->getValueType(0);
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  for (unsigned i = 0; i != NumElts; ++i) {
    int Idx = N->getMaskElt(i);
    if (Idx < (int)NumElts)
      NewMask[i] = Idx - NumElts + WidenNumElts;
  return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);

  EVT VT = N->getValueType(0);
  SDValue OpValue = GetWidenedVector(N->getOperand(0));
  unsigned IdxVal = WidenNumElts - VTNumElts;
    unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
    assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken "
                                  "down type's element count");
    for (; i < VTNumElts / GCD; ++i)
                              DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
    for (; i < WidenNumElts / GCD; ++i)
  std::iota(Mask.begin(), Mask.begin() + VTNumElts, IdxVal);
  return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operands must be vectors");
    SDValue SplitVSetCC = SplitVecOp_VSETCC(N);
    SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
    InOp1 = GetWidenedVector(InOp1);
    InOp2 = GetWidenedVector(InOp2);
    InOp1 = DAG.WidenVector(InOp1, SDLoc(N));
    InOp2 = DAG.WidenVector(InOp2, SDLoc(N));
         "Input not widened to expected type!");
  if (N->getOpcode() == ISD::VP_SETCC) {
    return DAG.getNode(ISD::VP_SETCC, SDLoc(N), WidenVT, InOp1, InOp2,
                       N->getOperand(2), Mask, N->getOperand(4));

  assert(N->getValueType(0).isVector() &&
         N->getOperand(1).getValueType().isVector() &&
         "Operands must be vectors");
  EVT VT = N->getValueType(0);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  for (unsigned i = 0; i != NumElts; ++i) {
                          DAG.getVectorIdxConstant(i, dl));
                          DAG.getVectorIdxConstant(i, dl));
    Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other},
                             {Chain, LHSElem, RHSElem, CC});
    Chains[i] = Scalars[i].getValue(1);
    Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
                               DAG.getBoolConstant(true, dl, EltVT, VT),
                               DAG.getBoolConstant(false, dl, EltVT, VT));
  ReplaceValueWith(SDValue(N, 1), NewChain);
  return DAG.getBuildVector(WidenVT, dl, Scalars);
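// Entry point for operand widening: dispatch on the opcode to a WidenVecOp_*
// helper and, if it produced a replacement value, wire it back into N's uses.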
bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
  LLVM_DEBUG(dbgs() << "Widen node operand " << OpNo << ": "; N->dump(&DAG));
  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
  switch (N->getOpcode()) {
    dbgs() << "WidenVectorOperand op #" << OpNo << ": ";
    Res = WidenVecOp_FAKE_USE(N);
  case ISD::STORE:              Res = WidenVecOp_STORE(N); break;
  case ISD::VP_STORE:           Res = WidenVecOp_VP_STORE(N, OpNo); break;
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = WidenVecOp_VP_STRIDED_STORE(N, OpNo);
    Res = WidenVecOp_EXTEND_VECTOR_INREG(N);
  case ISD::MSTORE:             Res = WidenVecOp_MSTORE(N, OpNo); break;
  case ISD::MGATHER:            Res = WidenVecOp_MGATHER(N, OpNo); break;
  case ISD::VP_SCATTER:         Res = WidenVecOp_VP_SCATTER(N, OpNo); break;
  case ISD::SETCC:              Res = WidenVecOp_SETCC(N); break;
    Res = WidenVecOp_UnrollVectorOp(N);
    Res = WidenVecOp_EXTEND(N);
    Res = WidenVecOp_CMP(N);
    Res = WidenVecOp_Convert(N);
    Res = WidenVecOp_FP_TO_XINT_SAT(N);
  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecOp_VP_SPLAT(N, OpNo);
    Res = WidenVecOp_VECREDUCE(N);
    Res = WidenVecOp_VECREDUCE_SEQ(N);
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = WidenVecOp_VP_REDUCE(N);
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = WidenVecOp_VP_CttzElements(N);
  if (!Res.getNode())
    return false;
  if (N->isStrictFPOpcode())
         "Invalid operand expansion");
         "Invalid operand expansion");
  ReplaceValueWith(SDValue(N, 0), Res);
  EVT VT = N->getValueType(0);
         "Unexpected type action");
  InOp = GetWidenedVector(InOp);
         "Input wasn't widened!");
      FixedEltVT == InEltVT) {
           "Not enough elements in the fixed type for the operand!");
           "We can't have the same type as we started with!");
                         DAG.getUNDEF(FixedVT), InOp,
                         DAG.getVectorIdxConstant(0, DL));
                       DAG.getVectorIdxConstant(0, DL));
    return WidenVecOp_Convert(N);
  switch (N->getOpcode()) {

  EVT OpVT = N->getOperand(0).getValueType();
  EVT ResVT = N->getValueType(0);
                    DAG.getVectorIdxConstant(0, dl));
                    DAG.getVectorIdxConstant(0, dl));
    LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
    RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);
  return DAG.getNode(N->getOpcode(), dl, ResVT, LHS, RHS);

  return DAG.UnrollVectorOp(N);

  EVT ResultVT = N->getValueType(0);
  SDValue WideArg = GetWidenedVector(N->getOperand(0));
                              {WideArg, Test}, N->getFlags());
                   DAG.getVectorIdxConstant(0, DL));
  EVT OpVT = N->getOperand(0).getValueType();
  return DAG.getNode(ExtendCode, DL, ResultVT, CC);

  EVT VT = N->getValueType(0);
  SDValue InOp = N->getOperand(N->isStrictFPOpcode() ? 1 : 0);
         "Unexpected type action");
  InOp = GetWidenedVector(InOp);
  unsigned Opcode = N->getOpcode();
  if (TLI.isTypeLegal(WideVT) && !N->isStrictFPOpcode()) {
    if (N->isStrictFPOpcode()) {
      Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other },
                        { N->getOperand(0), InOp, N->getOperand(2) });
      Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other },
                        { N->getOperand(0), InOp });
      Res = DAG.getNode(Opcode, dl, WideVT, InOp, N->getOperand(1));
      Res = DAG.getNode(Opcode, dl, WideVT, InOp);
                       DAG.getVectorIdxConstant(0, dl));
  if (N->isStrictFPOpcode()) {
    for (unsigned i = 0; i < NumElts; ++i) {
                              DAG.getVectorIdxConstant(i, dl));
      Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
    ReplaceValueWith(SDValue(N, 1), NewChain);
    for (unsigned i = 0; i < NumElts; ++i)
      Ops[i] = DAG.getNode(Opcode, dl, EltVT,
                           InOp, DAG.getVectorIdxConstant(i, dl)));
  return DAG.getBuildVector(VT, dl, Ops);
  EVT DstVT = N->getValueType(0);
  SDValue Src = GetWidenedVector(N->getOperand(0));
  EVT SrcVT = Src.getValueType();
      DAG.getNode(N->getOpcode(), dl, WideDstVT, Src, N->getOperand(1));
      DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
  return DAG.UnrollVectorOp(N);

  EVT VT = N->getValueType(0);
  SDValue InOp = GetWidenedVector(N->getOperand(0));
  if (!VT.isVector() && VT != MVT::x86mmx &&
                      DAG.getVectorIdxConstant(0, dl));
        .divideCoefficientBy(EltSize);
                      DAG.getVectorIdxConstant(0, dl));
  return CreateStackStoreLoad(InOp, VT);

  SDValue WidenedOp = GetWidenedVector(N->getOperand(1));

  EVT VT = N->getValueType(0);
  EVT InVT = N->getOperand(0).getValueType();
  unsigned NumOperands = N->getNumOperands();
  for (i = 1; i < NumOperands; ++i)
    if (!N->getOperand(i).isUndef())
  if (i == NumOperands)
    return GetWidenedVector(N->getOperand(0));
  for (unsigned i = 0; i < NumOperands; ++i) {
           "Unexpected type action");
    InOp = GetWidenedVector(InOp);
    for (unsigned j = 0; j < NumInElts; ++j)
                           DAG.getVectorIdxConstant(j, dl));
  return DAG.getBuildVector(VT, dl, Ops);

SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  SubVec = GetWidenedVector(SubVec);
  bool IndicesValid = false;
    IndicesValid = true;
  Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
      Attribute::VScaleRange);
      IndicesValid = true;
  if (IndicesValid && InVec.isUndef() && N->getConstantOperandVal(2) == 0)
              "INSERT_SUBVECTOR");

SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
                     N->getValueType(0), InOp, N->getOperand(1));

SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
                     N->getValueType(0), InOp, N->getOperand(1));

SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), InOp);
  if (!ST->getMemoryVT().getScalarType().isByteSized())
  if (ST->isTruncatingStore())
    StVal = GetWidenedVector(StVal);
    return DAG.getStoreVP(ST->getChain(), DL, StVal, ST->getBasePtr(),
                          DAG.getUNDEF(ST->getBasePtr().getValueType()), Mask,
                          EVL, StVT, ST->getMemOperand(),
                          ST->getAddressingMode());
  if (GenWidenVectorStores(StChain, ST)) {
    if (StChain.size() == 1)

SDValue DAGTypeLegalizer::WidenVecOp_VP_SPLAT(SDNode *N, unsigned OpNo) {
  assert(OpNo == 1 && "Can widen only mask operand of vp_splat");
  return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
                     N->getOperand(0), GetWidenedVector(N->getOperand(1)),

SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(SDNode *N, unsigned OpNo) {
  assert((OpNo == 1 || OpNo == 3) &&
         "Can widen only data or mask operand of vp_store");
    StVal = GetWidenedVector(StVal);
           "Unable to widen VP store");
    Mask = GetWidenedVector(Mask);
    Mask = GetWidenedVector(Mask);
           "Unable to widen VP store");
    StVal = GetWidenedVector(StVal);
  assert(Mask.getValueType().getVectorElementCount() ==
         "Mask and data vectors should have the same number of elements");
  return DAG.getStoreVP(ST->getChain(), dl, StVal, ST->getBasePtr(),
                        ST->getOffset(), Mask, ST->getVectorLength(),
                        ST->getMemoryVT(), ST->getMemOperand(),
                        ST->getAddressingMode(), ST->isTruncatingStore(),
                        ST->isCompressingStore());

  assert((OpNo == 1 || OpNo == 4) &&
         "Can widen only data or mask operand of vp_strided_store");
         "Unable to widen VP strided store");
         "Unable to widen VP strided store");
  StVal = GetWidenedVector(StVal);
  Mask = GetWidenedVector(Mask);
         Mask.getValueType().getVectorElementCount() &&
         "Data and mask vectors should have the same number of elements");
  return DAG.getStridedStoreVP(
SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) {
  assert((OpNo == 1 || OpNo == 4) &&
         "Can widen only data or mask operand of mstore");
  EVT MaskVT = Mask.getValueType();
    StVal = GetWidenedVector(StVal);
    Mask = ModifyToType(Mask, WideMaskVT, true);
    Mask = ModifyToType(Mask, WideMaskVT, true);
    StVal = ModifyToType(StVal, WideVT);
  assert(Mask.getValueType().getVectorNumElements() ==
         "Mask and data vectors should have the same number of elements");

SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(SDNode *N, unsigned OpNo) {
  assert(OpNo == 4 && "Can widen only the index of mgather");
  auto *MG = cast<MaskedGatherSDNode>(N);
  SDValue DataOp = MG->getPassThru();
  SDValue Scale = MG->getScale();
  SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
                                    MG->getMemOperand(), MG->getIndexType(),
                                    MG->getExtensionType());

SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
    DataOp = GetWidenedVector(DataOp);
    EVT IndexVT = Index.getValueType();
    Index = ModifyToType(Index, WideIndexVT);
    EVT MaskVT = Mask.getValueType();
    Mask = ModifyToType(Mask, WideMaskVT, true);
  } else if (OpNo == 4) {
    Index = GetWidenedVector(Index);
  return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N),

SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(SDNode *N, unsigned OpNo) {
    DataOp = GetWidenedVector(DataOp);
    Index = GetWidenedVector(Index);
    Mask = GetWidenedMask(Mask, WideEC);
  } else if (OpNo == 3) {
    Index = GetWidenedVector(Index);
  return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N), Ops,
  SDValue InOp0 = GetWidenedVector(N->getOperand(0));
  SDValue InOp1 = GetWidenedVector(N->getOperand(1));
  EVT VT = N->getValueType(0);
                           SVT, InOp0, InOp1, N->getOperand(2));
                   DAG.getVectorIdxConstant(0, dl));
  EVT OpVT = N->getOperand(0).getValueType();
  return DAG.getNode(ExtendCode, dl, VT, CC);

  EVT VT = N->getValueType(0);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  for (unsigned i = 0; i != NumElts; ++i) {
                          DAG.getVectorIdxConstant(i, dl));
                          DAG.getVectorIdxConstant(i, dl));
    Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other},
                             {Chain, LHSElem, RHSElem, CC});
    Chains[i] = Scalars[i].getValue(1);
    Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
                               DAG.getBoolConstant(true, dl, EltVT, VT),
                               DAG.getBoolConstant(false, dl, EltVT, VT));
  ReplaceValueWith(SDValue(N, 1), NewChain);
  return DAG.getBuildVector(VT, dl, Scalars);

  SDValue Op = GetWidenedVector(N->getOperand(0));
  EVT VT = N->getValueType(0);
  EVT OrigVT = N->getOperand(0).getValueType();
  EVT WideVT = Op.getValueType();
  unsigned Opc = N->getOpcode();
  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
  assert(NeutralElem && "Neutral element must exist");
  assert(Start.getValueType() == VT);
  SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
    unsigned GCD = std::gcd(OrigElts, WideElts);
    SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
    for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD)
                       DAG.getVectorIdxConstant(Idx, dl));
    return DAG.getNode(Opc, dl, VT, Op, Flags);
  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
                     DAG.getVectorIdxConstant(Idx, dl));
  return DAG.getNode(Opc, dl, VT, Op, Flags);
  EVT VT = N->getValueType(0);
  EVT WideVT = Op.getValueType();
  unsigned Opc = N->getOpcode();
  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
  SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
  unsigned GCD = std::gcd(OrigElts, WideElts);
  SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
  for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD)
                     DAG.getVectorIdxConstant(Idx, dl));
  return DAG.getNode(Opc, dl, VT, AccOp, Op, Flags);
  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
                     DAG.getVectorIdxConstant(Idx, dl));
  return DAG.getNode(Opc, dl, VT, AccOp, Op, Flags);

  assert(N->isVPOpcode() && "Expected VP opcode");
  SDValue Op = GetWidenedVector(N->getOperand(1));
                                Op.getValueType().getVectorElementCount());
  return DAG.getNode(N->getOpcode(), dl, N->getValueType(0),
                     {N->getOperand(0), Op, Mask, N->getOperand(3)},

  EVT VT = N->getValueType(0);
                       DAG.getVectorIdxConstant(0, DL));
  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0),
                     {Source, Mask, N->getOperand(2)}, N->getFlags());
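// Pick the widest simple value type that can be used to cover Width bits of a
// load or store of the widened vector, honouring the alignment constraint;
// std::nullopt is returned when no suitable type exists.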
                                         unsigned WidenEx = 0) {
  unsigned AlignInBits = Align * 8;
  EVT RetVT = WidenEltVT;
  if (Width == WidenEltWidth)
        (WidenWidth % MemVTWidth) == 0 &&
        (MemVTWidth <= Width ||
         (Align != 0 && MemVTWidth <= AlignInBits && MemVTWidth <= Width + WidenEx))) {
      if (MemVTWidth == WidenWidth)
        (WidenWidth % MemVTWidth) == 0 &&
        (MemVTWidth <= Width ||
         (Align != 0 && MemVTWidth <= AlignInBits && MemVTWidth <= Width + WidenEx))) {
  return std::nullopt;

                                       unsigned Start, unsigned End) {
  SDLoc dl(LdOps[Start]);
  EVT LdTy = LdOps[Start].getValueType();
  for (unsigned i = Start + 1; i != End; ++i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
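// Widen a vector load by emitting a sequence of progressively narrower loads
// (chosen via findMemType) that together cover the memory type, then
// stitching the pieces back together into one value of the widened type.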
  EVT LdVT = LD->getMemoryVT();
  TypeSize WidthDiff = WidenWidth - LdWidth;
  std::optional<EVT> FirstVT =
      findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
  TypeSize FirstVTWidth = FirstVT->getSizeInBits();
  std::optional<EVT> NewVT = FirstVT;
  TypeSize NewVTWidth = FirstVTWidth;
    RemainingWidth -= NewVTWidth;
      NewVTWidth = NewVT->getSizeInBits();
  SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr, LD->getPointerInfo(),
                             LD->getOriginalAlign(), MMOFlags, AAInfo);
  if (MemVTs.empty()) {
    if (!FirstVT->isVector()) {
    if (FirstVT == WidenVT)
    unsigned NumConcat =
    SDValue UndefVal = DAG.getUNDEF(*FirstVT);
    ConcatOps[0] = LdOp;
    for (unsigned i = 1; i != NumConcat; ++i)
      ConcatOps[i] = UndefVal;
  IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
  for (EVT MemVT : MemVTs) {
    Align NewAlign = ScaledOffset == 0
                         ? LD->getOriginalAlign()
        DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
    IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
  EVT LdTy = LdOps[i].getValueType();
  for (--i; i >= 0; --i) {
    LdTy = LdOps[i].getValueType();
  ConcatOps[--Idx] = LdOps[i];
  for (--i; i >= 0; --i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
        WidenOps[j] = ConcatOps[Idx + j];
      for (; j != NumOps; ++j)
        WidenOps[j] = DAG.getUNDEF(LdTy);
    ConcatOps[--Idx] = LdOps[i];
  SDValue UndefVal = DAG.getUNDEF(LdTy);
  for (; i != End - Idx; ++i)
    WidenOps[i] = ConcatOps[Idx + i];
  for (; i != NumOps; ++i)
    WidenOps[i] = UndefVal;
  EVT LdVT = LD->getMemoryVT();
         "not yet supported");
      DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr, LD->getPointerInfo(),
                     LdEltVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
    Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
                            LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
                            LD->getOriginalAlign(), MMOFlags, AAInfo);
  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; i != WidenNumElts; ++i)
  return DAG.getBuildVector(WidenVT, dl, Ops);
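// Mirror of the widened-load path: break the widened value into the widest
// pieces findMemType allows and emit one store per piece, advancing the base
// pointer between them.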
  SDValue ValOp = GetWidenedVector(ST->getValue());
  EVT StVT = ST->getMemoryVT();
         "Mismatch between store and value types");
    std::optional<EVT> NewVT =
    TypeSize NewVTWidth = NewVT->getSizeInBits();
      StWidth -= NewVTWidth;
      MemVTs.back().second++;
  for (const auto &Pair : MemVTs) {
    EVT NewVT = Pair.first;
    unsigned Count = Pair.second;
        Align NewAlign = ScaledOffset == 0
                             ? ST->getOriginalAlign()
                                  DAG.getVectorIdxConstant(Idx, dl));
        SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
                                  DAG.getVectorIdxConstant(Idx++, dl));
            DAG.getStore(Chain, dl, EOp, BasePtr, MPI, ST->getOriginalAlign(),
        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
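// Coerce InOp to the vector type NVT by concatenating with UNDEF, extracting
// a subvector, or rebuilding element by element; if FillWithZeroes is set,
// the padding lanes are cleared with an AND mask instead of being left
// undefined.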
                                       bool FillWithZeroes) {
         "input and widen element type must match");
         "cannot modify scalable vectors in this way");
    SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
    for (unsigned i = 1; i != NumConcat; ++i)
                       DAG.getVectorIdxConstant(0, dl));
         "Scalable vectors should have been handled already.");
  unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
                         DAG.getVectorIdxConstant(Idx, dl));
  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;
  SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
  if (!FillWithZeroes)
         "We expect to never want to FillWithZeroes for non-integral types.");
  MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
  MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
  return DAG.getNode(ISD::AND, dl, NVT, Widened,
                     DAG.getBuildVector(NVT, dl, MaskOps));
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static unsigned getExtendForIntVecReduction(SDNode *N)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getInc() const
const SDValue & getScale() const
const SDValue & getMask() const
const SDValue & getIntID() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
ISD::MemIndexType getIndexType() const
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type: either it is already legal (return 'Legal') or we ...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
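A hedged example of how these TargetLowering queries usually gate a lowering decision; TLI, DAG, DL, VT, LHS and RHS are assumptions.
// Sketch: emit the node directly only if the target can handle it.
if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
  return DAG.getNode(ISD::FMINNUM, DL, VT, LHS, RHS);
// ...otherwise fall back to an expansion the target is known to support.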
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
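A minimal sketch of the call above; TLI, DAG and the LoadSDNode *LD are assumed, and both results must be consumed because the load's chain changes.
// Illustrative only: rebuild the vector value element by element.
std::pair<SDValue, SDValue> Scalarized = TLI.scalarizeVectorLoad(LD, DAG);
SDValue Value = Scalarized.first;  // the reassembled value
SDValue Chain = Scalarized.second; // the new token chain to thread onward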
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
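A sketch, assuming DAG, DL, TLI, a base pointer VecPtr, the vector type VecVT and an element number EltNo, of how the element-pointer helper is invoked.
// Illustrative only: address of element EltNo of an in-memory vector.
SDValue Idx = DAG.getVectorIdxConstant(EltNo, DL);
SDValue EltPtr = TLI.getVectorElementPointer(DAG, VecPtr, VecVT, Idx);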
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent a VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent a VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the operation compresses the vector before storing it.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
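A short sketch of the FixedOrScalableQuantity queries listed above, using an ElementCount EC assumed to come from something like VT.getVectorElementCount().
// Illustrative only: inspect a possibly scalable element count.
bool Scalable = EC.isScalable();               // true for <vscale x N x ...>
bool CanHalve = EC.isKnownEven();              // known even at compile time
ElementCount Half = EC.divideCoefficientBy(2); // halve the known-min count
uint64_t MinElts = EC.getKnownMinValue();      // minimum lane count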
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively places vector elements based on mask e....
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum, which behave the same as FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
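A hedged example of building value types with the EVT helpers in this group; Ctx is an assumed LLVMContext reference (for instance *DAG.getContext()).
// Illustrative only: a fixed and a scalable vector of i32.
EVT EltVT = EVT::getIntegerVT(Ctx, 32);                 // i32
EVT FixedVT = EVT::getVectorVT(Ctx, EltVT, 4);          // v4i32
EVT ScalableVT = EVT::getVectorVT(Ctx, EltVT, 4, true); // nxv4i32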
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
EVT changeElementType(EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight),...
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time that this has at least as many bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
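A minimal sketch of describing stack memory with MachinePointerInfo; MF and the frame index FI are assumptions taken from the surrounding lowering code.
// Illustrative only: a frame-index pointer, and the same slot 4 bytes in.
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
MachinePointerInfo EltInfo = PtrInfo.getWithOffset(4);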
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueTypes that has been interned by a SelectionDAG.