34#define DEBUG_TYPE "legalize-types"
40void DAGTypeLegalizer::ScalarizeVectorResult(
SDNode *
N,
unsigned ResNo) {
45 switch (
N->getOpcode()) {
48 dbgs() <<
"ScalarizeVectorResult #" << ResNo <<
": ";
60 case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(
N);
break;
62 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(
N));
break;
68 case ISD::SETCC: R = ScalarizeVecRes_SETCC(
N);
break;
69 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(
N);
break;
75 R = ScalarizeVecRes_VecInregOp(
N);
117 R = ScalarizeVecRes_UnaryOp(
N);
120 R = ScalarizeVecRes_FFREXP(
N, ResNo);
165 R = ScalarizeVecRes_BinOp(
N);
170 R = ScalarizeVecRes_TernaryOp(
N);
173#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
174 case ISD::STRICT_##DAGN:
175#include "llvm/IR/ConstrainedOps.def"
176 R = ScalarizeVecRes_StrictFPOp(
N);
181 R = ScalarizeVecRes_FP_TO_XINT_SAT(
N);
190 R = ScalarizeVecRes_OverflowOp(
N, ResNo);
200 R = ScalarizeVecRes_FIX(
N);
206 SetScalarizedVector(
SDValue(
N, ResNo), R);
210 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
211 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
213 LHS.getValueType(), LHS, RHS,
N->getFlags());
217 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
218 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
219 SDValue Op2 = GetScalarizedVector(
N->getOperand(2));
225 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
226 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
232SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(
SDNode *
N,
unsigned ResNo) {
233 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
234 "Unexpected vector type!");
235 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
237 EVT VT0 =
N->getValueType(0);
238 EVT VT1 =
N->getValueType(1);
243 {VT0.getScalarType(), VT1.getScalarType()}, Elt)
247 unsigned OtherNo = 1 - ResNo;
248 EVT OtherVT =
N->getValueType(OtherNo);
250 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
254 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
257 return SDValue(ScalarNode, ResNo);
261 EVT VT =
N->getValueType(0).getVectorElementType();
262 unsigned NumOpers =
N->getNumOperands();
264 EVT ValueVTs[] = {VT, MVT::Other};
273 for (
unsigned i = 1; i < NumOpers; ++i) {
279 Oper = GetScalarizedVector(Oper);
290 Opers,
N->getFlags());
301 EVT ResVT =
N->getValueType(0);
302 EVT OvVT =
N->getValueType(1);
306 ScalarLHS = GetScalarizedVector(
N->getOperand(0));
307 ScalarRHS = GetScalarizedVector(
N->getOperand(1));
312 ScalarLHS = ElemsLHS[0];
313 ScalarRHS = ElemsRHS[0];
319 N->getOpcode(),
DL, ScalarVTs, ScalarLHS, ScalarRHS).
getNode();
323 unsigned OtherNo = 1 - ResNo;
324 EVT OtherVT =
N->getValueType(OtherNo);
326 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
330 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
333 return SDValue(ScalarNode, ResNo);
338 SDValue Op = DisintegrateMERGE_VALUES(
N, ResNo);
339 return GetScalarizedVector(
Op);
344 if (
Op.getValueType().isVector()
345 &&
Op.getValueType().getVectorNumElements() == 1
346 && !isSimpleLegalType(
Op.getValueType()))
347 Op = GetScalarizedVector(
Op);
348 EVT NewVT =
N->getValueType(0).getVectorElementType();
353SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(
SDNode *
N) {
354 EVT EltVT =
N->getValueType(0).getVectorElementType();
363SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
365 N->getValueType(0).getVectorElementType(),
366 N->getOperand(0),
N->getOperand(1));
372 EVT OpVT =
Op.getValueType();
376 Op = GetScalarizedVector(
Op);
383 N->getValueType(0).getVectorElementType(),
Op,
388 SDValue Op = GetScalarizedVector(
N->getOperand(0));
393SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
397 EVT EltVT =
N->getValueType(0).getVectorElementType();
398 if (
Op.getValueType() != EltVT)
405 assert(
N->isUnindexed() &&
"Indexed vector load?");
409 N->getValueType(0).getVectorElementType(),
SDLoc(
N),
N->getChain(),
410 N->getBasePtr(), DAG.
getUNDEF(
N->getBasePtr().getValueType()),
411 N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
412 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
N->getAAInfo());
422 EVT DestVT =
N->getValueType(0).getVectorElementType();
424 EVT OpVT =
Op.getValueType();
434 Op = GetScalarizedVector(
Op);
444 EVT EltVT =
N->getValueType(0).getVectorElementType();
446 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
455 EVT OpVT =
Op.getValueType();
457 EVT EltVT =
N->getValueType(0).getVectorElementType();
460 Op = GetScalarizedVector(
Op);
466 switch (
N->getOpcode()) {
478SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(
SDNode *
N) {
481 EVT EltVT =
N->getValueType(0).getVectorElementType();
490 EVT OpVT =
Cond.getValueType();
503 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
517 EVT OpVT =
Cond->getOperand(0).getValueType();
524 EVT CondVT =
Cond.getValueType();
525 if (ScalarBool != VecBool) {
526 switch (ScalarBool) {
547 auto BoolVT = getSetCCResultType(CondVT);
548 if (BoolVT.bitsLT(CondVT))
553 GetScalarizedVector(
N->getOperand(2)));
557 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
559 LHS.getValueType(),
N->getOperand(0), LHS,
560 GetScalarizedVector(
N->getOperand(2)));
564 SDValue LHS = GetScalarizedVector(
N->getOperand(2));
566 N->getOperand(0),
N->getOperand(1),
567 LHS, GetScalarizedVector(
N->getOperand(3)),
572 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
575SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(
SDNode *
N) {
577 SDValue Arg =
N->getOperand(2).getOperand(0);
579 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
580 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
581 return GetScalarizedVector(
N->getOperand(
Op));
584SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(
SDNode *
N) {
586 EVT SrcVT = Src.getValueType();
591 Src = GetScalarizedVector(Src);
597 EVT DstVT =
N->getValueType(0).getVectorElementType();
598 return DAG.
getNode(
N->getOpcode(), dl, DstVT, Src,
N->getOperand(1));
602 assert(
N->getValueType(0).isVector() &&
603 N->getOperand(0).getValueType().isVector() &&
604 "Operand types must be vectors");
607 EVT OpVT =
LHS.getValueType();
608 EVT NVT =
N->getValueType(0).getVectorElementType();
613 LHS = GetScalarizedVector(LHS);
614 RHS = GetScalarizedVector(RHS);
630 return DAG.
getNode(ExtendCode,
DL, NVT, Res);
638 EVT ResultVT =
N->getValueType(0).getVectorElementType();
641 Arg = GetScalarizedVector(Arg);
654 return DAG.
getNode(ExtendCode,
DL, ResultVT, Res);
661bool DAGTypeLegalizer::ScalarizeVectorOperand(
SDNode *
N,
unsigned OpNo) {
666 switch (
N->getOpcode()) {
669 dbgs() <<
"ScalarizeVectorOperand Op #" << OpNo <<
": ";
676 Res = ScalarizeVecOp_BITCAST(
N);
688 Res = ScalarizeVecOp_UnaryOp(
N);
694 Res = ScalarizeVecOp_UnaryOp_StrictFP(
N);
697 Res = ScalarizeVecOp_CONCAT_VECTORS(
N);
700 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(
N);
703 Res = ScalarizeVecOp_VSELECT(
N);
706 Res = ScalarizeVecOp_VSETCC(
N);
709 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
712 Res = ScalarizeVecOp_STRICT_FP_ROUND(
N, OpNo);
715 Res = ScalarizeVecOp_FP_ROUND(
N, OpNo);
718 Res = ScalarizeVecOp_STRICT_FP_EXTEND(
N);
721 Res = ScalarizeVecOp_FP_EXTEND(
N);
738 Res = ScalarizeVecOp_VECREDUCE(
N);
742 Res = ScalarizeVecOp_VECREDUCE_SEQ(
N);
747 if (!Res.
getNode())
return false;
755 "Invalid operand expansion");
757 ReplaceValueWith(
SDValue(
N, 0), Res);
764 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
766 N->getValueType(0), Elt);
772 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
773 "Unexpected vector type!");
774 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
776 N->getValueType(0).getScalarType(), Elt);
784SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(
SDNode *
N) {
785 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
786 "Unexpected vector type!");
787 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
789 {
N->getValueType(0).getScalarType(), MVT::Other },
790 {
N->getOperand(0), Elt });
800 ReplaceValueWith(
SDValue(
N, 0), Res);
805SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(
SDNode *
N) {
807 for (
unsigned i = 0, e =
N->getNumOperands(); i < e; ++i)
808 Ops[i] = GetScalarizedVector(
N->getOperand(i));
814SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
815 EVT VT =
N->getValueType(0);
816 SDValue Res = GetScalarizedVector(
N->getOperand(0));
828 SDValue ScalarCond = GetScalarizedVector(
N->getOperand(0));
829 EVT VT =
N->getValueType(0);
839 assert(
N->getValueType(0).isVector() &&
840 N->getOperand(0).getValueType().isVector() &&
841 "Operand types must be vectors");
842 assert(
N->getValueType(0) == MVT::v1i1 &&
"Expected v1i1 type");
844 EVT VT =
N->getValueType(0);
845 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
846 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
848 EVT OpVT =
N->getOperand(0).getValueType();
860 Res = DAG.
getNode(ExtendCode,
DL, NVT, Res);
868 assert(
N->isUnindexed() &&
"Indexed store of one-element vector?");
869 assert(OpNo == 1 &&
"Do not know how to scalarize this operand!");
872 if (
N->isTruncatingStore())
874 N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
875 N->getBasePtr(),
N->getPointerInfo(),
876 N->getMemoryVT().getVectorElementType(),
N->getOriginalAlign(),
877 N->getMemOperand()->getFlags(),
N->getAAInfo());
879 return DAG.
getStore(
N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
880 N->getBasePtr(),
N->getPointerInfo(),
881 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
887SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(
SDNode *
N,
unsigned OpNo) {
888 assert(OpNo == 0 &&
"Wrong operand for scalarization!");
889 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
891 N->getValueType(0).getVectorElementType(), Elt,
896SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(
SDNode *
N,
898 assert(OpNo == 1 &&
"Wrong operand for scalarization!");
899 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
903 {
N->getOperand(0), Elt,
N->getOperand(2) });
912 ReplaceValueWith(
SDValue(
N, 0), Res);
919 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
921 N->getValueType(0).getVectorElementType(), Elt);
927SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(
SDNode *
N) {
928 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
932 {
N->getOperand(0), Elt});
941 ReplaceValueWith(
SDValue(
N, 0), Res);
946 SDValue Res = GetScalarizedVector(
N->getOperand(0));
953SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(
SDNode *
N) {
961 AccOp,
Op,
N->getFlags());
972void DAGTypeLegalizer::SplitVectorResult(
SDNode *
N,
unsigned ResNo) {
977 if (CustomLowerNode(
N,
N->getValueType(ResNo),
true))
980 switch (
N->getOpcode()) {
983 dbgs() <<
"SplitVectorResult #" << ResNo <<
": ";
995 case ISD::VP_SELECT: SplitRes_Select(
N,
Lo,
Hi);
break;
1010 SplitVecRes_ScalarOp(
N,
Lo,
Hi);
1013 SplitVecRes_STEP_VECTOR(
N,
Lo,
Hi);
1017 SplitVecRes_LOAD(cast<LoadSDNode>(
N),
Lo,
Hi);
1020 SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(
N),
Lo,
Hi);
1022 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1023 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N),
Lo,
Hi);
1026 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(
N),
Lo,
Hi);
1029 case ISD::VP_GATHER:
1030 SplitVecRes_Gather(cast<MemSDNode>(
N),
Lo,
Hi,
true);
1034 SplitVecRes_SETCC(
N,
Lo,
Hi);
1037 SplitVecRes_VECTOR_REVERSE(
N,
Lo,
Hi);
1040 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N),
Lo,
Hi);
1043 SplitVecRes_VECTOR_SPLICE(
N,
Lo,
Hi);
1046 SplitVecRes_VECTOR_DEINTERLEAVE(
N);
1049 SplitVecRes_VECTOR_INTERLEAVE(
N);
1052 SplitVecRes_VAARG(
N,
Lo,
Hi);
1058 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
1064 case ISD::VP_BITREVERSE:
1072 case ISD::VP_CTLZ_ZERO_UNDEF:
1074 case ISD::VP_CTTZ_ZERO_UNDEF:
1085 case ISD::VP_FFLOOR:
1090 case ISD::VP_FNEARBYINT:
1095 case ISD::VP_FP_EXTEND:
1097 case ISD::VP_FP_ROUND:
1099 case ISD::VP_FP_TO_SINT:
1101 case ISD::VP_FP_TO_UINT:
1107 case ISD::VP_LLRINT:
1109 case ISD::VP_FROUND:
1111 case ISD::VP_FROUNDEVEN:
1115 case ISD::VP_FROUNDTOZERO:
1117 case ISD::VP_SINT_TO_FP:
1119 case ISD::VP_TRUNCATE:
1121 case ISD::VP_UINT_TO_FP:
1123 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
1126 SplitVecRes_FFREXP(
N, ResNo,
Lo,
Hi);
1132 case ISD::VP_SIGN_EXTEND:
1133 case ISD::VP_ZERO_EXTEND:
1134 SplitVecRes_ExtendOp(
N,
Lo,
Hi);
1148 case ISD::VP_FMINIMUM:
1150 case ISD::VP_FMAXIMUM:
1156 case ISD::OR:
case ISD::VP_OR:
1176 case ISD::VP_FCOPYSIGN:
1177 SplitVecRes_BinOp(
N,
Lo,
Hi);
1184 SplitVecRes_TernaryOp(
N,
Lo,
Hi);
1187#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1188 case ISD::STRICT_##DAGN:
1189#include "llvm/IR/ConstrainedOps.def"
1190 SplitVecRes_StrictFPOp(
N,
Lo,
Hi);
1195 SplitVecRes_FP_TO_XINT_SAT(
N,
Lo,
Hi);
1204 SplitVecRes_OverflowOp(
N, ResNo,
Lo,
Hi);
1214 SplitVecRes_FIX(
N,
Lo,
Hi);
1216 case ISD::EXPERIMENTAL_VP_REVERSE:
1217 SplitVecRes_VP_REVERSE(
N,
Lo,
Hi);
1226void DAGTypeLegalizer::IncrementPointer(
MemSDNode *
N,
EVT MemVT,
1235 DL,
Ptr.getValueType(),
1236 APInt(
Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
1238 Flags.setNoUnsignedWrap(
true);
1240 *ScaledOffset += IncrementSize;
1244 MPI =
N->getPointerInfo().getWithOffset(IncrementSize);
1250std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask) {
1251 return SplitMask(Mask,
SDLoc(Mask));
1254std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask,
1257 EVT MaskVT =
Mask.getValueType();
1259 GetSplitVector(Mask, MaskLo, MaskHi);
1262 return std::make_pair(MaskLo, MaskHi);
1267 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1269 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1273 unsigned Opcode =
N->getOpcode();
1274 if (
N->getNumOperands() == 2) {
1280 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
1281 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1284 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
1287 std::tie(EVLLo, EVLHi) =
1288 DAG.
SplitEVL(
N->getOperand(3),
N->getValueType(0), dl);
1291 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1293 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1299 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
1301 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
1303 GetSplitVector(
N->getOperand(2), Op2Lo, Op2Hi);
1307 unsigned Opcode =
N->getOpcode();
1308 if (
N->getNumOperands() == 3) {
1314 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
1315 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1318 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
1321 std::tie(EVLLo, EVLHi) =
1322 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0), dl);
1325 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1327 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1332 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1334 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1338 unsigned Opcode =
N->getOpcode();
1357 switch (getTypeAction(InVT)) {
1372 GetExpandedOp(InOp,
Lo,
Hi);
1383 GetSplitVector(InOp,
Lo,
Hi);
1404 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT,
Lo,
Hi);
1427 assert(!(
N->getNumOperands() & 1) &&
"Unsupported CONCAT_VECTORS");
1429 unsigned NumSubvectors =
N->getNumOperands() / 2;
1430 if (NumSubvectors == 1) {
1431 Lo =
N->getOperand(0);
1432 Hi =
N->getOperand(1);
1446void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(
SDNode *
N,
SDValue &
Lo,
1468 GetSplitVector(Vec,
Lo,
Hi);
1471 EVT LoVT =
Lo.getValueType();
1480 unsigned IdxVal =
Idx->getAsZExtVal();
1481 if (IdxVal + SubElems <= LoElems) {
1489 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
1515 Lo = DAG.
getLoad(
Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
1519 auto *
Load = cast<LoadSDNode>(
Lo);
1521 IncrementPointer(Load, LoVT, MPI, StackPtr);
1524 Hi = DAG.
getLoad(
Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
1533 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1538 EVT RHSVT =
RHS.getValueType();
1541 GetSplitVector(RHS, RHSLo, RHSHi);
1558 SDValue FpValue =
N->getOperand(0);
1560 GetSplitVector(FpValue, ArgLo, ArgHi);
1573 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1577 std::tie(LoVT, HiVT) =
1588 unsigned Opcode =
N->getOpcode();
1595 GetSplitVector(N0, InLo, InHi);
1602 EVT OutLoVT, OutHiVT;
1605 assert((2 * OutNumElements) <= InNumElements &&
1606 "Illegal extend vector in reg split");
1616 for (
unsigned i = 0; i != OutNumElements; ++i)
1617 SplitHi[i] = i + OutNumElements;
1620 Lo = DAG.
getNode(Opcode, dl, OutLoVT, InLo);
1621 Hi = DAG.
getNode(Opcode, dl, OutHiVT, InHi);
1626 unsigned NumOps =
N->getNumOperands();
1640 for (
unsigned i = 1; i < NumOps; ++i) {
1645 EVT InVT =
Op.getValueType();
1650 GetSplitVector(
Op, OpLo, OpHi);
1659 EVT LoValueVTs[] = {LoVT, MVT::Other};
1660 EVT HiValueVTs[] = {HiVT, MVT::Other};
1669 Lo.getValue(1),
Hi.getValue(1));
1673 ReplaceValueWith(
SDValue(
N, 1), Chain);
1676SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(
SDNode *
N,
unsigned ResNE) {
1678 EVT VT =
N->getValueType(0);
1689 else if (NE > ResNE)
1693 EVT ChainVTs[] = {EltVT, MVT::Other};
1697 for (i = 0; i !=
NE; ++i) {
1699 for (
unsigned j = 1, e =
N->getNumOperands(); j != e; ++j) {
1700 SDValue Operand =
N->getOperand(j);
1711 Scalar.getNode()->setFlags(
N->getFlags());
1719 for (; i < ResNE; ++i)
1724 ReplaceValueWith(
SDValue(
N, 1), Chain);
1731void DAGTypeLegalizer::SplitVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo,
1734 EVT ResVT =
N->getValueType(0);
1735 EVT OvVT =
N->getValueType(1);
1736 EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
1740 SDValue LoLHS, HiLHS, LoRHS, HiRHS;
1742 GetSplitVector(
N->getOperand(0), LoLHS, HiLHS);
1743 GetSplitVector(
N->getOperand(1), LoRHS, HiRHS);
1749 unsigned Opcode =
N->getOpcode();
1761 unsigned OtherNo = 1 - ResNo;
1762 EVT OtherVT =
N->getValueType(OtherNo);
1764 SetSplitVector(
SDValue(
N, OtherNo),
1770 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
1774void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(
SDNode *
N,
SDValue &
Lo,
1780 GetSplitVector(Vec,
Lo,
Hi);
1783 unsigned IdxVal = CIdx->getZExtValue();
1784 unsigned LoNumElts =
Lo.getValueType().getVectorMinNumElements();
1785 if (IdxVal < LoNumElts) {
1787 Lo.getValueType(),
Lo, Elt,
Idx);
1797 if (CustomLowerNode(
N,
N->getValueType(0),
true))
1838 Lo = DAG.
getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
1841 auto Load = cast<LoadSDNode>(
Lo);
1843 IncrementPointer(Load, LoVT, MPI, StackPtr);
1845 Hi = DAG.
getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
1849 if (LoVT !=
Lo.getValueType())
1851 if (HiVT !=
Hi.getValueType())
1859 assert(
N->getValueType(0).isScalableVector() &&
1860 "Only scalable vectors are supported for STEP_VECTOR");
1883 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0));
1903 EVT MemoryVT =
LD->getMemoryVT();
1907 EVT LoMemVT, HiMemVT;
1914 ReplaceValueWith(
SDValue(LD, 1), NewChain);
1919 LD->getPointerInfo(), LoMemVT,
LD->getOriginalAlign(),
1923 IncrementPointer(LD, LoMemVT, MPI,
Ptr);
1926 HiMemVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
1935 ReplaceValueWith(
SDValue(LD, 1), Ch);
1940 assert(
LD->isUnindexed() &&
"Indexed VP load during type legalization!");
1949 assert(
Offset.isUndef() &&
"Unexpected indexed variable-length load offset");
1950 Align Alignment =
LD->getOriginalAlign();
1953 EVT MemoryVT =
LD->getMemoryVT();
1955 EVT LoMemVT, HiMemVT;
1956 bool HiIsEmpty =
false;
1957 std::tie(LoMemVT, HiMemVT) =
1963 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
1966 GetSplitVector(Mask, MaskLo, MaskHi);
1968 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
1973 std::tie(EVLLo, EVLHi) = DAG.
SplitEVL(EVL,
LD->getValueType(0), dl);
1982 MaskLo, EVLLo, LoMemVT, MMO,
LD->isExpandingLoad());
1991 LD->isExpandingLoad());
1997 MPI =
LD->getPointerInfo().getWithOffset(
2002 Alignment,
LD->getAAInfo(),
LD->getRanges());
2005 Offset, MaskHi, EVLHi, HiMemVT, MMO,
2006 LD->isExpandingLoad());
2016 ReplaceValueWith(
SDValue(LD, 1), Ch);
2022 "Indexed VP strided load during type legalization!");
2024 "Unexpected indexed variable-length load offset");
2031 EVT LoMemVT, HiMemVT;
2032 bool HiIsEmpty =
false;
2033 std::tie(LoMemVT, HiMemVT) =
2039 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2042 GetSplitVector(Mask, LoMask, HiMask);
2048 std::tie(LoEVL, HiEVL) =
2086 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2097 ReplaceValueWith(
SDValue(SLD, 1), Ch);
2110 assert(
Offset.isUndef() &&
"Unexpected indexed masked load offset");
2119 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2122 GetSplitVector(Mask, MaskLo, MaskHi);
2124 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2128 EVT LoMemVT, HiMemVT;
2129 bool HiIsEmpty =
false;
2130 std::tie(LoMemVT, HiMemVT) =
2133 SDValue PassThruLo, PassThruHi;
2135 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2137 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2180 ReplaceValueWith(
SDValue(MLD, 1), Ch);
2197 if (
auto *MSC = dyn_cast<MaskedGatherSDNode>(
N)) {
2198 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
2200 auto *VPSC = cast<VPGatherSDNode>(
N);
2201 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
2204 EVT MemoryVT =
N->getMemoryVT();
2205 Align Alignment =
N->getOriginalAlign();
2209 if (SplitSETCC && Ops.Mask.getOpcode() ==
ISD::SETCC) {
2210 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
2212 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
2215 EVT LoMemVT, HiMemVT;
2220 if (getTypeAction(Ops.Index.getValueType()) ==
2222 GetSplitVector(Ops.Index, IndexLo, IndexHi);
2224 std::tie(IndexLo, IndexHi) = DAG.
SplitVector(Ops.Index, dl);
2231 if (
auto *MGT = dyn_cast<MaskedGatherSDNode>(
N)) {
2232 SDValue PassThru = MGT->getPassThru();
2233 SDValue PassThruLo, PassThruHi;
2236 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2238 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2243 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
2245 OpsLo, MMO, IndexTy, ExtType);
2247 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
2249 OpsHi, MMO, IndexTy, ExtType);
2251 auto *VPGT = cast<VPGatherSDNode>(
N);
2253 std::tie(EVLLo, EVLHi) =
2254 DAG.
SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
2256 SDValue OpsLo[] = {Ch,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
2258 MMO, VPGT->getIndexType());
2260 SDValue OpsHi[] = {Ch,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
2262 MMO, VPGT->getIndexType());
2272 ReplaceValueWith(
SDValue(
N, 1), Ch);
2276 assert(
N->getValueType(0).isVector() &&
2277 N->getOperand(0).getValueType().isVector() &&
2278 "Operand types must be vectors");
2286 if (getTypeAction(
N->getOperand(0).getValueType()) ==
2288 GetSplitVector(
N->getOperand(0), LL, LH);
2292 if (getTypeAction(
N->getOperand(1).getValueType()) ==
2294 GetSplitVector(
N->getOperand(1), RL, RH);
2299 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2));
2300 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2));
2302 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
2303 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2304 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
2305 std::tie(EVLLo, EVLHi) =
2306 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
2307 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2), MaskLo,
2309 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2), MaskHi,
2323 EVT InVT =
N->getOperand(0).getValueType();
2325 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2330 unsigned Opcode =
N->getOpcode();
2331 if (
N->getNumOperands() <= 2) {
2333 Lo = DAG.
getNode(Opcode, dl, LoVT,
Lo,
N->getOperand(1), Flags);
2334 Hi = DAG.
getNode(Opcode, dl, HiVT,
Hi,
N->getOperand(1), Flags);
2342 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
2343 assert(
N->isVPOpcode() &&
"Expected VP opcode");
2346 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2349 std::tie(EVLLo, EVLHi) =
2350 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2356void DAGTypeLegalizer::SplitVecRes_FFREXP(
SDNode *
N,
unsigned ResNo,
2364 EVT InVT =
N->getOperand(0).getValueType();
2366 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2370 Lo = DAG.
getNode(
N->getOpcode(), dl, {LoVT, LoVT1},
Lo);
2371 Hi = DAG.
getNode(
N->getOpcode(), dl, {HiVT, HiVT1},
Hi);
2372 Lo->setFlags(
N->getFlags());
2373 Hi->setFlags(
N->getFlags());
2379 unsigned OtherNo = 1 - ResNo;
2380 EVT OtherVT =
N->getValueType(OtherNo);
2388 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
2395 EVT SrcVT =
N->getOperand(0).getValueType();
2396 EVT DestVT =
N->getValueType(0);
2419 EVT SplitLoVT, SplitHiVT;
2423 LLVM_DEBUG(
dbgs() <<
"Split vector extend via incremental extend:";
2424 N->dump(&DAG);
dbgs() <<
"\n");
2425 if (!
N->isVPOpcode()) {
2428 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0));
2439 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0),
2440 N->getOperand(1),
N->getOperand(2));
2445 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2448 std::tie(EVLLo, EVLHi) =
2449 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2451 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2452 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2457 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
2465 GetSplitVector(
N->getOperand(0), Inputs[0], Inputs[1]);
2466 GetSplitVector(
N->getOperand(1), Inputs[2], Inputs[3]);
2472 return N.getResNo() == 0 &&
2476 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &
DL](
SDValue &Input1,
2481 "Expected build vector node.");
2484 for (
unsigned I = 0;
I < NewElts; ++
I) {
2489 Ops[
I] = Input2.getOperand(
Idx - NewElts);
2491 Ops[
I] = Input1.getOperand(
Idx);
2493 if (Ops[
I].getValueType().bitsGT(EltVT))
2496 return DAG.getBuildVector(NewVT,
DL, Ops);
2504 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT,
this, NewElts,
2508 for (
unsigned Idx = 0;
Idx < std::size(Inputs); ++
Idx) {
2510 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.
getNode());
2519 for (
auto &
P : ShufflesIdxs) {
2520 if (
P.second.size() < 2)
2524 for (
int &
Idx : Mask) {
2527 unsigned SrcRegIdx =
Idx / NewElts;
2528 if (Inputs[SrcRegIdx].
isUndef()) {
2533 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
2536 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2541 Idx = MaskElt % NewElts +
2542 P.second[Shuffle->getOperand(MaskElt / NewElts) ==
P.first.first
2548 Inputs[
P.second[0]] =
P.first.first;
2549 Inputs[
P.second[1]] =
P.first.second;
2552 ShufflesIdxs[std::make_pair(
P.first.second,
P.first.first)].clear();
2556 for (
int &
Idx : Mask) {
2559 unsigned SrcRegIdx =
Idx / NewElts;
2560 if (Inputs[SrcRegIdx].
isUndef()) {
2565 getTypeAction(Inputs[SrcRegIdx].getValueType());
2567 Inputs[SrcRegIdx].getNumOperands() == 2 &&
2568 !Inputs[SrcRegIdx].getOperand(1).
isUndef() &&
2571 UsedSubVector.set(2 * SrcRegIdx + (
Idx % NewElts) / (NewElts / 2));
2573 if (UsedSubVector.count() > 1) {
2575 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2576 if (UsedSubVector.test(2 *
I) == UsedSubVector.test(2 *
I + 1))
2578 if (Pairs.
empty() || Pairs.
back().size() == 2)
2580 if (UsedSubVector.test(2 *
I)) {
2581 Pairs.
back().emplace_back(
I, 0);
2583 assert(UsedSubVector.test(2 *
I + 1) &&
2584 "Expected to be used one of the subvectors.");
2585 Pairs.
back().emplace_back(
I, 1);
2588 if (!Pairs.
empty() && Pairs.
front().size() > 1) {
2590 for (
int &
Idx : Mask) {
2593 unsigned SrcRegIdx =
Idx / NewElts;
2595 Pairs, [SrcRegIdx](
ArrayRef<std::pair<unsigned, int>> Idxs) {
2596 return Idxs.front().first == SrcRegIdx ||
2597 Idxs.back().first == SrcRegIdx;
2599 if (It == Pairs.
end())
2601 Idx = It->front().first * NewElts + (
Idx % NewElts) % (NewElts / 2) +
2602 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
2605 for (
ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
2606 Inputs[Idxs.front().first] = DAG.
getNode(
2608 Inputs[Idxs.front().first].getValueType(),
2609 Inputs[Idxs.front().first].
getOperand(Idxs.front().second),
2610 Inputs[Idxs.back().first].
getOperand(Idxs.back().second));
2619 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2620 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[
I].getNode());
2623 if (Shuffle->getOperand(0).getValueType() != NewVT)
2626 if (!Inputs[
I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
2627 !Shuffle->isSplat()) {
2629 }
else if (!Inputs[
I].hasOneUse() &&
2630 !Shuffle->getOperand(1).isUndef()) {
2632 for (
int &
Idx : Mask) {
2635 unsigned SrcRegIdx =
Idx / NewElts;
2638 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2643 int OpIdx = MaskElt / NewElts;
2656 for (
int OpIdx = 0; OpIdx < 2; ++OpIdx) {
2657 if (Shuffle->getOperand(OpIdx).isUndef())
2659 auto *It =
find(Inputs, Shuffle->getOperand(OpIdx));
2660 if (It == std::end(Inputs))
2662 int FoundOp = std::distance(std::begin(Inputs), It);
2665 for (
int &
Idx : Mask) {
2668 unsigned SrcRegIdx =
Idx / NewElts;
2671 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2676 int MaskIdx = MaskElt / NewElts;
2677 if (OpIdx == MaskIdx)
2678 Idx = MaskElt % NewElts + FoundOp * NewElts;
2681 Op = (OpIdx + 1) % 2;
2689 for (
int &
Idx : Mask) {
2692 unsigned SrcRegIdx =
Idx / NewElts;
2695 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2696 int OpIdx = MaskElt / NewElts;
2699 Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
2705 TryPeekThroughShufflesInputs(OrigMask);
2707 auto &&MakeUniqueInputs = [&Inputs, &
IsConstant,
2711 for (
const auto &
I : Inputs) {
2713 UniqueConstantInputs.
insert(
I);
2714 else if (!
I.isUndef())
2719 if (UniqueInputs.
size() != std::size(Inputs)) {
2720 auto &&UniqueVec = UniqueInputs.
takeVector();
2721 auto &&UniqueConstantVec = UniqueConstantInputs.
takeVector();
2722 unsigned ConstNum = UniqueConstantVec.size();
2723 for (
int &
Idx : Mask) {
2726 unsigned SrcRegIdx =
Idx / NewElts;
2727 if (Inputs[SrcRegIdx].
isUndef()) {
2731 const auto It =
find(UniqueConstantVec, Inputs[SrcRegIdx]);
2732 if (It != UniqueConstantVec.end()) {
2734 NewElts * std::distance(UniqueConstantVec.begin(), It);
2735 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2738 const auto RegIt =
find(UniqueVec, Inputs[SrcRegIdx]);
2739 assert(RegIt != UniqueVec.end() &&
"Cannot find non-const value.");
2741 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
2742 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2744 copy(UniqueConstantVec, std::begin(Inputs));
2745 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
2748 MakeUniqueInputs(OrigMask);
2750 copy(Inputs, std::begin(OrigInputs));
2756 unsigned FirstMaskIdx =
High * NewElts;
2759 assert(!Output &&
"Expected default initialized initial value.");
2760 TryPeekThroughShufflesInputs(Mask);
2761 MakeUniqueInputs(Mask);
2763 copy(Inputs, std::begin(TmpInputs));
2766 bool SecondIteration =
false;
2767 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](
unsigned Idx) {
2772 if (UsedIdx >= 0 &&
static_cast<unsigned>(UsedIdx) ==
Idx)
2773 SecondIteration =
true;
2774 return SecondIteration;
2777 Mask, std::size(Inputs), std::size(Inputs),
2779 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
2780 [&Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2783 Output = BuildVector(Inputs[
Idx], Inputs[
Idx], Mask);
2785 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[
Idx],
2786 DAG.getUNDEF(NewVT), Mask);
2787 Inputs[
Idx] = Output;
2789 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2792 if (AccumulateResults(Idx1)) {
2795 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
2797 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[Idx1],
2798 Inputs[Idx2], Mask);
2802 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
2804 Output = DAG.getVectorShuffle(NewVT,
DL, TmpInputs[Idx1],
2805 TmpInputs[Idx2], Mask);
2807 Inputs[Idx1] = Output;
2809 copy(OrigInputs, std::begin(Inputs));
2814 EVT OVT =
N->getValueType(0);
2821 const Align Alignment =
2822 DAG.getDataLayout().getABITypeAlign(NVT.
getTypeForEVT(*DAG.getContext()));
2824 Lo = DAG.getVAArg(NVT, dl, Chain,
Ptr, SV, Alignment.
value());
2825 Hi = DAG.getVAArg(NVT, dl,
Lo.getValue(1),
Ptr, SV, Alignment.
value());
2826 Chain =
Hi.getValue(1);
2830 ReplaceValueWith(
SDValue(
N, 1), Chain);
2835 EVT DstVTLo, DstVTHi;
2836 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(
N->getValueType(0));
2840 EVT SrcVT =
N->getOperand(0).getValueType();
2842 GetSplitVector(
N->getOperand(0), SrcLo, SrcHi);
2844 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(
N, 0);
2846 Lo = DAG.getNode(
N->getOpcode(), dl, DstVTLo, SrcLo,
N->getOperand(1));
2847 Hi = DAG.getNode(
N->getOpcode(), dl, DstVTHi, SrcHi,
N->getOperand(1));
2853 GetSplitVector(
N->getOperand(0), InLo, InHi);
2862 EVT VT =
N->getValueType(0);
2866 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
2870 DAG.getVectorIdxConstant(0,
DL));
2878 EVT VT =
N->getValueType(0);
2885 Align Alignment = DAG.getReducedAlign(VT,
false);
2891 auto &MF = DAG.getMachineFunction();
2905 DAG.getConstant(1,
DL, PtrVT));
2907 DAG.getConstant(EltWidth,
DL, PtrVT));
2909 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth,
DL, PtrVT);
2911 SDValue TrueMask = DAG.getBoolConstant(
true,
DL,
Mask.getValueType(), VT);
2912 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(),
DL, Val, StorePtr,
2913 DAG.getUNDEF(PtrVT), Stride, TrueMask,
2916 SDValue Load = DAG.getLoadVP(VT,
DL, Store, StackPtr, Mask, EVL, LoadMMO);
2918 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT);
2920 DAG.getVectorIdxConstant(0,
DL));
// Split the two results of a VECTOR_DEINTERLEAVE node: both vector operands
// are first split into Lo/Hi halves, which are then recombined below.
// NOTE(review): this chunk is a garbled extraction — the DAG.getNode(...)
// call starts and the trailing SetSplitVector lines appear to have been
// dropped; verify against the upstream LLVM source before relying on it.
2926void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(
SDNode *
N) {
2928 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
// Both operands must already have split halves registered.
2929 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2930 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
// Recombine the halves; presumably deinterleaving each pair — TODO confirm,
// the node construction preceding these argument lists is missing here.
2934 DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
2936 DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
// Split the two results of a VECTOR_INTERLEAVE node: interleave the Lo
// halves of both operands and, separately, the Hi halves, then register the
// two sub-results of each new node as the split halves of results 0 and 1.
// NOTE(review): garbled extraction — the node-construction text before the
// DAG.getVTList argument lists is missing; verify against upstream LLVM.
2942void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(
SDNode *
N) {
2943 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
2944 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2945 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
// Lo halves of both operands feed one node; Hi halves feed the other.
2949 DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
2951 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
// Each new two-result node provides the (Lo, Hi) pair for one result of N.
2953 SetSplitVector(
SDValue(
N, 0), Res[0].getValue(0), Res[0].getValue(1));
2954 SetSplitVector(
SDValue(
N, 1), Res[1].getValue(0), Res[1].getValue(1));
// Dispatcher: operand OpNo of N is a vector that must be split. Tries custom
// target lowering first, then switches on the opcode to the matching
// SplitVecOp_* helper. Returns false when the result Res replaces N in place.
// NOTE(review): garbled extraction — many `case` labels and fall-through
// code between the visible lines have been dropped; the switch structure
// below is incomplete relative to upstream LLVM. Verify before editing.
2965bool DAGTypeLegalizer::SplitVectorOperand(
SDNode *
N,
unsigned OpNo) {
// Give the target a chance to lower this node itself.
2970 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
2973 switch (
N->getOpcode()) {
2976 dbgs() <<
"SplitVectorOperand Op #" << OpNo <<
": ";
2984 case ISD::SETCC: Res = SplitVecOp_VSETCC(
N);
break;
2990 case ISD::VP_TRUNCATE:
2992 Res = SplitVecOp_TruncateHelper(
N);
2995 case ISD::VP_FP_ROUND:
// Memory operations: each store/scatter variant has a dedicated splitter.
2999 Res = SplitVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
3002 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(
N), OpNo);
3004 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
3005 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(
N), OpNo);
3008 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(
N), OpNo);
3011 case ISD::VP_SCATTER:
3012 Res = SplitVecOp_Scatter(cast<MemSDNode>(
N), OpNo);
3015 case ISD::VP_GATHER:
3016 Res = SplitVecOp_Gather(cast<MemSDNode>(
N), OpNo);
3019 Res = SplitVecOp_VSELECT(
N, OpNo);
3025 case ISD::VP_SINT_TO_FP:
3026 case ISD::VP_UINT_TO_FP:
// int->fp conversions that shrink the element size go through the
// truncate helper; otherwise handle as a plain unary op. For strict FP
// ops the data operand is at index 1 (index 0 is the chain).
3027 if (
N->getValueType(0).bitsLT(
3028 N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).getValueType()))
3029 Res = SplitVecOp_TruncateHelper(
N);
3031 Res = SplitVecOp_UnaryOp(
N);
3035 Res = SplitVecOp_FP_TO_XINT_SAT(
N);
3039 case ISD::VP_FP_TO_SINT:
3040 case ISD::VP_FP_TO_UINT:
3051 Res = SplitVecOp_UnaryOp(
N);
3054 Res = SplitVecOp_FPOpDifferentTypes(
N);
3060 Res = SplitVecOp_ExtVecInRegOp(
N);
3078 Res = SplitVecOp_VECREDUCE(
N, OpNo);
3082 Res = SplitVecOp_VECREDUCE_SEQ(
N);
// All VP reductions share one splitter.
3084 case ISD::VP_REDUCE_FADD:
3085 case ISD::VP_REDUCE_SEQ_FADD:
3086 case ISD::VP_REDUCE_FMUL:
3087 case ISD::VP_REDUCE_SEQ_FMUL:
3088 case ISD::VP_REDUCE_ADD:
3089 case ISD::VP_REDUCE_MUL:
3090 case ISD::VP_REDUCE_AND:
3091 case ISD::VP_REDUCE_OR:
3092 case ISD::VP_REDUCE_XOR:
3093 case ISD::VP_REDUCE_SMAX:
3094 case ISD::VP_REDUCE_SMIN:
3095 case ISD::VP_REDUCE_UMAX:
3096 case ISD::VP_REDUCE_UMIN:
3097 case ISD::VP_REDUCE_FMAX:
3098 case ISD::VP_REDUCE_FMIN:
3099 Res = SplitVecOp_VP_REDUCE(
N, OpNo);
// No replacement produced: the helper already updated the node.
3104 if (!Res.
getNode())
return false;
3111 if (
N->isStrictFPOpcode())
3113 "Invalid operand expansion");
3116 "Invalid operand expansion");
// Replace all uses of result 0 of N with the newly built value.
3118 ReplaceValueWith(
SDValue(
N, 0), Res);
// Split a VSELECT whose mask operand (operand 0) requires splitting: the
// mask and both data operands are each split into Lo/Hi halves.
// NOTE(review): garbled extraction — the declarations of Mask/Src0/Src1 and
// the final node construction are missing from this view; verify upstream.
3122SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(
SDNode *
N,
unsigned OpNo) {
3125 assert(OpNo == 0 &&
"Illegal operand must be mask");
3132 assert(
Mask.getValueType().isVector() &&
"VSELECT without a vector mask?");
3135 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3136 assert(
Lo.getValueType() ==
Hi.getValueType() &&
3137 "Lo and Hi have differing types");
3140 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3141 assert(LoOpVT == HiOpVT &&
"Asymmetric vector split?");
// Split both select inputs and the mask the same way.
3143 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3144 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0,
DL);
3145 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1,
DL);
3146 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
// Split the vector operand of a VECREDUCE_* node: reduce each half
// (presumably combined into `Partial` — the combining lines are missing
// from this garbled extraction) and apply the reduction once more.
3156SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(
SDNode *
N,
unsigned OpNo) {
3157 EVT ResVT =
N->getValueType(0);
3161 SDValue VecOp =
N->getOperand(OpNo);
3163 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3164 GetSplitVector(VecOp,
Lo,
Hi);
3166 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
// Final scalar reduction over the partially-combined halves; node flags
// (e.g. fast-math) are propagated from N.
3172 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
N->getFlags());
3176 EVT ResVT =
N->getValueType(0);
3185 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3186 GetSplitVector(VecOp,
Lo,
Hi);
3188 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3194 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
Hi, Flags);
// Split the vector operand (operand 1) of a VP_REDUCE_* node. The mask
// (operand 2) and the explicit vector length EVL (operand 3) are split
// alongside the data, then the reduction is chained: reduce the Lo half
// with the start value, then reduce the Hi half with that partial result.
3197SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(
SDNode *
N,
unsigned OpNo) {
3198 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3199 assert(OpNo == 1 &&
"Can only split reduce vector operand");
3201 unsigned Opc =
N->getOpcode();
3202 EVT ResVT =
N->getValueType(0);
3206 SDValue VecOp =
N->getOperand(OpNo);
3208 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3209 GetSplitVector(VecOp,
Lo,
Hi);
// Split the control operands to match the data halves.
3212 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
3215 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(
N->getOperand(3), VecVT, dl);
// First reduction consumes the original start value (operand 0)...
3220 DAG.
getNode(Opc, dl, ResVT, {
N->getOperand(0),
Lo, MaskLo, EVLLo},
Flags);
// ...and the second uses the Lo result as its start value.
3221 return DAG.getNode(Opc, dl, ResVT, {ResLo,
Hi, MaskHi, EVLHi},
Flags);
3226 EVT ResVT =
N->getValueType(0);
3229 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3230 EVT InVT =
Lo.getValueType();
3235 if (
N->isStrictFPOpcode()) {
3236 Lo = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3237 { N->getOperand(0), Lo });
3238 Hi = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3239 { N->getOperand(0), Hi });
3248 ReplaceValueWith(
SDValue(
N, 1), Ch);
3249 }
else if (
N->getNumOperands() == 3) {
3250 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3251 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3252 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
3253 std::tie(EVLLo, EVLHi) =
3254 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
3255 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo, MaskLo, EVLLo);
3256 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi, MaskHi, EVLHi);
3258 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo);
3259 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi);
3269 EVT ResVT =
N->getValueType(0);
3271 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3275 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3281 Lo = BitConvertToInteger(
Lo);
3282 Hi = BitConvertToInteger(
Hi);
3284 if (DAG.getDataLayout().isBigEndian())
3292 assert(OpNo == 1 &&
"Invalid OpNo; can only split SubVec.");
3294 EVT ResVT =
N->getValueType(0);
3302 GetSplitVector(SubVec,
Lo,
Hi);
3305 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3311 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3313 return SecondInsertion;
// Extract a subvector from a split source. If the requested range lies
// entirely in the Lo half, extract from Lo (re-indexed); otherwise fall
// back to spilling the whole vector to a stack temporary and loading the
// subvector back. NOTE(review): garbled extraction — several assertion
// conditions and the intervening extraction logic are missing here.
3316SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
3318 EVT SubVT =
N->getValueType(0);
3323 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3325 uint64_t LoEltsMin =
Lo.getValueType().getVectorMinNumElements();
// Fast path: subvector does not cross the Lo/Hi boundary.
3328 if (IdxVal < LoEltsMin) {
3330 "Extracted subvector crosses vector split!");
3333 N->getOperand(0).getValueType().isScalableVector())
// Extract from the Hi half with the index rebased past the Lo elements.
3335 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
3340 "Extracting scalable subvector from fixed-width unsupported");
3348 "subvector from a scalable predicate vector");
// Slow path: round-trip through a stack slot.
3354 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3356 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3357 auto &MF = DAG.getMachineFunction();
3361 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3368 SubVT, dl, Store, StackPtr,
// Extract an element from a split vector. Constant indices are redirected
// to the Lo or Hi half; otherwise try custom lowering, and as a last resort
// spill the vector to the stack and load the element. A sub-element-width
// result is widened via an extending load and truncated/zero-extended back.
// NOTE(review): garbled extraction — the Vec/Idx declarations and parts of
// the stack-load path are missing from this view; verify upstream.
3372SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
3381 GetSplitVector(Vec,
Lo,
Hi);
3383 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
// Constant index in the Lo half: extract directly from Lo.
3385 if (IdxVal < LoElts)
// Otherwise rebase the index into the Hi half.
3389 DAG.getConstant(IdxVal - LoElts,
SDLoc(
N),
3390 Idx.getValueType())), 0);
3394 if (CustomLowerNode(
N,
N->getValueType(0),
true))
// Fallback: store the whole vector to a stack temporary...
3410 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3412 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3413 auto &MF = DAG.getMachineFunction();
3416 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
// ...then load the element back. Narrow results load the full element and
// truncate; this avoids an illegal sub-element load.
3424 if (
N->getValueType(0).bitsLT(EltVT)) {
3425 SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
3427 return DAG.getZExtOrTrunc(Load, dl,
N->getValueType(0));
3430 return DAG.getExtLoad(
3441 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
3449 SplitVecRes_Gather(
N,
Lo,
Hi);
3452 ReplaceValueWith(
SDValue(
N, 0), Res);
3457 assert(
N->isUnindexed() &&
"Indexed vp_store of vector?");
3461 assert(
Offset.isUndef() &&
"Unexpected VP store offset");
3463 SDValue EVL =
N->getVectorLength();
3465 Align Alignment =
N->getOriginalAlign();
3471 GetSplitVector(
Data, DataLo, DataHi);
3473 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3478 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3481 GetSplitVector(Mask, MaskLo, MaskHi);
3483 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3486 EVT MemoryVT =
N->getMemoryVT();
3487 EVT LoMemVT, HiMemVT;
3488 bool HiIsEmpty =
false;
3489 std::tie(LoMemVT, HiMemVT) =
3490 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3494 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL,
Data.getValueType(),
DL);
3502 Lo = DAG.getStoreVP(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, EVLLo, LoMemVT, MMO,
3503 N->getAddressingMode(),
N->isTruncatingStore(),
3504 N->isCompressingStore());
3511 N->isCompressingStore());
3519 MPI =
N->getPointerInfo().getWithOffset(
3522 MMO = DAG.getMachineFunction().getMachineMemOperand(
3524 Alignment,
N->getAAInfo(),
N->getRanges());
3526 Hi = DAG.getStoreVP(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
3527 N->getAddressingMode(),
N->isTruncatingStore(),
3528 N->isCompressingStore());
3537 assert(
N->isUnindexed() &&
"Indexed vp_strided_store of a vector?");
3538 assert(
N->getOffset().isUndef() &&
"Unexpected VP strided store offset");
3545 GetSplitVector(
Data, LoData, HiData);
3547 std::tie(LoData, HiData) = DAG.SplitVector(
Data,
DL);
3549 EVT LoMemVT, HiMemVT;
3550 bool HiIsEmpty =
false;
3551 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
3557 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
3558 else if (getTypeAction(
Mask.getValueType()) ==
3560 GetSplitVector(Mask, LoMask, HiMask);
3562 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3565 std::tie(LoEVL, HiEVL) =
3566 DAG.SplitEVL(
N->getVectorLength(),
Data.getValueType(),
DL);
3570 N->getChain(),
DL, LoData,
N->getBasePtr(),
N->getOffset(),
3571 N->getStride(), LoMask, LoEVL, LoMemVT,
N->getMemOperand(),
3572 N->getAddressingMode(),
N->isTruncatingStore(),
N->isCompressingStore());
3583 EVT PtrVT =
N->getBasePtr().getValueType();
3586 DAG.getSExtOrTrunc(
N->getStride(),
DL, PtrVT));
3589 Align Alignment =
N->getOriginalAlign();
3597 Alignment,
N->getAAInfo(),
N->getRanges());
3600 N->getChain(),
DL, HiData,
Ptr,
N->getOffset(),
N->getStride(), HiMask,
3601 HiEVL, HiMemVT, MMO,
N->getAddressingMode(),
N->isTruncatingStore(),
3602 N->isCompressingStore());
3611 assert(
N->isUnindexed() &&
"Indexed masked store of vector?");
3615 assert(
Offset.isUndef() &&
"Unexpected indexed masked store offset");
3618 Align Alignment =
N->getOriginalAlign();
3624 GetSplitVector(
Data, DataLo, DataHi);
3626 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3631 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3634 GetSplitVector(Mask, MaskLo, MaskHi);
3636 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3639 EVT MemoryVT =
N->getMemoryVT();
3640 EVT LoMemVT, HiMemVT;
3641 bool HiIsEmpty =
false;
3642 std::tie(LoMemVT, HiMemVT) =
3643 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3651 Lo = DAG.getMaskedStore(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, LoMemVT, MMO,
3652 N->getAddressingMode(),
N->isTruncatingStore(),
3653 N->isCompressingStore());
3662 N->isCompressingStore());
3670 MPI =
N->getPointerInfo().getWithOffset(
3673 MMO = DAG.getMachineFunction().getMachineMemOperand(
3675 Alignment,
N->getAAInfo(),
N->getRanges());
3677 Hi = DAG.getMaskedStore(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, HiMemVT, MMO,
3678 N->getAddressingMode(),
N->isTruncatingStore(),
3679 N->isCompressingStore());
3692 EVT MemoryVT =
N->getMemoryVT();
3693 Align Alignment =
N->getOriginalAlign();
3701 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3702 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
3705 auto *VPSC = cast<VPScatterSDNode>(
N);
3706 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
3711 EVT LoMemVT, HiMemVT;
3712 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3717 GetSplitVector(Ops.Data, DataLo, DataHi);
3719 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data,
DL);
3723 if (OpNo == 1 && Ops.Mask.getOpcode() ==
ISD::SETCC) {
3724 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
3726 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask,
DL);
3730 if (getTypeAction(Ops.Index.getValueType()) ==
3732 GetSplitVector(Ops.Index, IndexLo, IndexHi);
3734 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index,
DL);
3742 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3743 SDValue OpsLo[] = {Ch, DataLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
3745 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3746 MSC->getIndexType(), MSC->isTruncatingStore());
3751 SDValue OpsHi[] = {
Lo, DataHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
3752 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi,
3753 MMO, MSC->getIndexType(),
3754 MSC->isTruncatingStore());
3756 auto *VPSC = cast<VPScatterSDNode>(
N);
3758 std::tie(EVLLo, EVLHi) =
3759 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(),
DL);
3761 SDValue OpsLo[] = {Ch, DataLo,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
3762 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3763 VPSC->getIndexType());
3768 SDValue OpsHi[] = {
Lo, DataHi,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
3769 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi, MMO,
3770 VPSC->getIndexType());
3774 assert(
N->isUnindexed() &&
"Indexed store of vector?");
3775 assert(OpNo == 1 &&
"Can only split the stored value");
3778 bool isTruncating =
N->isTruncatingStore();
3781 EVT MemoryVT =
N->getMemoryVT();
3782 Align Alignment =
N->getOriginalAlign();
3786 GetSplitVector(
N->getOperand(1),
Lo,
Hi);
3788 EVT LoMemVT, HiMemVT;
3789 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3796 Lo = DAG.getTruncStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), LoMemVT,
3797 Alignment, MMOFlags, AAInfo);
3799 Lo = DAG.getStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), Alignment, MMOFlags,
3803 IncrementPointer(
N, LoMemVT, MPI,
Ptr);
3806 Hi = DAG.getTruncStore(Ch,
DL,
Hi,
Ptr, MPI,
3807 HiMemVT, Alignment, MMOFlags, AAInfo);
3809 Hi = DAG.getStore(Ch,
DL,
Hi,
Ptr, MPI, Alignment, MMOFlags, AAInfo);
3823 EVT EltVT =
N->getValueType(0).getVectorElementType();
3825 for (
unsigned i = 0, e =
Op.getValueType().getVectorNumElements();
3828 DAG.getVectorIdxConstant(i,
DL)));
3832 return DAG.getBuildVector(
N->getValueType(0),
DL, Elts);
3853 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
3854 SDValue InVec =
N->getOperand(OpNo);
3856 EVT OutVT =
N->getValueType(0);
3864 EVT LoOutVT, HiOutVT;
3865 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
3866 assert(LoOutVT == HiOutVT &&
"Unequal split?");
3871 if (isTypeLegal(LoOutVT) ||
3872 InElementSize <= OutElementSize * 2)
3873 return SplitVecOp_UnaryOp(
N);
3882 return SplitVecOp_UnaryOp(
N);
3886 GetSplitVector(InVec, InLoVec, InHiVec);
3892 EVT HalfElementVT = IsFloat ?
3894 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
3901 if (
N->isStrictFPOpcode()) {
3902 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3903 {N->getOperand(0), InLoVec});
3904 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3905 {N->getOperand(0), InHiVec});
3911 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InLoVec);
3912 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InHiVec);
3924 if (
N->isStrictFPOpcode()) {
3928 DAG.getTargetConstant(0,
DL, TLI.
getPointerTy(DAG.getDataLayout()))});
3936 DAG.getTargetConstant(
3942 assert(
N->getValueType(0).isVector() &&
3943 N->getOperand(0).getValueType().isVector() &&
3944 "Operand types must be vectors");
3946 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
3948 GetSplitVector(
N->getOperand(0), Lo0, Hi0);
3949 GetSplitVector(
N->getOperand(1), Lo1, Hi1);
3960 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
3961 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3962 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
3963 std::tie(EVLLo, EVLHi) =
3964 DAG.SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
3965 LoRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Lo0, Lo1,
3966 N->getOperand(2), MaskLo, EVLLo);
3967 HiRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Hi0, Hi1,
3968 N->getOperand(2), MaskHi, EVLHi);
3972 EVT OpVT =
N->getOperand(0).getValueType();
3975 return DAG.getNode(ExtendCode,
DL,
N->getValueType(0), Con);
3981 EVT ResVT =
N->getValueType(0);
3984 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3985 EVT InVT =
Lo.getValueType();
3990 if (
N->isStrictFPOpcode()) {
3991 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
3992 { N->getOperand(0), Lo, N->getOperand(2) });
3993 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
3994 { N->getOperand(0), Hi, N->getOperand(2) });
3998 Lo.getValue(1),
Hi.getValue(1));
3999 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4000 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4001 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4002 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4003 std::tie(EVLLo, EVLHi) =
4004 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4005 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4006 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
// Split a two-operand FP node whose result type differs from its operand
// types. If the split result halves are not legal types, unroll the whole
// op element-by-element instead. Otherwise split both operands and emit the
// op on each half. NOTE(review): garbled extraction — the final result
// combination after building Lo/Hi is missing from this view.
4020SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4023 EVT LHSLoVT, LHSHiVT;
4024 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
// Bail out to full unrolling when the split types can't be used directly.
4026 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4027 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
// Operand 0 is split to the explicit Lo/Hi result types; operand 1 uses
// the default split of its own type.
4030 std::tie(LHSLo, LHSHi) =
4031 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4034 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4036 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4037 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4043 EVT ResVT =
N->getValueType(0);
4046 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4047 EVT InVT =
Lo.getValueType();
4053 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4054 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
// Dispatcher: result ResNo of N has an illegal vector type that must be
// widened. Tries custom target lowering first, then switches on the opcode
// to the matching WidenVecRes_* helper and registers the widened value.
// NOTE(review): garbled extraction — many `case` labels, the unrollExpandedOp
// lambda body, and several fall-throughs are missing; the switch below is
// incomplete relative to upstream LLVM. Verify before editing.
4063void DAGTypeLegalizer::WidenVectorResult(
SDNode *
N,
unsigned ResNo) {
4064 LLVM_DEBUG(
dbgs() <<
"Widen node result " << ResNo <<
": ";
N->dump(&DAG));
// Give the target a chance to widen this node itself.
4067 if (CustomWidenLowerNode(
N,
N->getValueType(ResNo)))
// Helper used by several cases to fall back to per-element expansion.
4072 auto unrollExpandedOp = [&]() {
4077 EVT VT =
N->getValueType(0);
4087 switch (
N->getOpcode()) {
4090 dbgs() <<
"WidenVectorResult #" << ResNo <<
": ";
4102 Res = WidenVecRes_INSERT_SUBVECTOR(
N);
4106 case ISD::LOAD: Res = WidenVecRes_LOAD(
N);
break;
4110 Res = WidenVecRes_ScalarOp(
N);
4115 case ISD::VP_SELECT:
4117 Res = WidenVecRes_Select(
N);
4121 case ISD::SETCC: Res = WidenVecRes_SETCC(
N);
break;
4122 case ISD::UNDEF: Res = WidenVecRes_UNDEF(
N);
break;
4124 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N));
4127 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(
N));
4129 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
4130 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N));
4133 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(
N));
4136 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(
N));
4138 case ISD::VP_GATHER:
4139 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(
N));
4142 Res = WidenVecRes_VECTOR_REVERSE(
N);
// Plain and VP binary operations share WidenVecRes_Binary.
4150 case ISD::OR:
case ISD::VP_OR:
4159 case ISD::VP_FMINIMUM:
4161 case ISD::VP_FMAXIMUM:
4192 case ISD::VP_FCOPYSIGN:
4193 Res = WidenVecRes_Binary(
N);
4198 if (unrollExpandedOp())
// Ops that can trap (e.g. integer division) need the trap-aware widener.
4213 Res = WidenVecRes_BinaryCanTrap(
N);
4222 Res = WidenVecRes_BinaryWithExtraScalarOp(
N);
// All strict-FP (constrained) opcodes expand from ConstrainedOps.def.
4225#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4226 case ISD::STRICT_##DAGN:
4227#include "llvm/IR/ConstrainedOps.def"
4228 Res = WidenVecRes_StrictFP(
N);
4237 Res = WidenVecRes_OverflowOp(
N, ResNo);
4241 Res = WidenVecRes_FCOPYSIGN(
N);
4246 Res = WidenVecRes_UnarySameEltsWithScalarArg(
N);
4251 if (!unrollExpandedOp())
4252 Res = WidenVecRes_ExpOp(
N);
4258 Res = WidenVecRes_EXTEND_VECTOR_INREG(
N);
// Conversions (plain and VP variants) share WidenVecRes_Convert.
4263 case ISD::VP_FP_EXTEND:
4265 case ISD::VP_FP_ROUND:
4267 case ISD::VP_FP_TO_SINT:
4269 case ISD::VP_FP_TO_UINT:
4271 case ISD::VP_SIGN_EXTEND:
4273 case ISD::VP_SINT_TO_FP:
4274 case ISD::VP_TRUNCATE:
4277 case ISD::VP_UINT_TO_FP:
4279 case ISD::VP_ZERO_EXTEND:
4280 Res = WidenVecRes_Convert(
N);
4285 Res = WidenVecRes_FP_TO_XINT_SAT(
N);
4291 case ISD::VP_LLRINT:
4292 Res = WidenVecRes_XRINT(
N);
4312 if (unrollExpandedOp())
// Unary operations (plain and VP variants) share WidenVecRes_Unary.
4322 case ISD::VP_BITREVERSE:
4328 case ISD::VP_CTLZ_ZERO_UNDEF:
4334 case ISD::VP_CTTZ_ZERO_UNDEF:
4339 case ISD::VP_FFLOOR:
4341 case ISD::VP_FNEARBYINT:
4342 case ISD::VP_FROUND:
4343 case ISD::VP_FROUNDEVEN:
4344 case ISD::VP_FROUNDTOZERO:
4348 Res = WidenVecRes_Unary(
N);
4355 Res = WidenVecRes_Ternary(
N);
// Record the widened value for result ResNo.
4361 SetWidenedVector(
SDValue(
N, ResNo), Res);
4368 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4369 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4370 SDValue InOp3 = GetWidenedVector(
N->getOperand(2));
4371 if (
N->getNumOperands() == 3)
4372 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4374 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
4375 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4379 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4380 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
4387 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4388 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4389 if (
N->getNumOperands() == 2)
4390 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2,
4393 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
4394 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4398 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4399 {InOp1, InOp2, Mask, N->getOperand(3)},
N->getFlags());
// Widen a binary vector op that carries an extra scalar operand (InOp3):
// only the two vector inputs are widened; the scalar passes through.
// NOTE(review): garbled extraction — the InOp3/WidenVT setup lines and the
// trailing argument of the final getNode call are missing from this view.
4402SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(
SDNode *
N) {
4406 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4407 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4409 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
4418 unsigned ConcatEnd,
EVT VT,
EVT MaxVT,
4421 if (ConcatEnd == 1) {
4422 VT = ConcatOps[0].getValueType();
4424 return ConcatOps[0];
4427 SDLoc dl(ConcatOps[0]);
4434 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
4435 int Idx = ConcatEnd - 1;
4436 VT = ConcatOps[
Idx--].getValueType();
4437 while (
Idx >= 0 && ConcatOps[
Idx].getValueType() == VT)
4450 unsigned NumToInsert = ConcatEnd -
Idx - 1;
4451 for (
unsigned i = 0, OpIdx =
Idx+1; i < NumToInsert; i++, OpIdx++) {
4455 ConcatOps[
Idx+1] = VecOp;
4456 ConcatEnd =
Idx + 2;
4462 unsigned RealVals = ConcatEnd -
Idx - 1;
4463 unsigned SubConcatEnd = 0;
4464 unsigned SubConcatIdx =
Idx + 1;
4465 while (SubConcatEnd < RealVals)
4466 SubConcatOps[SubConcatEnd++] = ConcatOps[++
Idx];
4467 while (SubConcatEnd < OpsToConcat)
4468 SubConcatOps[SubConcatEnd++] = undefVec;
4470 NextVT, SubConcatOps);
4471 ConcatEnd = SubConcatIdx + 1;
4476 if (ConcatEnd == 1) {
4477 VT = ConcatOps[0].getValueType();
4479 return ConcatOps[0];
4484 if (NumOps != ConcatEnd ) {
4486 for (
unsigned j = ConcatEnd; j < NumOps; ++j)
4487 ConcatOps[j] = UndefVal;
4495 unsigned Opcode =
N->getOpcode();
4503 NumElts = NumElts / 2;
4507 if (NumElts != 1 && !TLI.
canOpTrap(
N->getOpcode(), VT)) {
4509 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4510 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4511 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
4523 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4524 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4525 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4528 unsigned ConcatEnd = 0;
4536 while (CurNumElts != 0) {
4537 while (CurNumElts >= NumElts) {
4539 DAG.getVectorIdxConstant(
Idx, dl));
4541 DAG.getVectorIdxConstant(
Idx, dl));
4542 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
4544 CurNumElts -= NumElts;
4547 NumElts = NumElts / 2;
4552 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4554 InOp1, DAG.getVectorIdxConstant(
Idx, dl));
4556 InOp2, DAG.getVectorIdxConstant(
Idx, dl));
4557 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
4568 switch (
N->getOpcode()) {
4571 return WidenVecRes_STRICT_FSETCC(
N);
4578 return WidenVecRes_Convert_StrictFP(
N);
4584 unsigned NumOpers =
N->getNumOperands();
4585 unsigned Opcode =
N->getOpcode();
4592 NumElts = NumElts / 2;
4603 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4607 unsigned ConcatEnd = 0;
4614 for (
unsigned i = 1; i < NumOpers; ++i) {
4620 Oper = GetWidenedVector(Oper);
4626 DAG.getUNDEF(WideOpVT), Oper,
4627 DAG.getVectorIdxConstant(0, dl));
4639 while (CurNumElts != 0) {
4640 while (CurNumElts >= NumElts) {
4643 for (
unsigned i = 0; i < NumOpers; ++i) {
4646 EVT OpVT =
Op.getValueType();
4652 DAG.getVectorIdxConstant(
Idx, dl));
4658 EVT OperVT[] = {VT, MVT::Other};
4660 ConcatOps[ConcatEnd++] = Oper;
4663 CurNumElts -= NumElts;
4666 NumElts = NumElts / 2;
4671 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4674 for (
unsigned i = 0; i < NumOpers; ++i) {
4677 EVT OpVT =
Op.getValueType();
4681 DAG.getVectorIdxConstant(
Idx, dl));
4686 EVT WidenVT[] = {WidenEltVT, MVT::Other};
4688 ConcatOps[ConcatEnd++] = Oper;
4697 if (Chains.
size() == 1)
4698 NewChain = Chains[0];
4701 ReplaceValueWith(
SDValue(
N, 1), NewChain);
// Widen one result of a two-result overflow op (value + overflow flag).
// Builds a wide node producing both results, then extracts the other
// result at its original type and replaces its uses, returning the wide
// value for result ResNo. NOTE(review): garbled extraction — the
// WideResVT/WideOvVT computation and the OtherVal extraction are missing.
4706SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo) {
4708 EVT ResVT =
N->getValueType(0);
4709 EVT OvVT =
N->getValueType(1);
4710 EVT WideResVT, WideOvVT;
4720 WideLHS = GetWidenedVector(
N->getOperand(0));
4721 WideRHS = GetWidenedVector(
N->getOperand(1));
// Alternative path: presumably pads the original operands with Zero when
// they are not already widened — TODO confirm, surrounding code is missing.
4731 N->getOperand(0), Zero);
4734 N->getOperand(1), Zero);
// Emit the op once at the wide type, producing both results.
4737 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
4738 SDNode *WideNode = DAG.getNode(
4739 N->getOpcode(),
DL, WideVTs, WideLHS, WideRHS).getNode();
// The result we were NOT asked for still has users: replace it too.
4742 unsigned OtherNo = 1 - ResNo;
4743 EVT OtherVT =
N->getValueType(OtherNo);
4750 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
4753 return SDValue(WideNode, ResNo);
4766 unsigned Opcode =
N->getOpcode();
4775 InOp = ZExtPromotedInteger(InOp);
4786 InOp = GetWidenedVector(
N->getOperand(0));
4789 if (InVTEC == WidenEC) {
4790 if (
N->getNumOperands() == 1)
4791 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
4792 if (
N->getNumOperands() == 3) {
4793 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4796 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
4798 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
4821 unsigned NumConcat =
4826 if (
N->getNumOperands() == 1)
4827 return DAG.getNode(Opcode,
DL, WidenVT, InVec);
4828 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
4833 DAG.getVectorIdxConstant(0,
DL));
4835 if (
N->getNumOperands() == 1)
4836 return DAG.getNode(Opcode,
DL, WidenVT, InVal);
4837 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
4846 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
4847 for (
unsigned i=0; i < MinElts; ++i) {
4849 DAG.getVectorIdxConstant(i,
DL));
4850 if (
N->getNumOperands() == 1)
4851 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val);
4853 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
4856 return DAG.getBuildVector(WidenVT,
DL, Ops);
4865 EVT SrcVT = Src.getValueType();
4869 Src = GetWidenedVector(Src);
4870 SrcVT = Src.getValueType();
4877 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
4886 EVT SrcVT = Src.getValueType();
4890 Src = GetWidenedVector(Src);
4891 SrcVT = Src.getValueType();
4898 if (
N->getNumOperands() == 1)
4899 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
4901 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
4902 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4906 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
// Widen a strict-FP (chained) conversion by fully scalarizing: convert each
// of the original elements individually (each producing a value + chain),
// merge the chains into result 1, and rebuild the widened vector from the
// per-element results. NOTE(review): garbled extraction — the NewOps setup
// and chain collection between the visible lines are missing.
4909SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
4920 unsigned Opcode =
N->getOpcode();
// Each scalar op yields {element value, chain}.
4926 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
4931 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
4932 for (
unsigned i=0; i < MinElts; ++i) {
4934 DAG.getVectorIdxConstant(i,
DL));
4935 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
// Publish the combined chain as result 1 of the original node.
4939 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4941 return DAG.getBuildVector(WidenVT,
DL, Ops);
// Widen an *_EXTEND_VECTOR_INREG result. When the widened input matches,
// re-emit the op directly; otherwise build the result element-by-element
// and pad with trailing entries (presumably undef — the padding expression
// is missing from this garbled extraction) up to the widened length.
4944SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
4945 unsigned Opcode =
N->getOpcode();
4958 InOp = GetWidenedVector(InOp);
// Fast path: one wide node suffices.
4965 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
// Slow path: extract/extend each usable element individually.
4972 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
4974 DAG.getVectorIdxConstant(i,
DL));
// Pad the operand list out to the widened element count.
4991 while (Ops.
size() != WidenNumElts)
4994 return DAG.getBuildVector(WidenVT,
DL, Ops);
5000 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5001 return WidenVecRes_BinaryCanTrap(
N);
5011SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5012 SDValue FpValue =
N->getOperand(0);
5016 SDValue Arg = GetWidenedVector(FpValue);
5017 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5023 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5025 SDValue ExpOp =
RHS.getValueType().isVector() ? GetWidenedVector(RHS) :
RHS;
5027 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5033 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5034 if (
N->getNumOperands() == 1)
5035 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5037 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5038 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5042 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5043 {InOp,
Mask,
N->getOperand(2)});
5049 cast<VTSDNode>(
N->getOperand(1))->getVT()
5050 .getVectorElementType(),
5052 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5053 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5054 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5057SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5058 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5059 return GetWidenedVector(WidenVec);
5065 EVT VT =
N->getValueType(0);
5069 switch (getTypeAction(InVT)) {
5083 SDValue NInOp = GetPromotedInteger(InOp);
5085 if (WidenVT.
bitsEq(NInVT)) {
5088 if (DAG.getDataLayout().isBigEndian()) {
5093 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5112 InOp = GetWidenedVector(InOp);
5114 if (WidenVT.
bitsEq(InVT))
5124 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5129 unsigned NewNumParts = WidenSize / InSize;
5142 EVT OrigInVT =
N->getOperand(0).getValueType();
5155 if (WidenSize % InSize == 0) {
5162 DAG.ExtractVectorElements(InOp, Ops);
5163 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5175 return CreateStackStoreLoad(InOp, WidenVT);
5181 EVT VT =
N->getValueType(0);
5185 EVT EltVT =
N->getOperand(0).getValueType();
5192 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5193 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5195 return DAG.getBuildVector(WidenVT, dl, NewOps);
5199 EVT InVT =
N->getOperand(0).getValueType();
5202 unsigned NumOperands =
N->getNumOperands();
5204 bool InputWidened =
false;
5208 if (WidenNumElts % NumInElts == 0) {
5210 unsigned NumConcat = WidenNumElts / NumInElts;
5211 SDValue UndefVal = DAG.getUNDEF(InVT);
5213 for (
unsigned i=0; i < NumOperands; ++i)
5214 Ops[i] =
N->getOperand(i);
5215 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5220 InputWidened =
true;
5224 for (i=1; i < NumOperands; ++i)
5225 if (!
N->getOperand(i).isUndef())
5228 if (i == NumOperands)
5231 return GetWidenedVector(
N->getOperand(0));
5233 if (NumOperands == 2) {
5235 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5241 for (
unsigned i = 0; i < NumInElts; ++i) {
5243 MaskOps[i + NumInElts] = i + WidenNumElts;
5245 return DAG.getVectorShuffle(WidenVT, dl,
5246 GetWidenedVector(
N->getOperand(0)),
5247 GetWidenedVector(
N->getOperand(1)),
5254 "Cannot use build vectors to widen CONCAT_VECTOR result");
5262 for (
unsigned i=0; i < NumOperands; ++i) {
5265 InOp = GetWidenedVector(InOp);
5266 for (
unsigned j = 0;
j < NumInElts; ++
j)
5268 DAG.getVectorIdxConstant(j, dl));
5270 SDValue UndefVal = DAG.getUNDEF(EltVT);
5271 for (;
Idx < WidenNumElts; ++
Idx)
5272 Ops[
Idx] = UndefVal;
5273 return DAG.getBuildVector(WidenVT, dl, Ops);
5276SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5277 EVT VT =
N->getValueType(0);
5279 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5286SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5287 EVT VT =
N->getValueType(0);
5294 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5296 InOp = GetWidenedVector(InOp);
5302 if (IdxVal == 0 && InVT == WidenVT)
5309 assert(IdxVal % VTNumElts == 0 &&
5310 "Expected Idx to be a multiple of subvector minimum vector length");
5311 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5324 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5325 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5326 "down type's element count");
5333 for (;
I < VTNumElts / GCD; ++
I)
5336 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5337 for (;
I < WidenNumElts / GCD; ++
I)
5344 "EXTRACT_SUBVECTOR for scalable vectors");
5351 for (i = 0; i < VTNumElts; ++i)
5353 DAG.getVectorIdxConstant(IdxVal + i, dl));
5355 SDValue UndefVal = DAG.getUNDEF(EltVT);
5356 for (; i < WidenNumElts; ++i)
5358 return DAG.getBuildVector(WidenVT, dl, Ops);
5369SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5370 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5373 N->getOperand(1),
N->getOperand(2));
5386 if (!
LD->getMemoryVT().isByteSized()) {
5390 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5399 EVT LdVT =
LD->getMemoryVT();
5410 const auto *MMO =
LD->getMemOperand();
5412 DAG.getLoadVP(WideVT,
DL,
LD->getChain(),
LD->getBasePtr(), Mask, EVL,
5426 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5428 Result = GenWidenVectorLoads(LdChain, LD);
5435 if (LdChain.
size() == 1)
5436 NewChain = LdChain[0];
5442 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5453 SDValue EVL =
N->getVectorLength();
5460 "Unable to widen binary VP op");
5461 Mask = GetWidenedVector(Mask);
5462 assert(
Mask.getValueType().getVectorElementCount() ==
5465 "Unable to widen vector load");
5468 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5469 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5470 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
5484 "Unable to widen VP strided load");
5485 Mask = GetWidenedVector(Mask);
5488 assert(
Mask.getValueType().getVectorElementCount() ==
5490 "Data and mask vectors should have the same number of elements");
5492 SDValue Res = DAG.getStridedLoadVP(
5493 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
5494 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
5495 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
5496 N->isExpandingLoad());
5508 EVT MaskVT =
Mask.getValueType();
5509 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5517 Mask = ModifyToType(Mask, WideMaskVT,
true);
5519 SDValue Res = DAG.getMaskedLoad(
5520 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
5521 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
5522 ExtType,
N->isExpandingLoad());
5533 EVT MaskVT =
Mask.getValueType();
5534 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5543 Mask = ModifyToType(Mask, WideMaskVT,
true);
5548 Index.getValueType().getScalarType(),
5556 N->getMemoryVT().getScalarType(), NumElts);
5557 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
5558 WideMemVT, dl, Ops,
N->getMemOperand(),
5559 N->getIndexType(),
N->getExtensionType());
5576 N->getMemoryVT().getScalarType(), WideEC);
5577 Mask = GetWidenedMask(Mask, WideEC);
5580 Mask,
N->getVectorLength()};
5581 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
5582 dl, Ops,
N->getMemOperand(),
N->getIndexType());
5592 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
5620 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
5621 return N->getOperand(OpNo).getValueType();
5629 N =
N.getOperand(0);
5631 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
5632 if (!
N->getOperand(i)->isUndef())
5634 N =
N.getOperand(0);
5638 N =
N.getOperand(0);
5640 N =
N.getOperand(0);
5667 { MaskVT, MVT::Other }, Ops);
5668 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
5678 if (MaskScalarBits < ToMaskScalBits) {
5682 }
else if (MaskScalarBits > ToMaskScalBits) {
5688 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
5690 "Mask should have the right element size by now.");
5693 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
5695 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
5700 EVT SubVT =
Mask->getValueType(0);
5706 assert((
Mask->getValueType(0) == ToMaskVT) &&
5707 "A mask of ToMaskVT should have been produced by now.");
5728 EVT CondVT =
Cond->getValueType(0);
5732 EVT VSelVT =
N->getValueType(0);
5744 EVT FinalVT = VSelVT;
5756 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
5774 EVT ToMaskVT = VSelVT;
5781 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5797 if (ScalarBits0 != ScalarBits1) {
5798 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
5799 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
5811 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
5812 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
5816 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5829 unsigned Opcode =
N->getOpcode();
5831 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
5832 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5833 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5835 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
5841 Cond1 = GetWidenedVector(Cond1);
5849 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
5850 SDValue Res = ModifyToType(SplitSelect, WidenVT);
5855 Cond1 = ModifyToType(Cond1, CondWidenVT);
5858 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5859 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5861 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
5862 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
5864 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
5868 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
5869 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
5872 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
5877 return DAG.getUNDEF(WidenVT);
5881 EVT VT =
N->getValueType(0);
5888 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5889 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
5893 for (
unsigned i = 0; i != NumElts; ++i) {
5894 int Idx =
N->getMaskElt(i);
5895 if (
Idx < (
int)NumElts)
5900 for (
unsigned i = NumElts; i != WidenNumElts; ++i)
5902 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
5906 EVT VT =
N->getValueType(0);
5911 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
5917 unsigned IdxVal = WidenNumElts - VTNumElts;
5930 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5933 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5934 "down type's element count");
5937 for (; i < VTNumElts / GCD; ++i)
5940 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
5941 for (; i < WidenNumElts / GCD; ++i)
5950 for (
unsigned i = 0; i != VTNumElts; ++i) {
5951 Mask.push_back(IdxVal + i);
5953 for (
unsigned i = VTNumElts; i != WidenNumElts; ++i)
5956 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
5961 assert(
N->getValueType(0).isVector() &&
5962 N->getOperand(0).getValueType().isVector() &&
5963 "Operands must be vectors");
5977 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
5978 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
5985 InOp1 = GetWidenedVector(InOp1);
5986 InOp2 = GetWidenedVector(InOp2);
5988 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
5989 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
5996 "Input not widened to expected type!");
5998 if (
N->getOpcode() == ISD::VP_SETCC) {
6001 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6002 N->getOperand(2), Mask,
N->getOperand(4));
6009 assert(
N->getValueType(0).isVector() &&
6010 N->getOperand(1).getValueType().isVector() &&
6011 "Operands must be vectors");
6012 EVT VT =
N->getValueType(0);
6023 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6028 for (
unsigned i = 0; i != NumElts; ++i) {
6030 DAG.getVectorIdxConstant(i, dl));
6032 DAG.getVectorIdxConstant(i, dl));
6034 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6035 {Chain, LHSElem, RHSElem, CC});
6036 Chains[i] = Scalars[i].getValue(1);
6037 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6038 DAG.getBoolConstant(
true, dl, EltVT, VT),
6039 DAG.getBoolConstant(
false, dl, EltVT, VT));
6043 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6045 return DAG.getBuildVector(WidenVT, dl, Scalars);
6051bool DAGTypeLegalizer::WidenVectorOperand(
SDNode *
N,
unsigned OpNo) {
6052 LLVM_DEBUG(
dbgs() <<
"Widen node operand " << OpNo <<
": ";
N->dump(&DAG));
6056 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
6059 switch (
N->getOpcode()) {
6062 dbgs() <<
"WidenVectorOperand op #" << OpNo <<
": ";
6073 case ISD::STORE: Res = WidenVecOp_STORE(
N);
break;
6074 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(
N, OpNo);
break;
6075 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6076 Res = WidenVecOp_VP_STRIDED_STORE(
N, OpNo);
6081 Res = WidenVecOp_EXTEND_VECTOR_INREG(
N);
6083 case ISD::MSTORE: Res = WidenVecOp_MSTORE(
N, OpNo);
break;
6084 case ISD::MGATHER: Res = WidenVecOp_MGATHER(
N, OpNo);
break;
6086 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(
N, OpNo);
break;
6087 case ISD::SETCC: Res = WidenVecOp_SETCC(
N);
break;
6095 Res = WidenVecOp_UnrollVectorOp(
N);
6102 Res = WidenVecOp_EXTEND(
N);
6118 Res = WidenVecOp_Convert(
N);
6123 Res = WidenVecOp_FP_TO_XINT_SAT(
N);
6141 Res = WidenVecOp_VECREDUCE(
N);
6145 Res = WidenVecOp_VECREDUCE_SEQ(
N);
6147 case ISD::VP_REDUCE_FADD:
6148 case ISD::VP_REDUCE_SEQ_FADD:
6149 case ISD::VP_REDUCE_FMUL:
6150 case ISD::VP_REDUCE_SEQ_FMUL:
6151 case ISD::VP_REDUCE_ADD:
6152 case ISD::VP_REDUCE_MUL:
6153 case ISD::VP_REDUCE_AND:
6154 case ISD::VP_REDUCE_OR:
6155 case ISD::VP_REDUCE_XOR:
6156 case ISD::VP_REDUCE_SMAX:
6157 case ISD::VP_REDUCE_SMIN:
6158 case ISD::VP_REDUCE_UMAX:
6159 case ISD::VP_REDUCE_UMIN:
6160 case ISD::VP_REDUCE_FMAX:
6161 case ISD::VP_REDUCE_FMIN:
6162 Res = WidenVecOp_VP_REDUCE(
N);
6167 if (!Res.
getNode())
return false;
6175 if (
N->isStrictFPOpcode())
6177 "Invalid operand expansion");
6180 "Invalid operand expansion");
6182 ReplaceValueWith(
SDValue(
N, 0), Res);
6188 EVT VT =
N->getValueType(0);
6193 "Unexpected type action");
6194 InOp = GetWidenedVector(InOp);
6197 "Input wasn't widened!");
6208 FixedEltVT == InEltVT) {
6210 "Not enough elements in the fixed type for the operand!");
6212 "We can't have the same type as we started with!");
6215 DAG.getUNDEF(FixedVT), InOp,
6216 DAG.getVectorIdxConstant(0,
DL));
6219 DAG.getVectorIdxConstant(0,
DL));
6228 return WidenVecOp_Convert(
N);
6233 switch (
N->getOpcode()) {
6249 return DAG.UnrollVectorOp(
N);
6254 EVT ResultVT =
N->getValueType(0);
6256 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6265 {WideArg,
Test},
N->getFlags());
6272 DAG.getVectorIdxConstant(0,
DL));
6274 EVT OpVT =
N->getOperand(0).getValueType();
6277 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6282 EVT VT =
N->getValueType(0);
6285 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6288 "Unexpected type action");
6289 InOp = GetWidenedVector(InOp);
6291 unsigned Opcode =
N->getOpcode();
6297 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6299 if (
N->isStrictFPOpcode()) {
6301 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6302 {
N->getOperand(0), InOp,
N->getOperand(2) });
6304 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6305 {
N->getOperand(0), InOp });
6311 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6313 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6316 DAG.getVectorIdxConstant(0, dl));
6324 if (
N->isStrictFPOpcode()) {
6327 for (
unsigned i=0; i < NumElts; ++i) {
6329 DAG.getVectorIdxConstant(i, dl));
6330 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6334 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6336 for (
unsigned i = 0; i < NumElts; ++i)
6337 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6339 InOp, DAG.getVectorIdxConstant(i, dl)));
6342 return DAG.getBuildVector(VT, dl, Ops);
6346 EVT DstVT =
N->getValueType(0);
6347 SDValue Src = GetWidenedVector(
N->getOperand(0));
6348 EVT SrcVT = Src.getValueType();
6357 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6360 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6364 return DAG.UnrollVectorOp(
N);
6368 EVT VT =
N->getValueType(0);
6369 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6377 if (!VT.
isVector() && VT != MVT::x86mmx &&
6384 DAG.getVectorIdxConstant(0, dl));
6398 .divideCoefficientBy(EltSize);
6403 DAG.getVectorIdxConstant(0, dl));
6408 return CreateStackStoreLoad(InOp, VT);
6412 EVT VT =
N->getValueType(0);
6414 EVT InVT =
N->getOperand(0).getValueType();
6419 unsigned NumOperands =
N->getNumOperands();
6422 for (i = 1; i < NumOperands; ++i)
6423 if (!
N->getOperand(i).isUndef())
6426 if (i == NumOperands)
6427 return GetWidenedVector(
N->getOperand(0));
6437 for (
unsigned i=0; i < NumOperands; ++i) {
6441 "Unexpected type action");
6442 InOp = GetWidenedVector(InOp);
6443 for (
unsigned j = 0;
j < NumInElts; ++
j)
6445 DAG.getVectorIdxConstant(j, dl));
6447 return DAG.getBuildVector(VT, dl, Ops);
6450SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
6451 EVT VT =
N->getValueType(0);
6456 SubVec = GetWidenedVector(SubVec);
6462 bool IndicesValid =
false;
6465 IndicesValid =
true;
6469 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
6470 Attribute::VScaleRange);
6475 IndicesValid =
true;
6481 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
6486 "INSERT_SUBVECTOR");
6489SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
6490 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6492 N->getValueType(0), InOp,
N->getOperand(1));
6495SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
6496 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6498 N->getValueType(0), InOp,
N->getOperand(1));
6501SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
6502 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6503 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
6511 if (!
ST->getMemoryVT().getScalarType().isByteSized())
6514 if (
ST->isTruncatingStore())
6533 StVal = GetWidenedVector(StVal);
6537 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
6538 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
6539 EVL, StVT,
ST->getMemOperand(),
6540 ST->getAddressingMode());
6544 if (GenWidenVectorStores(StChain, ST)) {
6545 if (StChain.
size() == 1)
6554SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
6555 assert((OpNo == 1 || OpNo == 3) &&
6556 "Can widen only data or mask operand of vp_store");
6564 StVal = GetWidenedVector(StVal);
6570 "Unable to widen VP store");
6571 Mask = GetWidenedVector(Mask);
6573 Mask = GetWidenedVector(Mask);
6579 "Unable to widen VP store");
6580 StVal = GetWidenedVector(StVal);
6583 assert(
Mask.getValueType().getVectorElementCount() ==
6585 "Mask and data vectors should have the same number of elements");
6586 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
6587 ST->getOffset(), Mask,
ST->getVectorLength(),
6588 ST->getMemoryVT(),
ST->getMemOperand(),
6589 ST->getAddressingMode(),
ST->isTruncatingStore(),
6590 ST->isCompressingStore());
6595 assert((OpNo == 1 || OpNo == 4) &&
6596 "Can widen only data or mask operand of vp_strided_store");
6605 "Unable to widen VP strided store");
6609 "Unable to widen VP strided store");
6611 StVal = GetWidenedVector(StVal);
6612 Mask = GetWidenedVector(Mask);
6615 Mask.getValueType().getVectorElementCount() &&
6616 "Data and mask vectors should have the same number of elements");
6618 return DAG.getStridedStoreVP(
6625SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
6626 assert((OpNo == 1 || OpNo == 4) &&
6627 "Can widen only data or mask operand of mstore");
6630 EVT MaskVT =
Mask.getValueType();
6636 StVal = GetWidenedVector(StVal);
6643 Mask = ModifyToType(Mask, WideMaskVT,
true);
6647 Mask = ModifyToType(Mask, WideMaskVT,
true);
6653 StVal = ModifyToType(StVal, WideVT);
6656 assert(
Mask.getValueType().getVectorNumElements() ==
6658 "Mask and data vectors should have the same number of elements");
6665SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
6666 assert(OpNo == 4 &&
"Can widen only the index of mgather");
6667 auto *MG = cast<MaskedGatherSDNode>(
N);
6668 SDValue DataOp = MG->getPassThru();
6670 SDValue Scale = MG->getScale();
6678 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
6679 MG->getMemOperand(), MG->getIndexType(),
6680 MG->getExtensionType());
6686SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
6695 DataOp = GetWidenedVector(DataOp);
6699 EVT IndexVT =
Index.getValueType();
6705 EVT MaskVT =
Mask.getValueType();
6708 Mask = ModifyToType(Mask, WideMaskVT,
true);
6713 }
else if (OpNo == 4) {
6721 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
6726SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
6735 DataOp = GetWidenedVector(DataOp);
6738 Mask = GetWidenedMask(Mask, WideEC);
6741 }
else if (OpNo == 3) {
6750 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
6755 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
6756 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6758 EVT VT =
N->getValueType(0);
6773 SVT, InOp0, InOp1,
N->getOperand(2));
6780 DAG.getVectorIdxConstant(0, dl));
6782 EVT OpVT =
N->getOperand(0).getValueType();
6785 return DAG.getNode(ExtendCode, dl, VT,
CC);
6795 EVT VT =
N->getValueType(0);
6797 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6804 for (
unsigned i = 0; i != NumElts; ++i) {
6806 DAG.getVectorIdxConstant(i, dl));
6808 DAG.getVectorIdxConstant(i, dl));
6810 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6811 {Chain, LHSElem, RHSElem, CC});
6812 Chains[i] = Scalars[i].getValue(1);
6813 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6814 DAG.getBoolConstant(
true, dl, EltVT, VT),
6815 DAG.getBoolConstant(
false, dl, EltVT, VT));
6819 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6821 return DAG.getBuildVector(VT, dl, Scalars);
6826 SDValue Op = GetWidenedVector(
N->getOperand(0));
6827 EVT OrigVT =
N->getOperand(0).getValueType();
6828 EVT WideVT =
Op.getValueType();
6832 unsigned Opc =
N->getOpcode();
6834 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6835 assert(NeutralElem &&
"Neutral element must exist");
6842 unsigned GCD = std::gcd(OrigElts, WideElts);
6845 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6846 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6848 DAG.getVectorIdxConstant(
Idx, dl));
6849 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6852 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6854 DAG.getVectorIdxConstant(
Idx, dl));
6856 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6866 EVT WideVT =
Op.getValueType();
6870 unsigned Opc =
N->getOpcode();
6872 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6879 unsigned GCD = std::gcd(OrigElts, WideElts);
6882 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6883 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6885 DAG.getVectorIdxConstant(
Idx, dl));
6886 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6889 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6891 DAG.getVectorIdxConstant(
Idx, dl));
6893 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6897 assert(
N->isVPOpcode() &&
"Expected VP opcode");
6900 SDValue Op = GetWidenedVector(
N->getOperand(1));
6902 Op.getValueType().getVectorElementCount());
6904 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
6905 {N->getOperand(0), Op, Mask, N->getOperand(3)},
6913 EVT VT =
N->getValueType(0);
6924 DAG.getVectorIdxConstant(0,
DL));
6941 unsigned WidenEx = 0) {
6946 unsigned AlignInBits =
Align*8;
6949 EVT RetVT = WidenEltVT;
6950 if (!Scalable && Width == WidenEltWidth)
6964 (WidenWidth % MemVTWidth) == 0 &&
6966 (MemVTWidth <= Width ||
6967 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
6968 if (MemVTWidth == WidenWidth)
6987 (WidenWidth % MemVTWidth) == 0 &&
6989 (MemVTWidth <= Width ||
6990 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
6999 return std::nullopt;
7010 unsigned Start,
unsigned End) {
7011 SDLoc dl(LdOps[Start]);
7012 EVT LdTy = LdOps[Start].getValueType();
7020 for (
unsigned i = Start + 1; i !=
End; ++i) {
7021 EVT NewLdTy = LdOps[i].getValueType();
7022 if (NewLdTy != LdTy) {
7043 EVT LdVT =
LD->getMemoryVT();
7057 TypeSize WidthDiff = WidenWidth - LdWidth;
7064 std::optional<EVT> FirstVT =
7065 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7072 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7077 std::optional<EVT> NewVT = FirstVT;
7079 TypeSize NewVTWidth = FirstVTWidth;
7081 RemainingWidth -= NewVTWidth;
7088 NewVTWidth = NewVT->getSizeInBits();
7094 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7095 LD->getOriginalAlign(), MMOFlags, AAInfo);
7099 if (MemVTs.
empty()) {
7101 if (!FirstVT->isVector()) {
7108 if (FirstVT == WidenVT)
7113 unsigned NumConcat =
7116 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7117 ConcatOps[0] = LdOp;
7118 for (
unsigned i = 1; i != NumConcat; ++i)
7119 ConcatOps[i] = UndefVal;
7131 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7134 for (
EVT MemVT : MemVTs) {
7135 Align NewAlign = ScaledOffset == 0
7136 ?
LD->getOriginalAlign()
7139 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7143 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7148 if (!LdOps[0].getValueType().
isVector())
7158 EVT LdTy = LdOps[i].getValueType();
7161 for (--i; i >= 0; --i) {
7162 LdTy = LdOps[i].getValueType();
7169 ConcatOps[--
Idx] = LdOps[i];
7170 for (--i; i >= 0; --i) {
7171 EVT NewLdTy = LdOps[i].getValueType();
7172 if (NewLdTy != LdTy) {
7183 WidenOps[j] = ConcatOps[
Idx+j];
7184 for (;
j != NumOps; ++
j)
7185 WidenOps[j] = DAG.getUNDEF(LdTy);
7192 ConcatOps[--
Idx] = LdOps[i];
7203 SDValue UndefVal = DAG.getUNDEF(LdTy);
7206 for (; i !=
End-
Idx; ++i)
7207 WidenOps[i] = ConcatOps[
Idx+i];
7208 for (; i != NumOps; ++i)
7209 WidenOps[i] = UndefVal;
7221 EVT LdVT =
LD->getMemoryVT();
7234 "not yet supported");
7245 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7246 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7252 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7253 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7254 LD->getOriginalAlign(), MMOFlags, AAInfo);
7259 SDValue UndefVal = DAG.getUNDEF(EltVT);
7260 for (; i != WidenNumElts; ++i)
7263 return DAG.getBuildVector(WidenVT, dl, Ops);
7275 SDValue ValOp = GetWidenedVector(
ST->getValue());
7278 EVT StVT =
ST->getMemoryVT();
7286 "Mismatch between store and value types");
7300 std::optional<EVT> NewVT =
7305 TypeSize NewVTWidth = NewVT->getSizeInBits();
7308 StWidth -= NewVTWidth;
7309 MemVTs.
back().second++;
7313 for (
const auto &Pair : MemVTs) {
7314 EVT NewVT = Pair.first;
7315 unsigned Count = Pair.second;
7321 Align NewAlign = ScaledOffset == 0
7322 ?
ST->getOriginalAlign()
7325 DAG.getVectorIdxConstant(
Idx, dl));
7326 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7331 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
7343 DAG.getVectorIdxConstant(
Idx++, dl));
7345 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
7349 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
7363 bool FillWithZeroes) {
7368 "input and widen element type must match");
7370 "cannot modify scalable vectors in this way");
7382 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
7385 for (
unsigned i = 1; i != NumConcat; ++i)
7393 DAG.getVectorIdxConstant(0, dl));
7396 "Scalable vectors should have been handled already.");
7404 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
7408 DAG.getVectorIdxConstant(
Idx, dl));
7410 SDValue UndefVal = DAG.getUNDEF(EltVT);
7411 for (;
Idx < WidenNumElts; ++
Idx)
7412 Ops[
Idx] = UndefVal;
7414 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
7415 if (!FillWithZeroes)
7419 "We expect to never want to FillWithZeroes for non-integral types.");
7422 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
7423 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
7425 return DAG.getNode(
ISD::AND, dl, NVT, Widened,
7426 DAG.getBuildVector(NVT, dl,
MaskOps));
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent an VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.