35#define DEBUG_TYPE "legalize-types"
// NOTE(review): garbled extraction fragment — the fused numeric prefixes
// are original-file line numbers and their gaps show dropped lines.
// Comments describe only what the visible lines establish.
//
// Dispatcher: legalizes an illegal vector result of node N by switching
// on N->getOpcode() and delegating to a per-opcode ScalarizeVecRes_*
// helper, then records the scalar replacement for result ResNo.
41void DAGTypeLegalizer::ScalarizeVectorResult(
SDNode *
N,
unsigned ResNo) {
46 switch (
N->getOpcode()) {
// Debug trace of which result is being scalarized.
49 dbgs() <<
"ScalarizeVectorResult #" << ResNo <<
": ";
61 case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(
N);
break;
63 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(
N));
break;
69 case ISD::SETCC: R = ScalarizeVecRes_SETCC(
N);
break;
70 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(
N);
break;
76 R = ScalarizeVecRes_VecInregOp(
N);
118 R = ScalarizeVecRes_UnaryOp(
N);
121 R = ScalarizeVecRes_ADDRSPACECAST(
N);
// FFREXP produces two results, so the helper needs ResNo as well.
124 R = ScalarizeVecRes_FFREXP(
N, ResNo);
169 R = ScalarizeVecRes_BinOp(
N);
174 R = ScalarizeVecRes_TernaryOp(
N);
// Expands to one `case ISD::STRICT_*` per constrained FP op, all of
// which funnel into ScalarizeVecRes_StrictFPOp.
177#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
178 case ISD::STRICT_##DAGN:
179#include "llvm/IR/ConstrainedOps.def"
180 R = ScalarizeVecRes_StrictFPOp(
N);
185 R = ScalarizeVecRes_FP_TO_XINT_SAT(
N);
194 R = ScalarizeVecRes_OverflowOp(
N, ResNo);
204 R = ScalarizeVecRes_FIX(
N);
// Register the computed scalar R as the legalized form of (N, ResNo).
210 SetScalarizedVector(
SDValue(
N, ResNo), R);
214 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
215 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
217 LHS.getValueType(), LHS, RHS,
N->getFlags());
221 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
222 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
223 SDValue Op2 = GetScalarizedVector(
N->getOperand(2));
229 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
230 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
// NOTE(review): garbled extraction fragment with dropped lines (the node
// creating ScalarNode around original line 246 is missing).
//
// Scalarizes a two-result FFREXP whose vector results have one element:
// builds a scalar node over the scalar element types, records the scalar
// form of the *other* result too, and returns the requested result.
236SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(
SDNode *
N,
unsigned ResNo) {
237 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
238 "Unexpected vector type!");
239 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
241 EVT VT0 =
N->getValueType(0);
242 EVT VT1 =
N->getValueType(1);
// Scalar node is built with both results' scalar element types.
247 {VT0.getScalarType(), VT1.getScalarType()}, Elt)
// Handle the sibling result (index 1-ResNo) in the same pass so the
// legalizer does not revisit N for it.
251 unsigned OtherNo = 1 - ResNo;
252 EVT OtherVT =
N->getValueType(OtherNo);
254 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
258 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
261 return SDValue(ScalarNode, ResNo);
265 EVT VT =
N->getValueType(0).getVectorElementType();
266 unsigned NumOpers =
N->getNumOperands();
268 EVT ValueVTs[] = {VT, MVT::Other};
277 for (
unsigned i = 1; i < NumOpers; ++i) {
283 Oper = GetScalarizedVector(Oper);
294 Opers,
N->getFlags());
305 EVT ResVT =
N->getValueType(0);
306 EVT OvVT =
N->getValueType(1);
310 ScalarLHS = GetScalarizedVector(
N->getOperand(0));
311 ScalarRHS = GetScalarizedVector(
N->getOperand(1));
316 ScalarLHS = ElemsLHS[0];
317 ScalarRHS = ElemsRHS[0];
323 N->getOpcode(),
DL, ScalarVTs, ScalarLHS, ScalarRHS).
getNode();
327 unsigned OtherNo = 1 - ResNo;
328 EVT OtherVT =
N->getValueType(OtherNo);
330 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
334 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
337 return SDValue(ScalarNode, ResNo);
342 SDValue Op = DisintegrateMERGE_VALUES(
N, ResNo);
343 return GetScalarizedVector(
Op);
348 if (
Op.getValueType().isVector()
349 &&
Op.getValueType().getVectorNumElements() == 1
350 && !isSimpleLegalType(
Op.getValueType()))
351 Op = GetScalarizedVector(
Op);
352 EVT NewVT =
N->getValueType(0).getVectorElementType();
// Scalarizes a one-element BUILD_VECTOR. Only the element-type query is
// visible here; the return (presumably operand 0, possibly truncated to
// EltVT) was dropped by the extraction — TODO confirm against upstream.
357SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(
SDNode *
N) {
358 EVT EltVT =
N->getValueType(0).getVectorElementType();
// Scalarizes EXTRACT_SUBVECTOR: the visible lines form a call taking the
// result's element type, the source vector (operand 0) and the index
// (operand 1) — presumably an EXTRACT_VECTOR_ELT build; the call head
// was dropped by the extraction.
367SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
369 N->getValueType(0).getVectorElementType(),
370 N->getOperand(0),
N->getOperand(1));
376 EVT OpVT =
Op.getValueType();
380 Op = GetScalarizedVector(
Op);
387 N->getValueType(0).getVectorElementType(),
Op,
392 SDValue Op = GetScalarizedVector(
N->getOperand(0));
397SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
401 EVT EltVT =
N->getValueType(0).getVectorElementType();
402 if (
Op.getValueType() != EltVT)
409 assert(
N->isUnindexed() &&
"Indexed vector load?");
413 N->getValueType(0).getVectorElementType(),
SDLoc(
N),
N->getChain(),
414 N->getBasePtr(), DAG.
getUNDEF(
N->getBasePtr().getValueType()),
415 N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
416 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
N->getAAInfo());
426 EVT DestVT =
N->getValueType(0).getVectorElementType();
428 EVT OpVT =
Op.getValueType();
438 Op = GetScalarizedVector(
Op);
448 EVT EltVT =
N->getValueType(0).getVectorElementType();
450 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
459 EVT OpVT =
Op.getValueType();
461 EVT EltVT =
N->getValueType(0).getVectorElementType();
464 Op = GetScalarizedVector(
Op);
470 switch (
N->getOpcode()) {
// Scalarizes ADDRSPACECAST: scalarizes the (vector) input when needed,
// then reads the source/destination address spaces from the node. The
// final getAddrSpaceCast call was dropped by the extraction.
482SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(
SDNode *
N) {
483 EVT DestVT =
N->getValueType(0).getVectorElementType();
485 EVT OpVT =
Op.getValueType();
// Input vector already legalized to a scalar — fetch that scalar.
495 Op = GetScalarizedVector(
Op);
501 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
502 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
503 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
507SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(
SDNode *
N) {
510 EVT EltVT =
N->getValueType(0).getVectorElementType();
519 EVT OpVT =
Cond.getValueType();
532 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
546 EVT OpVT =
Cond->getOperand(0).getValueType();
553 EVT CondVT =
Cond.getValueType();
554 if (ScalarBool != VecBool) {
555 switch (ScalarBool) {
576 auto BoolVT = getSetCCResultType(CondVT);
577 if (BoolVT.bitsLT(CondVT))
582 GetScalarizedVector(
N->getOperand(2)));
586 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
588 LHS.getValueType(),
N->getOperand(0), LHS,
589 GetScalarizedVector(
N->getOperand(2)));
593 SDValue LHS = GetScalarizedVector(
N->getOperand(2));
595 N->getOperand(0),
N->getOperand(1),
596 LHS, GetScalarizedVector(
N->getOperand(3)),
601 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
// Scalarizes a one-element VECTOR_SHUFFLE: the single mask element
// (operand 2's first operand) selects which input vector supplies the
// scalar. An undef mask yields UNDEF of the element type.
604SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(
SDNode *
N) {
606 SDValue Arg =
N->getOperand(2).getOperand(0);
608 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
// Mask value 0 selects operand 0, anything else selects operand 1.
609 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
610 return GetScalarizedVector(
N->getOperand(
Op));
// Scalarizes FP_TO_SINT_SAT/FP_TO_UINT_SAT: scalarizes the FP source,
// then rebuilds the same opcode over the scalar destination element
// type, keeping operand 1 (the saturation width operand) as-is.
613SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(
SDNode *
N) {
615 EVT SrcVT = Src.getValueType();
620 Src = GetScalarizedVector(Src);
626 EVT DstVT =
N->getValueType(0).getVectorElementType();
627 return DAG.
getNode(
N->getOpcode(), dl, DstVT, Src,
N->getOperand(1));
631 assert(
N->getValueType(0).isVector() &&
632 N->getOperand(0).getValueType().isVector() &&
633 "Operand types must be vectors");
636 EVT OpVT =
LHS.getValueType();
637 EVT NVT =
N->getValueType(0).getVectorElementType();
642 LHS = GetScalarizedVector(LHS);
643 RHS = GetScalarizedVector(RHS);
659 return DAG.
getNode(ExtendCode,
DL, NVT, Res);
667 EVT ResultVT =
N->getValueType(0).getVectorElementType();
670 Arg = GetScalarizedVector(Arg);
683 return DAG.
getNode(ExtendCode,
DL, ResultVT, Res);
// Dispatcher: legalizes an illegal one-element-vector *operand* OpNo of
// node N via per-opcode ScalarizeVecOp_* helpers. Returns false when no
// replacement was produced; otherwise replaces N's result 0 with Res.
690bool DAGTypeLegalizer::ScalarizeVectorOperand(
SDNode *
N,
unsigned OpNo) {
695 switch (
N->getOpcode()) {
// Debug trace of which operand is being scalarized.
698 dbgs() <<
"ScalarizeVectorOperand Op #" << OpNo <<
": ";
705 Res = ScalarizeVecOp_BITCAST(
N);
717 Res = ScalarizeVecOp_UnaryOp(
N);
723 Res = ScalarizeVecOp_UnaryOp_StrictFP(
N);
726 Res = ScalarizeVecOp_CONCAT_VECTORS(
N);
729 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(
N);
732 Res = ScalarizeVecOp_VSELECT(
N);
735 Res = ScalarizeVecOp_VSETCC(
N);
738 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
741 Res = ScalarizeVecOp_STRICT_FP_ROUND(
N, OpNo);
744 Res = ScalarizeVecOp_FP_ROUND(
N, OpNo);
747 Res = ScalarizeVecOp_STRICT_FP_EXTEND(
N);
750 Res = ScalarizeVecOp_FP_EXTEND(
N);
767 Res = ScalarizeVecOp_VECREDUCE(
N);
771 Res = ScalarizeVecOp_VECREDUCE_SEQ(
N);
// No replacement produced — nothing to do for this operand.
776 if (!Res.
getNode())
return false;
784 "Invalid operand expansion");
786 ReplaceValueWith(
SDValue(
N, 0), Res);
793 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
795 N->getValueType(0), Elt);
801 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
802 "Unexpected vector type!");
803 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
805 N->getValueType(0).getScalarType(), Elt);
// Scalarizes the vector operand of a strict-FP unary op whose result is
// a one-element vector: operand 0 is the chain, operand 1 the value.
// The rebuilt node carries {scalar result type, MVT::Other} and the
// chain result is propagated via ReplaceValueWith.
813SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(
SDNode *
N) {
814 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
815 "Unexpected vector type!");
// Operand 1 is the FP value; operand 0 is the incoming chain.
816 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
818 {
N->getValueType(0).getScalarType(), MVT::Other },
819 {
N->getOperand(0), Elt });
// Hand the legalizer the replacement for result 0 directly.
829 ReplaceValueWith(
SDValue(
N, 0), Res);
// Scalarizes CONCAT_VECTORS of one-element vectors: gathers the scalar
// form of every operand into Ops (presumably then rebuilt as a
// BUILD_VECTOR — the build call was dropped by the extraction).
834SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(
SDNode *
N) {
836 for (
unsigned i = 0, e =
N->getNumOperands(); i < e; ++i)
837 Ops[i] = GetScalarizedVector(
N->getOperand(i));
843SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
844 EVT VT =
N->getValueType(0);
845 SDValue Res = GetScalarizedVector(
N->getOperand(0));
857 SDValue ScalarCond = GetScalarizedVector(
N->getOperand(0));
858 EVT VT =
N->getValueType(0);
868 assert(
N->getValueType(0).isVector() &&
869 N->getOperand(0).getValueType().isVector() &&
870 "Operand types must be vectors");
871 assert(
N->getValueType(0) == MVT::v1i1 &&
"Expected v1i1 type");
873 EVT VT =
N->getValueType(0);
874 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
875 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
877 EVT OpVT =
N->getOperand(0).getValueType();
889 Res = DAG.
getNode(ExtendCode,
DL, NVT, Res);
897 assert(
N->isUnindexed() &&
"Indexed store of one-element vector?");
898 assert(OpNo == 1 &&
"Do not know how to scalarize this operand!");
901 if (
N->isTruncatingStore())
903 N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
904 N->getBasePtr(),
N->getPointerInfo(),
905 N->getMemoryVT().getVectorElementType(),
N->getOriginalAlign(),
906 N->getMemOperand()->getFlags(),
N->getAAInfo());
908 return DAG.
getStore(
N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
909 N->getBasePtr(),
N->getPointerInfo(),
910 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
// Scalarizes the vector operand of FP_ROUND: only operand 0 may be
// scalarized here; the op is rebuilt over the result's scalar element
// type (trailing operands of the getNode call were dropped).
916SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(
SDNode *
N,
unsigned OpNo) {
917 assert(OpNo == 0 &&
"Wrong operand for scalarization!");
918 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
920 N->getValueType(0).getVectorElementType(), Elt,
// Strict variant of FP_ROUND operand scalarization: operand 0 is the
// chain, operand 1 the value (hence OpNo == 1), operand 2 the rounding
// control. The chain result is propagated via ReplaceValueWith.
925SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(
SDNode *
N,
927 assert(OpNo == 1 &&
"Wrong operand for scalarization!");
928 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
932 {
N->getOperand(0), Elt,
N->getOperand(2) });
// Replace result 0 of the old node with the rebuilt strict op.
941 ReplaceValueWith(
SDValue(
N, 0), Res);
948 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
950 N->getValueType(0).getVectorElementType(), Elt);
// Strict FP_EXTEND operand scalarization: operand 0 is the chain,
// operand 1 the value to extend; the rebuilt node replaces result 0.
956SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(
SDNode *
N) {
957 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
961 {
N->getOperand(0), Elt});
970 ReplaceValueWith(
SDValue(
N, 0), Res);
975 SDValue Res = GetScalarizedVector(
N->getOperand(0));
982SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(
SDNode *
N) {
990 AccOp,
Op,
N->getFlags());
1001void DAGTypeLegalizer::SplitVectorResult(
SDNode *
N,
unsigned ResNo) {
1006 if (CustomLowerNode(
N,
N->getValueType(ResNo),
true))
1009 switch (
N->getOpcode()) {
1012 dbgs() <<
"SplitVectorResult #" << ResNo <<
": ";
1024 case ISD::VP_SELECT: SplitRes_Select(
N,
Lo,
Hi);
break;
1039 SplitVecRes_ScalarOp(
N,
Lo,
Hi);
1042 SplitVecRes_STEP_VECTOR(
N,
Lo,
Hi);
1046 SplitVecRes_LOAD(cast<LoadSDNode>(
N),
Lo,
Hi);
1049 SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(
N),
Lo,
Hi);
1051 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1052 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N),
Lo,
Hi);
1055 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(
N),
Lo,
Hi);
1058 case ISD::VP_GATHER:
1059 SplitVecRes_Gather(cast<MemSDNode>(
N),
Lo,
Hi,
true);
1063 SplitVecRes_SETCC(
N,
Lo,
Hi);
1066 SplitVecRes_VECTOR_REVERSE(
N,
Lo,
Hi);
1069 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N),
Lo,
Hi);
1072 SplitVecRes_VECTOR_SPLICE(
N,
Lo,
Hi);
1075 SplitVecRes_VECTOR_DEINTERLEAVE(
N);
1078 SplitVecRes_VECTOR_INTERLEAVE(
N);
1081 SplitVecRes_VAARG(
N,
Lo,
Hi);
1087 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
1093 case ISD::VP_BITREVERSE:
1101 case ISD::VP_CTLZ_ZERO_UNDEF:
1103 case ISD::VP_CTTZ_ZERO_UNDEF:
1114 case ISD::VP_FFLOOR:
1119 case ISD::VP_FNEARBYINT:
1124 case ISD::VP_FP_EXTEND:
1126 case ISD::VP_FP_ROUND:
1128 case ISD::VP_FP_TO_SINT:
1130 case ISD::VP_FP_TO_UINT:
1136 case ISD::VP_LLRINT:
1138 case ISD::VP_FROUND:
1140 case ISD::VP_FROUNDEVEN:
1144 case ISD::VP_FROUNDTOZERO:
1146 case ISD::VP_SINT_TO_FP:
1148 case ISD::VP_TRUNCATE:
1150 case ISD::VP_UINT_TO_FP:
1152 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
1155 SplitVecRes_ADDRSPACECAST(
N,
Lo,
Hi);
1158 SplitVecRes_FFREXP(
N, ResNo,
Lo,
Hi);
1164 case ISD::VP_SIGN_EXTEND:
1165 case ISD::VP_ZERO_EXTEND:
1166 SplitVecRes_ExtendOp(
N,
Lo,
Hi);
1180 case ISD::VP_FMINIMUM:
1182 case ISD::VP_FMAXIMUM:
1188 case ISD::OR:
case ISD::VP_OR:
1208 case ISD::VP_FCOPYSIGN:
1209 SplitVecRes_BinOp(
N,
Lo,
Hi);
1216 SplitVecRes_TernaryOp(
N,
Lo,
Hi);
1219#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1220 case ISD::STRICT_##DAGN:
1221#include "llvm/IR/ConstrainedOps.def"
1222 SplitVecRes_StrictFPOp(
N,
Lo,
Hi);
1227 SplitVecRes_FP_TO_XINT_SAT(
N,
Lo,
Hi);
1236 SplitVecRes_OverflowOp(
N, ResNo,
Lo,
Hi);
1246 SplitVecRes_FIX(
N,
Lo,
Hi);
1248 case ISD::EXPERIMENTAL_VP_REVERSE:
1249 SplitVecRes_VP_REVERSE(
N,
Lo,
Hi);
1258void DAGTypeLegalizer::IncrementPointer(
MemSDNode *
N,
EVT MemVT,
1267 DL,
Ptr.getValueType(),
1268 APInt(
Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
1270 Flags.setNoUnsignedWrap(
true);
1272 *ScaledOffset += IncrementSize;
1276 MPI =
N->getPointerInfo().getWithOffset(IncrementSize);
// Convenience overload: splits a mask vector into lo/hi halves using the
// mask's own debug location.
1282std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask) {
1283 return SplitMask(Mask,
SDLoc(Mask));
// Splits mask vector Mask into (MaskLo, MaskHi) via GetSplitVector and
// returns the pair. (Fallback path for a not-yet-split mask was dropped
// by the extraction.)
1286std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask,
1289 EVT MaskVT =
Mask.getValueType();
1291 GetSplitVector(Mask, MaskLo, MaskHi);
1294 return std::make_pair(MaskLo, MaskHi);
1299 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1301 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1305 unsigned Opcode =
N->getOpcode();
1306 if (
N->getNumOperands() == 2) {
1312 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
1313 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1316 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
1319 std::tie(EVLLo, EVLHi) =
1320 DAG.
SplitEVL(
N->getOperand(3),
N->getValueType(0), dl);
1323 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1325 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1331 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
1333 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
1335 GetSplitVector(
N->getOperand(2), Op2Lo, Op2Hi);
1339 unsigned Opcode =
N->getOpcode();
1340 if (
N->getNumOperands() == 3) {
1346 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
1347 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1350 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
1353 std::tie(EVLLo, EVLHi) =
1354 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0), dl);
1357 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1359 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1364 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1366 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1370 unsigned Opcode =
N->getOpcode();
1389 switch (getTypeAction(InVT)) {
1404 GetExpandedOp(InOp,
Lo,
Hi);
1415 GetSplitVector(InOp,
Lo,
Hi);
1436 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT,
Lo,
Hi);
1459 assert(!(
N->getNumOperands() & 1) &&
"Unsupported CONCAT_VECTORS");
1461 unsigned NumSubvectors =
N->getNumOperands() / 2;
1462 if (NumSubvectors == 1) {
1463 Lo =
N->getOperand(0);
1464 Hi =
N->getOperand(1);
1478void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(
SDNode *
N,
SDValue &
Lo,
1500 GetSplitVector(Vec,
Lo,
Hi);
1503 EVT LoVT =
Lo.getValueType();
1512 unsigned IdxVal =
Idx->getAsZExtVal();
1513 if (IdxVal + SubElems <= LoElems) {
1521 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
1547 Lo = DAG.
getLoad(
Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
1551 auto *
Load = cast<LoadSDNode>(
Lo);
1553 IncrementPointer(Load, LoVT, MPI, StackPtr);
1556 Hi = DAG.
getLoad(
Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
1565 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1570 EVT RHSVT =
RHS.getValueType();
1573 GetSplitVector(RHS, RHSLo, RHSHi);
1590 SDValue FpValue =
N->getOperand(0);
1592 GetSplitVector(FpValue, ArgLo, ArgHi);
1605 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1609 std::tie(LoVT, HiVT) =
1620 unsigned Opcode =
N->getOpcode();
1627 GetSplitVector(N0, InLo, InHi);
1634 EVT OutLoVT, OutHiVT;
1637 assert((2 * OutNumElements) <= InNumElements &&
1638 "Illegal extend vector in reg split");
1648 for (
unsigned i = 0; i != OutNumElements; ++i)
1649 SplitHi[i] = i + OutNumElements;
1652 Lo = DAG.
getNode(Opcode, dl, OutLoVT, InLo);
1653 Hi = DAG.
getNode(Opcode, dl, OutHiVT, InHi);
1658 unsigned NumOps =
N->getNumOperands();
1672 for (
unsigned i = 1; i < NumOps; ++i) {
1677 EVT InVT =
Op.getValueType();
1682 GetSplitVector(
Op, OpLo, OpHi);
1691 EVT LoValueVTs[] = {LoVT, MVT::Other};
1692 EVT HiValueVTs[] = {HiVT, MVT::Other};
1701 Lo.getValue(1),
Hi.getValue(1));
1705 ReplaceValueWith(
SDValue(
N, 1), Chain);
1708SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(
SDNode *
N,
unsigned ResNE) {
1710 EVT VT =
N->getValueType(0);
1721 else if (NE > ResNE)
1725 EVT ChainVTs[] = {EltVT, MVT::Other};
1729 for (i = 0; i !=
NE; ++i) {
1731 for (
unsigned j = 1, e =
N->getNumOperands(); j != e; ++j) {
1732 SDValue Operand =
N->getOperand(j);
1743 Scalar.getNode()->setFlags(
N->getFlags());
1751 for (; i < ResNE; ++i)
1756 ReplaceValueWith(
SDValue(
N, 1), Chain);
1763void DAGTypeLegalizer::SplitVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo,
1766 EVT ResVT =
N->getValueType(0);
1767 EVT OvVT =
N->getValueType(1);
1768 EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
1772 SDValue LoLHS, HiLHS, LoRHS, HiRHS;
1774 GetSplitVector(
N->getOperand(0), LoLHS, HiLHS);
1775 GetSplitVector(
N->getOperand(1), LoRHS, HiRHS);
1781 unsigned Opcode =
N->getOpcode();
1793 unsigned OtherNo = 1 - ResNo;
1794 EVT OtherVT =
N->getValueType(OtherNo);
1796 SetSplitVector(
SDValue(
N, OtherNo),
1802 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
1806void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(
SDNode *
N,
SDValue &
Lo,
1812 GetSplitVector(Vec,
Lo,
Hi);
1815 unsigned IdxVal = CIdx->getZExtValue();
1816 unsigned LoNumElts =
Lo.getValueType().getVectorMinNumElements();
1817 if (IdxVal < LoNumElts) {
1819 Lo.getValueType(),
Lo, Elt,
Idx);
1829 if (CustomLowerNode(
N,
N->getValueType(0),
true))
1870 Lo = DAG.
getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
1873 auto Load = cast<LoadSDNode>(
Lo);
1875 IncrementPointer(Load, LoVT, MPI, StackPtr);
1877 Hi = DAG.
getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
1881 if (LoVT !=
Lo.getValueType())
1883 if (HiVT !=
Hi.getValueType())
1891 assert(
N->getValueType(0).isScalableVector() &&
1892 "Only scalable vectors are supported for STEP_VECTOR");
1915 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0));
1935 EVT MemoryVT =
LD->getMemoryVT();
1939 EVT LoMemVT, HiMemVT;
1946 ReplaceValueWith(
SDValue(LD, 1), NewChain);
1951 LD->getPointerInfo(), LoMemVT,
LD->getOriginalAlign(),
1955 IncrementPointer(LD, LoMemVT, MPI,
Ptr);
1958 HiMemVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
1967 ReplaceValueWith(
SDValue(LD, 1), Ch);
1972 assert(
LD->isUnindexed() &&
"Indexed VP load during type legalization!");
1981 assert(
Offset.isUndef() &&
"Unexpected indexed variable-length load offset");
1982 Align Alignment =
LD->getOriginalAlign();
1985 EVT MemoryVT =
LD->getMemoryVT();
1987 EVT LoMemVT, HiMemVT;
1988 bool HiIsEmpty =
false;
1989 std::tie(LoMemVT, HiMemVT) =
1995 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
1998 GetSplitVector(Mask, MaskLo, MaskHi);
2000 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2005 std::tie(EVLLo, EVLHi) = DAG.
SplitEVL(EVL,
LD->getValueType(0), dl);
2014 MaskLo, EVLLo, LoMemVT, MMO,
LD->isExpandingLoad());
2023 LD->isExpandingLoad());
2029 MPI =
LD->getPointerInfo().getWithOffset(
2034 Alignment,
LD->getAAInfo(),
LD->getRanges());
2037 Offset, MaskHi, EVLHi, HiMemVT, MMO,
2038 LD->isExpandingLoad());
2048 ReplaceValueWith(
SDValue(LD, 1), Ch);
2054 "Indexed VP strided load during type legalization!");
2056 "Unexpected indexed variable-length load offset");
2063 EVT LoMemVT, HiMemVT;
2064 bool HiIsEmpty =
false;
2065 std::tie(LoMemVT, HiMemVT) =
2071 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2074 GetSplitVector(Mask, LoMask, HiMask);
2080 std::tie(LoEVL, HiEVL) =
2118 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2129 ReplaceValueWith(
SDValue(SLD, 1), Ch);
2142 assert(
Offset.isUndef() &&
"Unexpected indexed masked load offset");
2151 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2154 GetSplitVector(Mask, MaskLo, MaskHi);
2156 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2160 EVT LoMemVT, HiMemVT;
2161 bool HiIsEmpty =
false;
2162 std::tie(LoMemVT, HiMemVT) =
2165 SDValue PassThruLo, PassThruHi;
2167 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2169 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2212 ReplaceValueWith(
SDValue(MLD, 1), Ch);
2229 if (
auto *MSC = dyn_cast<MaskedGatherSDNode>(
N)) {
2230 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
2232 auto *VPSC = cast<VPGatherSDNode>(
N);
2233 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
2236 EVT MemoryVT =
N->getMemoryVT();
2237 Align Alignment =
N->getOriginalAlign();
2241 if (SplitSETCC && Ops.Mask.getOpcode() ==
ISD::SETCC) {
2242 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
2244 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
2247 EVT LoMemVT, HiMemVT;
2252 if (getTypeAction(Ops.Index.getValueType()) ==
2254 GetSplitVector(Ops.Index, IndexLo, IndexHi);
2256 std::tie(IndexLo, IndexHi) = DAG.
SplitVector(Ops.Index, dl);
2263 if (
auto *MGT = dyn_cast<MaskedGatherSDNode>(
N)) {
2264 SDValue PassThru = MGT->getPassThru();
2265 SDValue PassThruLo, PassThruHi;
2268 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2270 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2275 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
2277 OpsLo, MMO, IndexTy, ExtType);
2279 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
2281 OpsHi, MMO, IndexTy, ExtType);
2283 auto *VPGT = cast<VPGatherSDNode>(
N);
2285 std::tie(EVLLo, EVLHi) =
2286 DAG.
SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
2288 SDValue OpsLo[] = {Ch,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
2290 MMO, VPGT->getIndexType());
2292 SDValue OpsHi[] = {Ch,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
2294 MMO, VPGT->getIndexType());
2304 ReplaceValueWith(
SDValue(
N, 1), Ch);
2308 assert(
N->getValueType(0).isVector() &&
2309 N->getOperand(0).getValueType().isVector() &&
2310 "Operand types must be vectors");
2318 if (getTypeAction(
N->getOperand(0).getValueType()) ==
2320 GetSplitVector(
N->getOperand(0), LL, LH);
2324 if (getTypeAction(
N->getOperand(1).getValueType()) ==
2326 GetSplitVector(
N->getOperand(1), RL, RH);
2331 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2));
2332 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2));
2334 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
2335 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2336 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
2337 std::tie(EVLLo, EVLHi) =
2338 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
2339 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2), MaskLo,
2341 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2), MaskHi,
2355 EVT InVT =
N->getOperand(0).getValueType();
2357 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2362 unsigned Opcode =
N->getOpcode();
2363 if (
N->getNumOperands() <= 2) {
2365 Lo = DAG.
getNode(Opcode, dl, LoVT,
Lo,
N->getOperand(1), Flags);
2366 Hi = DAG.
getNode(Opcode, dl, HiVT,
Hi,
N->getOperand(1), Flags);
2374 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
2375 assert(
N->isVPOpcode() &&
"Expected VP opcode");
2378 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2381 std::tie(EVLLo, EVLHi) =
2382 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2395 EVT InVT =
N->getOperand(0).getValueType();
2397 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2401 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
2402 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
2403 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
2408void DAGTypeLegalizer::SplitVecRes_FFREXP(
SDNode *
N,
unsigned ResNo,
2416 EVT InVT =
N->getOperand(0).getValueType();
2418 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2422 Lo = DAG.
getNode(
N->getOpcode(), dl, {LoVT, LoVT1},
Lo);
2423 Hi = DAG.
getNode(
N->getOpcode(), dl, {HiVT, HiVT1},
Hi);
2424 Lo->setFlags(
N->getFlags());
2425 Hi->setFlags(
N->getFlags());
2431 unsigned OtherNo = 1 - ResNo;
2432 EVT OtherVT =
N->getValueType(OtherNo);
2440 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
2447 EVT SrcVT =
N->getOperand(0).getValueType();
2448 EVT DestVT =
N->getValueType(0);
2471 EVT SplitLoVT, SplitHiVT;
2475 LLVM_DEBUG(
dbgs() <<
"Split vector extend via incremental extend:";
2476 N->dump(&DAG);
dbgs() <<
"\n");
2477 if (!
N->isVPOpcode()) {
2480 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0));
2491 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0),
2492 N->getOperand(1),
N->getOperand(2));
2497 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2500 std::tie(EVLLo, EVLHi) =
2501 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2503 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2504 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2509 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
2517 GetSplitVector(
N->getOperand(0), Inputs[0], Inputs[1]);
2518 GetSplitVector(
N->getOperand(1), Inputs[2], Inputs[3]);
2524 return N.getResNo() == 0 &&
2528 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &
DL](
SDValue &Input1,
2533 "Expected build vector node.");
2536 for (
unsigned I = 0;
I < NewElts; ++
I) {
2541 Ops[
I] = Input2.getOperand(
Idx - NewElts);
2543 Ops[
I] = Input1.getOperand(
Idx);
2545 if (Ops[
I].getValueType().bitsGT(EltVT))
2548 return DAG.getBuildVector(NewVT,
DL, Ops);
2556 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT,
this, NewElts,
2560 for (
unsigned Idx = 0;
Idx < std::size(Inputs); ++
Idx) {
2562 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.
getNode());
2571 for (
auto &
P : ShufflesIdxs) {
2572 if (
P.second.size() < 2)
2576 for (
int &
Idx : Mask) {
2579 unsigned SrcRegIdx =
Idx / NewElts;
2580 if (Inputs[SrcRegIdx].
isUndef()) {
2585 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
2588 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2593 Idx = MaskElt % NewElts +
2594 P.second[Shuffle->getOperand(MaskElt / NewElts) ==
P.first.first
2600 Inputs[
P.second[0]] =
P.first.first;
2601 Inputs[
P.second[1]] =
P.first.second;
2604 ShufflesIdxs[std::make_pair(
P.first.second,
P.first.first)].clear();
2608 for (
int &
Idx : Mask) {
2611 unsigned SrcRegIdx =
Idx / NewElts;
2612 if (Inputs[SrcRegIdx].
isUndef()) {
2617 getTypeAction(Inputs[SrcRegIdx].getValueType());
2619 Inputs[SrcRegIdx].getNumOperands() == 2 &&
2620 !Inputs[SrcRegIdx].getOperand(1).
isUndef() &&
2623 UsedSubVector.set(2 * SrcRegIdx + (
Idx % NewElts) / (NewElts / 2));
2625 if (UsedSubVector.count() > 1) {
2627 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2628 if (UsedSubVector.test(2 *
I) == UsedSubVector.test(2 *
I + 1))
2630 if (Pairs.
empty() || Pairs.
back().size() == 2)
2632 if (UsedSubVector.test(2 *
I)) {
2633 Pairs.
back().emplace_back(
I, 0);
2635 assert(UsedSubVector.test(2 *
I + 1) &&
2636 "Expected to be used one of the subvectors.");
2637 Pairs.
back().emplace_back(
I, 1);
2640 if (!Pairs.
empty() && Pairs.
front().size() > 1) {
2642 for (
int &
Idx : Mask) {
2645 unsigned SrcRegIdx =
Idx / NewElts;
2647 Pairs, [SrcRegIdx](
ArrayRef<std::pair<unsigned, int>> Idxs) {
2648 return Idxs.front().first == SrcRegIdx ||
2649 Idxs.back().first == SrcRegIdx;
2651 if (It == Pairs.
end())
2653 Idx = It->front().first * NewElts + (
Idx % NewElts) % (NewElts / 2) +
2654 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
2657 for (
ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
2658 Inputs[Idxs.front().first] = DAG.
getNode(
2660 Inputs[Idxs.front().first].getValueType(),
2661 Inputs[Idxs.front().first].
getOperand(Idxs.front().second),
2662 Inputs[Idxs.back().first].
getOperand(Idxs.back().second));
2671 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2672 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[
I].getNode());
2675 if (Shuffle->getOperand(0).getValueType() != NewVT)
2678 if (!Inputs[
I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
2679 !Shuffle->isSplat()) {
2681 }
else if (!Inputs[
I].hasOneUse() &&
2682 !Shuffle->getOperand(1).isUndef()) {
2684 for (
int &
Idx : Mask) {
2687 unsigned SrcRegIdx =
Idx / NewElts;
2690 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2695 int OpIdx = MaskElt / NewElts;
2708 for (
int OpIdx = 0; OpIdx < 2; ++OpIdx) {
2709 if (Shuffle->getOperand(OpIdx).isUndef())
2711 auto *It =
find(Inputs, Shuffle->getOperand(OpIdx));
2712 if (It == std::end(Inputs))
2714 int FoundOp = std::distance(std::begin(Inputs), It);
2717 for (
int &
Idx : Mask) {
2720 unsigned SrcRegIdx =
Idx / NewElts;
2723 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2728 int MaskIdx = MaskElt / NewElts;
2729 if (OpIdx == MaskIdx)
2730 Idx = MaskElt % NewElts + FoundOp * NewElts;
2733 Op = (OpIdx + 1) % 2;
2741 for (
int &
Idx : Mask) {
2744 unsigned SrcRegIdx =
Idx / NewElts;
2747 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2748 int OpIdx = MaskElt / NewElts;
2751 Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
2757 TryPeekThroughShufflesInputs(OrigMask);
2759 auto &&MakeUniqueInputs = [&Inputs, &
IsConstant,
2763 for (
const auto &
I : Inputs) {
2765 UniqueConstantInputs.
insert(
I);
2766 else if (!
I.isUndef())
2771 if (UniqueInputs.
size() != std::size(Inputs)) {
2772 auto &&UniqueVec = UniqueInputs.
takeVector();
2773 auto &&UniqueConstantVec = UniqueConstantInputs.
takeVector();
2774 unsigned ConstNum = UniqueConstantVec.size();
2775 for (
int &
Idx : Mask) {
2778 unsigned SrcRegIdx =
Idx / NewElts;
2779 if (Inputs[SrcRegIdx].
isUndef()) {
2783 const auto It =
find(UniqueConstantVec, Inputs[SrcRegIdx]);
2784 if (It != UniqueConstantVec.end()) {
2786 NewElts * std::distance(UniqueConstantVec.begin(), It);
2787 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2790 const auto RegIt =
find(UniqueVec, Inputs[SrcRegIdx]);
2791 assert(RegIt != UniqueVec.end() &&
"Cannot find non-const value.");
2793 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
2794 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2796 copy(UniqueConstantVec, std::begin(Inputs));
2797 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
2800 MakeUniqueInputs(OrigMask);
2802 copy(Inputs, std::begin(OrigInputs));
2808 unsigned FirstMaskIdx =
High * NewElts;
2811 assert(!Output &&
"Expected default initialized initial value.");
2812 TryPeekThroughShufflesInputs(Mask);
2813 MakeUniqueInputs(Mask);
2815 copy(Inputs, std::begin(TmpInputs));
2818 bool SecondIteration =
false;
2819 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](
unsigned Idx) {
2824 if (UsedIdx >= 0 &&
static_cast<unsigned>(UsedIdx) ==
Idx)
2825 SecondIteration =
true;
2826 return SecondIteration;
2829 Mask, std::size(Inputs), std::size(Inputs),
2831 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
2832 [&Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2835 Output = BuildVector(Inputs[
Idx], Inputs[
Idx], Mask);
2837 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[
Idx],
2838 DAG.getUNDEF(NewVT), Mask);
2839 Inputs[
Idx] = Output;
2841 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2844 if (AccumulateResults(Idx1)) {
2847 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
2849 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[Idx1],
2850 Inputs[Idx2], Mask);
2854 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
2856 Output = DAG.getVectorShuffle(NewVT,
DL, TmpInputs[Idx1],
2857 TmpInputs[Idx2], Mask);
2859 Inputs[Idx1] = Output;
2861 copy(OrigInputs, std::begin(Inputs));
2866 EVT OVT =
N->getValueType(0);
2873 const Align Alignment =
2874 DAG.getDataLayout().getABITypeAlign(NVT.
getTypeForEVT(*DAG.getContext()));
2876 Lo = DAG.getVAArg(NVT, dl, Chain,
Ptr, SV, Alignment.
value());
2877 Hi = DAG.getVAArg(NVT, dl,
Lo.getValue(1),
Ptr, SV, Alignment.
value());
2878 Chain =
Hi.getValue(1);
2882 ReplaceValueWith(
SDValue(
N, 1), Chain);
2887 EVT DstVTLo, DstVTHi;
2888 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(
N->getValueType(0));
2892 EVT SrcVT =
N->getOperand(0).getValueType();
2894 GetSplitVector(
N->getOperand(0), SrcLo, SrcHi);
2896 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(
N, 0);
2898 Lo = DAG.getNode(
N->getOpcode(), dl, DstVTLo, SrcLo,
N->getOperand(1));
2899 Hi = DAG.getNode(
N->getOpcode(), dl, DstVTHi, SrcHi,
N->getOperand(1));
2905 GetSplitVector(
N->getOperand(0), InLo, InHi);
2914 EVT VT =
N->getValueType(0);
2918 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
2922 DAG.getVectorIdxConstant(0,
DL));
2930 EVT VT =
N->getValueType(0);
2937 Align Alignment = DAG.getReducedAlign(VT,
false);
2943 auto &MF = DAG.getMachineFunction();
2957 DAG.getConstant(1,
DL, PtrVT));
2959 DAG.getConstant(EltWidth,
DL, PtrVT));
2961 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth,
DL, PtrVT);
2963 SDValue TrueMask = DAG.getBoolConstant(
true,
DL,
Mask.getValueType(), VT);
2964 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(),
DL, Val, StorePtr,
2965 DAG.getUNDEF(PtrVT), Stride, TrueMask,
2968 SDValue Load = DAG.getLoadVP(VT,
DL, Store, StackPtr, Mask, EVL, LoadMMO);
2970 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT);
2972 DAG.getVectorIdxConstant(0,
DL));
2978void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(
SDNode *
N) {
2980 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
2981 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2982 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
2986 DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
2988 DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
2994void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(
SDNode *
N) {
2995 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
2996 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2997 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
3001 DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
3003 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
3005 SetSplitVector(
SDValue(
N, 0), Res[0].getValue(0), Res[0].getValue(1));
3006 SetSplitVector(
SDValue(
N, 1), Res[1].getValue(0), Res[1].getValue(1));
3017bool DAGTypeLegalizer::SplitVectorOperand(
SDNode *
N,
unsigned OpNo) {
3022 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
3025 switch (
N->getOpcode()) {
3028 dbgs() <<
"SplitVectorOperand Op #" << OpNo <<
": ";
3036 case ISD::SETCC: Res = SplitVecOp_VSETCC(
N);
break;
3042 case ISD::VP_TRUNCATE:
3044 Res = SplitVecOp_TruncateHelper(
N);
3047 case ISD::VP_FP_ROUND:
3051 Res = SplitVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
3054 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(
N), OpNo);
3056 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
3057 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(
N), OpNo);
3060 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(
N), OpNo);
3063 case ISD::VP_SCATTER:
3064 Res = SplitVecOp_Scatter(cast<MemSDNode>(
N), OpNo);
3067 case ISD::VP_GATHER:
3068 Res = SplitVecOp_Gather(cast<MemSDNode>(
N), OpNo);
3071 Res = SplitVecOp_VSELECT(
N, OpNo);
3077 case ISD::VP_SINT_TO_FP:
3078 case ISD::VP_UINT_TO_FP:
3079 if (
N->getValueType(0).bitsLT(
3080 N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).getValueType()))
3081 Res = SplitVecOp_TruncateHelper(
N);
3083 Res = SplitVecOp_UnaryOp(
N);
3087 Res = SplitVecOp_FP_TO_XINT_SAT(
N);
3091 case ISD::VP_FP_TO_SINT:
3092 case ISD::VP_FP_TO_UINT:
3103 Res = SplitVecOp_UnaryOp(
N);
3106 Res = SplitVecOp_FPOpDifferentTypes(
N);
3112 Res = SplitVecOp_ExtVecInRegOp(
N);
3130 Res = SplitVecOp_VECREDUCE(
N, OpNo);
3134 Res = SplitVecOp_VECREDUCE_SEQ(
N);
3136 case ISD::VP_REDUCE_FADD:
3137 case ISD::VP_REDUCE_SEQ_FADD:
3138 case ISD::VP_REDUCE_FMUL:
3139 case ISD::VP_REDUCE_SEQ_FMUL:
3140 case ISD::VP_REDUCE_ADD:
3141 case ISD::VP_REDUCE_MUL:
3142 case ISD::VP_REDUCE_AND:
3143 case ISD::VP_REDUCE_OR:
3144 case ISD::VP_REDUCE_XOR:
3145 case ISD::VP_REDUCE_SMAX:
3146 case ISD::VP_REDUCE_SMIN:
3147 case ISD::VP_REDUCE_UMAX:
3148 case ISD::VP_REDUCE_UMIN:
3149 case ISD::VP_REDUCE_FMAX:
3150 case ISD::VP_REDUCE_FMIN:
3151 case ISD::VP_REDUCE_FMAXIMUM:
3152 case ISD::VP_REDUCE_FMINIMUM:
3153 Res = SplitVecOp_VP_REDUCE(
N, OpNo);
3155 case ISD::VP_CTTZ_ELTS:
3156 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
3157 Res = SplitVecOp_VP_CttzElements(
N);
3162 if (!Res.
getNode())
return false;
3169 if (
N->isStrictFPOpcode())
3171 "Invalid operand expansion");
3174 "Invalid operand expansion");
3176 ReplaceValueWith(
SDValue(
N, 0), Res);
3180SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(
SDNode *
N,
unsigned OpNo) {
3183 assert(OpNo == 0 &&
"Illegal operand must be mask");
3190 assert(
Mask.getValueType().isVector() &&
"VSELECT without a vector mask?");
3193 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3194 assert(
Lo.getValueType() ==
Hi.getValueType() &&
3195 "Lo and Hi have differing types");
3198 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3199 assert(LoOpVT == HiOpVT &&
"Asymmetric vector split?");
3201 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3202 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0,
DL);
3203 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1,
DL);
3204 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3214SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(
SDNode *
N,
unsigned OpNo) {
3215 EVT ResVT =
N->getValueType(0);
3219 SDValue VecOp =
N->getOperand(OpNo);
3221 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3222 GetSplitVector(VecOp,
Lo,
Hi);
3224 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3230 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
N->getFlags());
3234 EVT ResVT =
N->getValueType(0);
3243 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3244 GetSplitVector(VecOp,
Lo,
Hi);
3246 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3252 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
Hi, Flags);
3255SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(
SDNode *
N,
unsigned OpNo) {
3256 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3257 assert(OpNo == 1 &&
"Can only split reduce vector operand");
3259 unsigned Opc =
N->getOpcode();
3260 EVT ResVT =
N->getValueType(0);
3264 SDValue VecOp =
N->getOperand(OpNo);
3266 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3267 GetSplitVector(VecOp,
Lo,
Hi);
3270 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
3273 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(
N->getOperand(3), VecVT, dl);
3278 DAG.
getNode(Opc, dl, ResVT, {
N->getOperand(0),
Lo, MaskLo, EVLLo},
Flags);
3279 return DAG.getNode(Opc, dl, ResVT, {ResLo,
Hi, MaskHi, EVLHi},
Flags);
3284 EVT ResVT =
N->getValueType(0);
3287 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3288 EVT InVT =
Lo.getValueType();
3293 if (
N->isStrictFPOpcode()) {
3294 Lo = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3295 { N->getOperand(0), Lo });
3296 Hi = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3297 { N->getOperand(0), Hi });
3306 ReplaceValueWith(
SDValue(
N, 1), Ch);
3307 }
else if (
N->getNumOperands() == 3) {
3308 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3309 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3310 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
3311 std::tie(EVLLo, EVLHi) =
3312 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
3313 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo, MaskLo, EVLLo);
3314 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi, MaskHi, EVLHi);
3316 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo);
3317 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi);
3327 EVT ResVT =
N->getValueType(0);
3329 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3333 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3339 Lo = BitConvertToInteger(
Lo);
3340 Hi = BitConvertToInteger(
Hi);
3342 if (DAG.getDataLayout().isBigEndian())
3350 assert(OpNo == 1 &&
"Invalid OpNo; can only split SubVec.");
3352 EVT ResVT =
N->getValueType(0);
3360 GetSplitVector(SubVec,
Lo,
Hi);
3363 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3369 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3371 return SecondInsertion;
3374SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
3376 EVT SubVT =
N->getValueType(0);
3381 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3383 uint64_t LoEltsMin =
Lo.getValueType().getVectorMinNumElements();
3386 if (IdxVal < LoEltsMin) {
3388 "Extracted subvector crosses vector split!");
3391 N->getOperand(0).getValueType().isScalableVector())
3393 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
3398 "Extracting scalable subvector from fixed-width unsupported");
3406 "subvector from a scalable predicate vector");
3412 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3414 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3415 auto &MF = DAG.getMachineFunction();
3419 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3426 SubVT, dl, Store, StackPtr,
3430SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
3439 GetSplitVector(Vec,
Lo,
Hi);
3441 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3443 if (IdxVal < LoElts)
3447 DAG.getConstant(IdxVal - LoElts,
SDLoc(
N),
3448 Idx.getValueType())), 0);
3452 if (CustomLowerNode(
N,
N->getValueType(0),
true))
3468 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3470 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3471 auto &MF = DAG.getMachineFunction();
3474 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3482 if (
N->getValueType(0).bitsLT(EltVT)) {
3483 SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
3485 return DAG.getZExtOrTrunc(Load, dl,
N->getValueType(0));
3488 return DAG.getExtLoad(
3499 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
3507 SplitVecRes_Gather(
N,
Lo,
Hi);
3510 ReplaceValueWith(
SDValue(
N, 0), Res);
3515 assert(
N->isUnindexed() &&
"Indexed vp_store of vector?");
3519 assert(
Offset.isUndef() &&
"Unexpected VP store offset");
3521 SDValue EVL =
N->getVectorLength();
3523 Align Alignment =
N->getOriginalAlign();
3529 GetSplitVector(
Data, DataLo, DataHi);
3531 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3536 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3539 GetSplitVector(Mask, MaskLo, MaskHi);
3541 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3544 EVT MemoryVT =
N->getMemoryVT();
3545 EVT LoMemVT, HiMemVT;
3546 bool HiIsEmpty =
false;
3547 std::tie(LoMemVT, HiMemVT) =
3548 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3552 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL,
Data.getValueType(),
DL);
3560 Lo = DAG.getStoreVP(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, EVLLo, LoMemVT, MMO,
3561 N->getAddressingMode(),
N->isTruncatingStore(),
3562 N->isCompressingStore());
3569 N->isCompressingStore());
3577 MPI =
N->getPointerInfo().getWithOffset(
3580 MMO = DAG.getMachineFunction().getMachineMemOperand(
3582 Alignment,
N->getAAInfo(),
N->getRanges());
3584 Hi = DAG.getStoreVP(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
3585 N->getAddressingMode(),
N->isTruncatingStore(),
3586 N->isCompressingStore());
3595 assert(
N->isUnindexed() &&
"Indexed vp_strided_store of a vector?");
3596 assert(
N->getOffset().isUndef() &&
"Unexpected VP strided store offset");
3603 GetSplitVector(
Data, LoData, HiData);
3605 std::tie(LoData, HiData) = DAG.SplitVector(
Data,
DL);
3607 EVT LoMemVT, HiMemVT;
3608 bool HiIsEmpty =
false;
3609 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
3615 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
3616 else if (getTypeAction(
Mask.getValueType()) ==
3618 GetSplitVector(Mask, LoMask, HiMask);
3620 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3623 std::tie(LoEVL, HiEVL) =
3624 DAG.SplitEVL(
N->getVectorLength(),
Data.getValueType(),
DL);
3628 N->getChain(),
DL, LoData,
N->getBasePtr(),
N->getOffset(),
3629 N->getStride(), LoMask, LoEVL, LoMemVT,
N->getMemOperand(),
3630 N->getAddressingMode(),
N->isTruncatingStore(),
N->isCompressingStore());
3641 EVT PtrVT =
N->getBasePtr().getValueType();
3644 DAG.getSExtOrTrunc(
N->getStride(),
DL, PtrVT));
3647 Align Alignment =
N->getOriginalAlign();
3655 Alignment,
N->getAAInfo(),
N->getRanges());
3658 N->getChain(),
DL, HiData,
Ptr,
N->getOffset(),
N->getStride(), HiMask,
3659 HiEVL, HiMemVT, MMO,
N->getAddressingMode(),
N->isTruncatingStore(),
3660 N->isCompressingStore());
3669 assert(
N->isUnindexed() &&
"Indexed masked store of vector?");
3673 assert(
Offset.isUndef() &&
"Unexpected indexed masked store offset");
3676 Align Alignment =
N->getOriginalAlign();
3682 GetSplitVector(
Data, DataLo, DataHi);
3684 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3689 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3692 GetSplitVector(Mask, MaskLo, MaskHi);
3694 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3697 EVT MemoryVT =
N->getMemoryVT();
3698 EVT LoMemVT, HiMemVT;
3699 bool HiIsEmpty =
false;
3700 std::tie(LoMemVT, HiMemVT) =
3701 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3709 Lo = DAG.getMaskedStore(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, LoMemVT, MMO,
3710 N->getAddressingMode(),
N->isTruncatingStore(),
3711 N->isCompressingStore());
3720 N->isCompressingStore());
3728 MPI =
N->getPointerInfo().getWithOffset(
3731 MMO = DAG.getMachineFunction().getMachineMemOperand(
3733 Alignment,
N->getAAInfo(),
N->getRanges());
3735 Hi = DAG.getMaskedStore(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, HiMemVT, MMO,
3736 N->getAddressingMode(),
N->isTruncatingStore(),
3737 N->isCompressingStore());
3750 EVT MemoryVT =
N->getMemoryVT();
3751 Align Alignment =
N->getOriginalAlign();
3759 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3760 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
3763 auto *VPSC = cast<VPScatterSDNode>(
N);
3764 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
3769 EVT LoMemVT, HiMemVT;
3770 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3775 GetSplitVector(Ops.Data, DataLo, DataHi);
3777 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data,
DL);
3781 if (OpNo == 1 && Ops.Mask.getOpcode() ==
ISD::SETCC) {
3782 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
3784 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask,
DL);
3788 if (getTypeAction(Ops.Index.getValueType()) ==
3790 GetSplitVector(Ops.Index, IndexLo, IndexHi);
3792 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index,
DL);
3800 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3801 SDValue OpsLo[] = {Ch, DataLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
3803 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3804 MSC->getIndexType(), MSC->isTruncatingStore());
3809 SDValue OpsHi[] = {
Lo, DataHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
3810 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi,
3811 MMO, MSC->getIndexType(),
3812 MSC->isTruncatingStore());
3814 auto *VPSC = cast<VPScatterSDNode>(
N);
3816 std::tie(EVLLo, EVLHi) =
3817 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(),
DL);
3819 SDValue OpsLo[] = {Ch, DataLo,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
3820 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3821 VPSC->getIndexType());
3826 SDValue OpsHi[] = {
Lo, DataHi,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
3827 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi, MMO,
3828 VPSC->getIndexType());
3832 assert(
N->isUnindexed() &&
"Indexed store of vector?");
3833 assert(OpNo == 1 &&
"Can only split the stored value");
3836 bool isTruncating =
N->isTruncatingStore();
3839 EVT MemoryVT =
N->getMemoryVT();
3840 Align Alignment =
N->getOriginalAlign();
3844 GetSplitVector(
N->getOperand(1),
Lo,
Hi);
3846 EVT LoMemVT, HiMemVT;
3847 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3854 Lo = DAG.getTruncStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), LoMemVT,
3855 Alignment, MMOFlags, AAInfo);
3857 Lo = DAG.getStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), Alignment, MMOFlags,
3861 IncrementPointer(
N, LoMemVT, MPI,
Ptr);
3864 Hi = DAG.getTruncStore(Ch,
DL,
Hi,
Ptr, MPI,
3865 HiMemVT, Alignment, MMOFlags, AAInfo);
3867 Hi = DAG.getStore(Ch,
DL,
Hi,
Ptr, MPI, Alignment, MMOFlags, AAInfo);
3881 EVT EltVT =
N->getValueType(0).getVectorElementType();
3883 for (
unsigned i = 0, e =
Op.getValueType().getVectorNumElements();
3886 DAG.getVectorIdxConstant(i,
DL)));
3890 return DAG.getBuildVector(
N->getValueType(0),
DL, Elts);
3911 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
3912 SDValue InVec =
N->getOperand(OpNo);
3914 EVT OutVT =
N->getValueType(0);
3922 EVT LoOutVT, HiOutVT;
3923 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
3924 assert(LoOutVT == HiOutVT &&
"Unequal split?");
3929 if (isTypeLegal(LoOutVT) ||
3930 InElementSize <= OutElementSize * 2)
3931 return SplitVecOp_UnaryOp(
N);
3940 return SplitVecOp_UnaryOp(
N);
3944 GetSplitVector(InVec, InLoVec, InHiVec);
3950 EVT HalfElementVT = IsFloat ?
3952 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
3959 if (
N->isStrictFPOpcode()) {
3960 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3961 {N->getOperand(0), InLoVec});
3962 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3963 {N->getOperand(0), InHiVec});
3969 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InLoVec);
3970 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InHiVec);
3982 if (
N->isStrictFPOpcode()) {
3986 DAG.getTargetConstant(0,
DL, TLI.
getPointerTy(DAG.getDataLayout()))});
3994 DAG.getTargetConstant(
4000 assert(
N->getValueType(0).isVector() &&
4001 N->getOperand(0).getValueType().isVector() &&
4002 "Operand types must be vectors");
4004 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
4006 GetSplitVector(
N->getOperand(0), Lo0, Hi0);
4007 GetSplitVector(
N->getOperand(1), Lo1, Hi1);
4018 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
4019 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4020 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
4021 std::tie(EVLLo, EVLHi) =
4022 DAG.SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
4023 LoRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Lo0, Lo1,
4024 N->getOperand(2), MaskLo, EVLLo);
4025 HiRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Hi0, Hi1,
4026 N->getOperand(2), MaskHi, EVLHi);
4030 EVT OpVT =
N->getOperand(0).getValueType();
4033 return DAG.getNode(ExtendCode,
DL,
N->getValueType(0), Con);
4039 EVT ResVT =
N->getValueType(0);
4042 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
4043 EVT InVT =
Lo.getValueType();
4048 if (
N->isStrictFPOpcode()) {
4049 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4050 { N->getOperand(0), Lo, N->getOperand(2) });
4051 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4052 { N->getOperand(0), Hi, N->getOperand(2) });
4056 Lo.getValue(1),
Hi.getValue(1));
4057 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4058 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4059 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4060 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4061 std::tie(EVLLo, EVLHi) =
4062 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4063 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4064 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
4078SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4081 EVT LHSLoVT, LHSHiVT;
4082 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
4084 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4085 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
4088 std::tie(LHSLo, LHSHi) =
4089 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4092 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4094 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4095 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4101 EVT ResVT =
N->getValueType(0);
4104 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4105 EVT InVT =
Lo.getValueType();
4111 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4112 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
4119 EVT ResVT =
N->getValueType(0);
4123 GetSplitVector(VecOp,
Lo,
Hi);
4125 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
4126 auto [EVLLo, EVLHi] =
4128 SDValue VLo = DAG.getZExtOrTrunc(EVLLo,
DL, ResVT);
4134 DAG.getSetCC(
DL, getSetCCResultType(ResVT), ResLo, VLo,
ISD::SETNE);
4136 return DAG.getSelect(
DL, ResVT, ResLoNotEVL, ResLo,
4137 DAG.getNode(
ISD::ADD,
DL, ResVT, VLo, ResHi));
4144void DAGTypeLegalizer::WidenVectorResult(
SDNode *
N,
unsigned ResNo) {
4145 LLVM_DEBUG(
dbgs() <<
"Widen node result " << ResNo <<
": ";
N->dump(&DAG));
4148 if (CustomWidenLowerNode(
N,
N->getValueType(ResNo)))
4153 auto unrollExpandedOp = [&]() {
4158 EVT VT =
N->getValueType(0);
4168 switch (
N->getOpcode()) {
4171 dbgs() <<
"WidenVectorResult #" << ResNo <<
": ";
4179 Res = WidenVecRes_ADDRSPACECAST(
N);
4186 Res = WidenVecRes_INSERT_SUBVECTOR(
N);
4190 case ISD::LOAD: Res = WidenVecRes_LOAD(
N);
break;
4194 Res = WidenVecRes_ScalarOp(
N);
4199 case ISD::VP_SELECT:
4201 Res = WidenVecRes_Select(
N);
4205 case ISD::SETCC: Res = WidenVecRes_SETCC(
N);
break;
4206 case ISD::UNDEF: Res = WidenVecRes_UNDEF(
N);
break;
4208 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N));
4211 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(
N));
4213 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
4214 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N));
4217 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(
N));
4220 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(
N));
4222 case ISD::VP_GATHER:
4223 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(
N));
4226 Res = WidenVecRes_VECTOR_REVERSE(
N);
4234 case ISD::OR:
case ISD::VP_OR:
4243 case ISD::VP_FMINIMUM:
4245 case ISD::VP_FMAXIMUM:
4276 case ISD::VP_FCOPYSIGN:
4277 Res = WidenVecRes_Binary(
N);
4282 if (unrollExpandedOp())
4297 Res = WidenVecRes_BinaryCanTrap(
N);
4306 Res = WidenVecRes_BinaryWithExtraScalarOp(
N);
4309#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4310 case ISD::STRICT_##DAGN:
4311#include "llvm/IR/ConstrainedOps.def"
4312 Res = WidenVecRes_StrictFP(
N);
4321 Res = WidenVecRes_OverflowOp(
N, ResNo);
4325 Res = WidenVecRes_FCOPYSIGN(
N);
4330 Res = WidenVecRes_UnarySameEltsWithScalarArg(
N);
4335 if (!unrollExpandedOp())
4336 Res = WidenVecRes_ExpOp(
N);
4342 Res = WidenVecRes_EXTEND_VECTOR_INREG(
N);
4347 case ISD::VP_FP_EXTEND:
4349 case ISD::VP_FP_ROUND:
4351 case ISD::VP_FP_TO_SINT:
4353 case ISD::VP_FP_TO_UINT:
4355 case ISD::VP_SIGN_EXTEND:
4357 case ISD::VP_SINT_TO_FP:
4358 case ISD::VP_TRUNCATE:
4361 case ISD::VP_UINT_TO_FP:
4363 case ISD::VP_ZERO_EXTEND:
4364 Res = WidenVecRes_Convert(
N);
4369 Res = WidenVecRes_FP_TO_XINT_SAT(
N);
4375 case ISD::VP_LLRINT:
4376 Res = WidenVecRes_XRINT(
N);
4396 if (unrollExpandedOp())
4406 case ISD::VP_BITREVERSE:
4412 case ISD::VP_CTLZ_ZERO_UNDEF:
4418 case ISD::VP_CTTZ_ZERO_UNDEF:
4423 case ISD::VP_FFLOOR:
4425 case ISD::VP_FNEARBYINT:
4426 case ISD::VP_FROUND:
4427 case ISD::VP_FROUNDEVEN:
4428 case ISD::VP_FROUNDTOZERO:
4432 Res = WidenVecRes_Unary(
N);
4439 Res = WidenVecRes_Ternary(
N);
4445 SetWidenedVector(
SDValue(
N, ResNo), Res);
4452 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4453 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4454 SDValue InOp3 = GetWidenedVector(
N->getOperand(2));
4455 if (
N->getNumOperands() == 3)
4456 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4458 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
4459 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4463 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4464 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
4471 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4472 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4473 if (
N->getNumOperands() == 2)
4474 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2,
4477 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
4478 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4482 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4483 {InOp1, InOp2, Mask, N->getOperand(3)},
N->getFlags());
4486SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(
SDNode *
N) {
4490 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4491 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4493 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
4502 unsigned ConcatEnd,
EVT VT,
EVT MaxVT,
4505 if (ConcatEnd == 1) {
4506 VT = ConcatOps[0].getValueType();
4508 return ConcatOps[0];
4511 SDLoc dl(ConcatOps[0]);
4518 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
4519 int Idx = ConcatEnd - 1;
4520 VT = ConcatOps[
Idx--].getValueType();
4521 while (
Idx >= 0 && ConcatOps[
Idx].getValueType() == VT)
4534 unsigned NumToInsert = ConcatEnd -
Idx - 1;
4535 for (
unsigned i = 0, OpIdx =
Idx+1; i < NumToInsert; i++, OpIdx++) {
4539 ConcatOps[
Idx+1] = VecOp;
4540 ConcatEnd =
Idx + 2;
4546 unsigned RealVals = ConcatEnd -
Idx - 1;
4547 unsigned SubConcatEnd = 0;
4548 unsigned SubConcatIdx =
Idx + 1;
4549 while (SubConcatEnd < RealVals)
4550 SubConcatOps[SubConcatEnd++] = ConcatOps[++
Idx];
4551 while (SubConcatEnd < OpsToConcat)
4552 SubConcatOps[SubConcatEnd++] = undefVec;
4554 NextVT, SubConcatOps);
4555 ConcatEnd = SubConcatIdx + 1;
4560 if (ConcatEnd == 1) {
4561 VT = ConcatOps[0].getValueType();
4563 return ConcatOps[0];
4568 if (NumOps != ConcatEnd ) {
4570 for (
unsigned j = ConcatEnd; j < NumOps; ++j)
4571 ConcatOps[j] = UndefVal;
4579 unsigned Opcode =
N->getOpcode();
4587 NumElts = NumElts / 2;
4591 if (NumElts != 1 && !TLI.
canOpTrap(
N->getOpcode(), VT)) {
4593 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4594 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4595 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
4607 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4608 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4609 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4612 unsigned ConcatEnd = 0;
4620 while (CurNumElts != 0) {
4621 while (CurNumElts >= NumElts) {
4623 DAG.getVectorIdxConstant(
Idx, dl));
4625 DAG.getVectorIdxConstant(
Idx, dl));
4626 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
4628 CurNumElts -= NumElts;
4631 NumElts = NumElts / 2;
4636 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4638 InOp1, DAG.getVectorIdxConstant(
Idx, dl));
4640 InOp2, DAG.getVectorIdxConstant(
Idx, dl));
4641 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
4652 switch (
N->getOpcode()) {
4655 return WidenVecRes_STRICT_FSETCC(
N);
4662 return WidenVecRes_Convert_StrictFP(
N);
4668 unsigned NumOpers =
N->getNumOperands();
4669 unsigned Opcode =
N->getOpcode();
4676 NumElts = NumElts / 2;
4687 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4691 unsigned ConcatEnd = 0;
4698 for (
unsigned i = 1; i < NumOpers; ++i) {
4704 Oper = GetWidenedVector(Oper);
4710 DAG.getUNDEF(WideOpVT), Oper,
4711 DAG.getVectorIdxConstant(0, dl));
4723 while (CurNumElts != 0) {
4724 while (CurNumElts >= NumElts) {
4727 for (
unsigned i = 0; i < NumOpers; ++i) {
4730 EVT OpVT =
Op.getValueType();
4736 DAG.getVectorIdxConstant(
Idx, dl));
4742 EVT OperVT[] = {VT, MVT::Other};
4744 ConcatOps[ConcatEnd++] = Oper;
4747 CurNumElts -= NumElts;
4750 NumElts = NumElts / 2;
4755 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4758 for (
unsigned i = 0; i < NumOpers; ++i) {
4761 EVT OpVT =
Op.getValueType();
4765 DAG.getVectorIdxConstant(
Idx, dl));
4770 EVT WidenVT[] = {WidenEltVT, MVT::Other};
4772 ConcatOps[ConcatEnd++] = Oper;
4781 if (Chains.
size() == 1)
4782 NewChain = Chains[0];
4785 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4790SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo) {
4792 EVT ResVT =
N->getValueType(0);
4793 EVT OvVT =
N->getValueType(1);
4794 EVT WideResVT, WideOvVT;
4804 WideLHS = GetWidenedVector(
N->getOperand(0));
4805 WideRHS = GetWidenedVector(
N->getOperand(1));
4815 N->getOperand(0), Zero);
4818 N->getOperand(1), Zero);
4821 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
4822 SDNode *WideNode = DAG.getNode(
4823 N->getOpcode(),
DL, WideVTs, WideLHS, WideRHS).getNode();
4826 unsigned OtherNo = 1 - ResNo;
4827 EVT OtherVT =
N->getValueType(OtherNo);
4834 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
4837 return SDValue(WideNode, ResNo);
4850 unsigned Opcode =
N->getOpcode();
4859 InOp = ZExtPromotedInteger(InOp);
4870 InOp = GetWidenedVector(
N->getOperand(0));
4873 if (InVTEC == WidenEC) {
4874 if (
N->getNumOperands() == 1)
4875 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
4876 if (
N->getNumOperands() == 3) {
4877 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4880 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
4882 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
4905 unsigned NumConcat =
4910 if (
N->getNumOperands() == 1)
4911 return DAG.getNode(Opcode,
DL, WidenVT, InVec);
4912 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
4917 DAG.getVectorIdxConstant(0,
DL));
4919 if (
N->getNumOperands() == 1)
4920 return DAG.getNode(Opcode,
DL, WidenVT, InVal);
4921 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
4930 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
4931 for (
unsigned i=0; i < MinElts; ++i) {
4933 DAG.getVectorIdxConstant(i,
DL));
4934 if (
N->getNumOperands() == 1)
4935 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val);
4937 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
4940 return DAG.getBuildVector(WidenVT,
DL, Ops);
4949 EVT SrcVT = Src.getValueType();
4953 Src = GetWidenedVector(Src);
4954 SrcVT = Src.getValueType();
4961 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
4970 EVT SrcVT = Src.getValueType();
4974 Src = GetWidenedVector(Src);
4975 SrcVT = Src.getValueType();
4982 if (
N->getNumOperands() == 1)
4983 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
4985 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
4986 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4990 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
4993SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
5004 unsigned Opcode =
N->getOpcode();
5010 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
5015 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5016 for (
unsigned i=0; i < MinElts; ++i) {
5018 DAG.getVectorIdxConstant(i,
DL));
5019 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
5023 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5025 return DAG.getBuildVector(WidenVT,
DL, Ops);
5028SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
5029 unsigned Opcode =
N->getOpcode();
5042 InOp = GetWidenedVector(InOp);
5049 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5056 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
5058 DAG.getVectorIdxConstant(i,
DL));
5075 while (Ops.
size() != WidenNumElts)
5078 return DAG.getBuildVector(WidenVT,
DL, Ops);
5084 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5085 return WidenVecRes_BinaryCanTrap(
N);
5095SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5096 SDValue FpValue =
N->getOperand(0);
5100 SDValue Arg = GetWidenedVector(FpValue);
5101 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5107 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5109 SDValue ExpOp =
RHS.getValueType().isVector() ? GetWidenedVector(RHS) :
RHS;
5111 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5117 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5118 if (
N->getNumOperands() == 1)
5119 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5121 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5122 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5126 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5127 {InOp,
Mask,
N->getOperand(2)});
5133 cast<VTSDNode>(
N->getOperand(1))->getVT()
5134 .getVectorElementType(),
5136 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5137 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5138 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5141SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5142 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5143 return GetWidenedVector(WidenVec);
5148 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5149 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
5151 return DAG.getAddrSpaceCast(
SDLoc(
N), WidenVT, InOp,
5152 AddrSpaceCastN->getSrcAddressSpace(),
5153 AddrSpaceCastN->getDestAddressSpace());
5159 EVT VT =
N->getValueType(0);
5163 switch (getTypeAction(InVT)) {
5177 SDValue NInOp = GetPromotedInteger(InOp);
5179 if (WidenVT.
bitsEq(NInVT)) {
5182 if (DAG.getDataLayout().isBigEndian()) {
5187 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5206 InOp = GetWidenedVector(InOp);
5208 if (WidenVT.
bitsEq(InVT))
5218 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5223 unsigned NewNumParts = WidenSize / InSize;
5236 EVT OrigInVT =
N->getOperand(0).getValueType();
5249 if (WidenSize % InSize == 0) {
5256 DAG.ExtractVectorElements(InOp, Ops);
5257 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5269 return CreateStackStoreLoad(InOp, WidenVT);
5275 EVT VT =
N->getValueType(0);
5279 EVT EltVT =
N->getOperand(0).getValueType();
5286 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5287 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5289 return DAG.getBuildVector(WidenVT, dl, NewOps);
5293 EVT InVT =
N->getOperand(0).getValueType();
5296 unsigned NumOperands =
N->getNumOperands();
5298 bool InputWidened =
false;
5302 if (WidenNumElts % NumInElts == 0) {
5304 unsigned NumConcat = WidenNumElts / NumInElts;
5305 SDValue UndefVal = DAG.getUNDEF(InVT);
5307 for (
unsigned i=0; i < NumOperands; ++i)
5308 Ops[i] =
N->getOperand(i);
5309 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5314 InputWidened =
true;
5318 for (i=1; i < NumOperands; ++i)
5319 if (!
N->getOperand(i).isUndef())
5322 if (i == NumOperands)
5325 return GetWidenedVector(
N->getOperand(0));
5327 if (NumOperands == 2) {
5329 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5335 for (
unsigned i = 0; i < NumInElts; ++i) {
5337 MaskOps[i + NumInElts] = i + WidenNumElts;
5339 return DAG.getVectorShuffle(WidenVT, dl,
5340 GetWidenedVector(
N->getOperand(0)),
5341 GetWidenedVector(
N->getOperand(1)),
5348 "Cannot use build vectors to widen CONCAT_VECTOR result");
5356 for (
unsigned i=0; i < NumOperands; ++i) {
5359 InOp = GetWidenedVector(InOp);
5360 for (
unsigned j = 0;
j < NumInElts; ++
j)
5362 DAG.getVectorIdxConstant(j, dl));
5364 SDValue UndefVal = DAG.getUNDEF(EltVT);
5365 for (;
Idx < WidenNumElts; ++
Idx)
5366 Ops[
Idx] = UndefVal;
5367 return DAG.getBuildVector(WidenVT, dl, Ops);
5370SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5371 EVT VT =
N->getValueType(0);
5373 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5380SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5381 EVT VT =
N->getValueType(0);
5388 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5390 InOp = GetWidenedVector(InOp);
5396 if (IdxVal == 0 && InVT == WidenVT)
5403 assert(IdxVal % VTNumElts == 0 &&
5404 "Expected Idx to be a multiple of subvector minimum vector length");
5405 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5418 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5419 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5420 "down type's element count");
5427 for (;
I < VTNumElts / GCD; ++
I)
5430 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5431 for (;
I < WidenNumElts / GCD; ++
I)
5438 "EXTRACT_SUBVECTOR for scalable vectors");
5445 for (i = 0; i < VTNumElts; ++i)
5447 DAG.getVectorIdxConstant(IdxVal + i, dl));
5449 SDValue UndefVal = DAG.getUNDEF(EltVT);
5450 for (; i < WidenNumElts; ++i)
5452 return DAG.getBuildVector(WidenVT, dl, Ops);
5463SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5464 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5467 N->getOperand(1),
N->getOperand(2));
5480 if (!
LD->getMemoryVT().isByteSized()) {
5484 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5493 EVT LdVT =
LD->getMemoryVT();
5504 const auto *MMO =
LD->getMemOperand();
5506 DAG.getLoadVP(WideVT,
DL,
LD->getChain(),
LD->getBasePtr(), Mask, EVL,
5520 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5522 Result = GenWidenVectorLoads(LdChain, LD);
5529 if (LdChain.
size() == 1)
5530 NewChain = LdChain[0];
5536 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5547 SDValue EVL =
N->getVectorLength();
5554 "Unable to widen binary VP op");
5555 Mask = GetWidenedVector(Mask);
5556 assert(
Mask.getValueType().getVectorElementCount() ==
5559 "Unable to widen vector load");
5562 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5563 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5564 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
5578 "Unable to widen VP strided load");
5579 Mask = GetWidenedVector(Mask);
5582 assert(
Mask.getValueType().getVectorElementCount() ==
5584 "Data and mask vectors should have the same number of elements");
5586 SDValue Res = DAG.getStridedLoadVP(
5587 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
5588 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
5589 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
5590 N->isExpandingLoad());
5602 EVT MaskVT =
Mask.getValueType();
5603 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5611 Mask = ModifyToType(Mask, WideMaskVT,
true);
5613 SDValue Res = DAG.getMaskedLoad(
5614 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
5615 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
5616 ExtType,
N->isExpandingLoad());
5627 EVT MaskVT =
Mask.getValueType();
5628 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5637 Mask = ModifyToType(Mask, WideMaskVT,
true);
5642 Index.getValueType().getScalarType(),
5650 N->getMemoryVT().getScalarType(), NumElts);
5651 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
5652 WideMemVT, dl, Ops,
N->getMemOperand(),
5653 N->getIndexType(),
N->getExtensionType());
5670 N->getMemoryVT().getScalarType(), WideEC);
5671 Mask = GetWidenedMask(Mask, WideEC);
5674 Mask,
N->getVectorLength()};
5675 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
5676 dl, Ops,
N->getMemOperand(),
N->getIndexType());
5686 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
5714 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
5715 return N->getOperand(OpNo).getValueType();
5723 N =
N.getOperand(0);
5725 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
5726 if (!
N->getOperand(i)->isUndef())
5728 N =
N.getOperand(0);
5732 N =
N.getOperand(0);
5734 N =
N.getOperand(0);
5761 { MaskVT, MVT::Other }, Ops);
5762 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
5772 if (MaskScalarBits < ToMaskScalBits) {
5776 }
else if (MaskScalarBits > ToMaskScalBits) {
5782 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
5784 "Mask should have the right element size by now.");
5787 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
5789 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
5794 EVT SubVT =
Mask->getValueType(0);
5800 assert((
Mask->getValueType(0) == ToMaskVT) &&
5801 "A mask of ToMaskVT should have been produced by now.");
5822 EVT CondVT =
Cond->getValueType(0);
5826 EVT VSelVT =
N->getValueType(0);
5838 EVT FinalVT = VSelVT;
5850 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
5868 EVT ToMaskVT = VSelVT;
5875 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5891 if (ScalarBits0 != ScalarBits1) {
5892 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
5893 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
5905 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
5906 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
5910 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5923 unsigned Opcode =
N->getOpcode();
5925 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
5926 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5927 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5929 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
5935 Cond1 = GetWidenedVector(Cond1);
5943 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
5944 SDValue Res = ModifyToType(SplitSelect, WidenVT);
5949 Cond1 = ModifyToType(Cond1, CondWidenVT);
5952 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5953 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5955 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
5956 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
5958 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
5962 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
5963 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
5966 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
5971 return DAG.getUNDEF(WidenVT);
5975 EVT VT =
N->getValueType(0);
5982 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5983 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
5987 for (
unsigned i = 0; i != NumElts; ++i) {
5988 int Idx =
N->getMaskElt(i);
5989 if (
Idx < (
int)NumElts)
5994 for (
unsigned i = NumElts; i != WidenNumElts; ++i)
5996 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
6000 EVT VT =
N->getValueType(0);
6005 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
6011 unsigned IdxVal = WidenNumElts - VTNumElts;
6024 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
6027 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
6028 "down type's element count");
6031 for (; i < VTNumElts / GCD; ++i)
6034 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
6035 for (; i < WidenNumElts / GCD; ++i)
6044 for (
unsigned i = 0; i != VTNumElts; ++i) {
6045 Mask.push_back(IdxVal + i);
6047 for (
unsigned i = VTNumElts; i != WidenNumElts; ++i)
6050 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
6055 assert(
N->getValueType(0).isVector() &&
6056 N->getOperand(0).getValueType().isVector() &&
6057 "Operands must be vectors");
6071 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
6072 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
6079 InOp1 = GetWidenedVector(InOp1);
6080 InOp2 = GetWidenedVector(InOp2);
6082 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
6083 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
6090 "Input not widened to expected type!");
6092 if (
N->getOpcode() == ISD::VP_SETCC) {
6095 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6096 N->getOperand(2), Mask,
N->getOperand(4));
6103 assert(
N->getValueType(0).isVector() &&
6104 N->getOperand(1).getValueType().isVector() &&
6105 "Operands must be vectors");
6106 EVT VT =
N->getValueType(0);
6117 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6122 for (
unsigned i = 0; i != NumElts; ++i) {
6124 DAG.getVectorIdxConstant(i, dl));
6126 DAG.getVectorIdxConstant(i, dl));
6128 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6129 {Chain, LHSElem, RHSElem, CC});
6130 Chains[i] = Scalars[i].getValue(1);
6131 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6132 DAG.getBoolConstant(
true, dl, EltVT, VT),
6133 DAG.getBoolConstant(
false, dl, EltVT, VT));
6137 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6139 return DAG.getBuildVector(WidenVT, dl, Scalars);
6145bool DAGTypeLegalizer::WidenVectorOperand(
SDNode *
N,
unsigned OpNo) {
6146 LLVM_DEBUG(
dbgs() <<
"Widen node operand " << OpNo <<
": ";
N->dump(&DAG));
6150 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
6153 switch (
N->getOpcode()) {
6156 dbgs() <<
"WidenVectorOperand op #" << OpNo <<
": ";
6167 case ISD::STORE: Res = WidenVecOp_STORE(
N);
break;
6168 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(
N, OpNo);
break;
6169 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6170 Res = WidenVecOp_VP_STRIDED_STORE(
N, OpNo);
6175 Res = WidenVecOp_EXTEND_VECTOR_INREG(
N);
6177 case ISD::MSTORE: Res = WidenVecOp_MSTORE(
N, OpNo);
break;
6178 case ISD::MGATHER: Res = WidenVecOp_MGATHER(
N, OpNo);
break;
6180 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(
N, OpNo);
break;
6181 case ISD::SETCC: Res = WidenVecOp_SETCC(
N);
break;
6189 Res = WidenVecOp_UnrollVectorOp(
N);
6196 Res = WidenVecOp_EXTEND(
N);
6212 Res = WidenVecOp_Convert(
N);
6217 Res = WidenVecOp_FP_TO_XINT_SAT(
N);
6235 Res = WidenVecOp_VECREDUCE(
N);
6239 Res = WidenVecOp_VECREDUCE_SEQ(
N);
6241 case ISD::VP_REDUCE_FADD:
6242 case ISD::VP_REDUCE_SEQ_FADD:
6243 case ISD::VP_REDUCE_FMUL:
6244 case ISD::VP_REDUCE_SEQ_FMUL:
6245 case ISD::VP_REDUCE_ADD:
6246 case ISD::VP_REDUCE_MUL:
6247 case ISD::VP_REDUCE_AND:
6248 case ISD::VP_REDUCE_OR:
6249 case ISD::VP_REDUCE_XOR:
6250 case ISD::VP_REDUCE_SMAX:
6251 case ISD::VP_REDUCE_SMIN:
6252 case ISD::VP_REDUCE_UMAX:
6253 case ISD::VP_REDUCE_UMIN:
6254 case ISD::VP_REDUCE_FMAX:
6255 case ISD::VP_REDUCE_FMIN:
6256 case ISD::VP_REDUCE_FMAXIMUM:
6257 case ISD::VP_REDUCE_FMINIMUM:
6258 Res = WidenVecOp_VP_REDUCE(
N);
6260 case ISD::VP_CTTZ_ELTS:
6261 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
6262 Res = WidenVecOp_VP_CttzElements(
N);
6267 if (!Res.
getNode())
return false;
6275 if (
N->isStrictFPOpcode())
6277 "Invalid operand expansion");
6280 "Invalid operand expansion");
6282 ReplaceValueWith(
SDValue(
N, 0), Res);
6288 EVT VT =
N->getValueType(0);
6293 "Unexpected type action");
6294 InOp = GetWidenedVector(InOp);
6297 "Input wasn't widened!");
6308 FixedEltVT == InEltVT) {
6310 "Not enough elements in the fixed type for the operand!");
6312 "We can't have the same type as we started with!");
6315 DAG.getUNDEF(FixedVT), InOp,
6316 DAG.getVectorIdxConstant(0,
DL));
6319 DAG.getVectorIdxConstant(0,
DL));
6328 return WidenVecOp_Convert(
N);
6333 switch (
N->getOpcode()) {
6349 return DAG.UnrollVectorOp(
N);
6354 EVT ResultVT =
N->getValueType(0);
6356 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6365 {WideArg,
Test},
N->getFlags());
6372 DAG.getVectorIdxConstant(0,
DL));
6374 EVT OpVT =
N->getOperand(0).getValueType();
6377 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6382 EVT VT =
N->getValueType(0);
6385 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6388 "Unexpected type action");
6389 InOp = GetWidenedVector(InOp);
6391 unsigned Opcode =
N->getOpcode();
6397 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6399 if (
N->isStrictFPOpcode()) {
6401 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6402 {
N->getOperand(0), InOp,
N->getOperand(2) });
6404 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6405 {
N->getOperand(0), InOp });
6411 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6413 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6416 DAG.getVectorIdxConstant(0, dl));
6424 if (
N->isStrictFPOpcode()) {
6427 for (
unsigned i=0; i < NumElts; ++i) {
6429 DAG.getVectorIdxConstant(i, dl));
6430 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6434 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6436 for (
unsigned i = 0; i < NumElts; ++i)
6437 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6439 InOp, DAG.getVectorIdxConstant(i, dl)));
6442 return DAG.getBuildVector(VT, dl, Ops);
6446 EVT DstVT =
N->getValueType(0);
6447 SDValue Src = GetWidenedVector(
N->getOperand(0));
6448 EVT SrcVT = Src.getValueType();
6457 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6460 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6464 return DAG.UnrollVectorOp(
N);
6468 EVT VT =
N->getValueType(0);
6469 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6477 if (!VT.
isVector() && VT != MVT::x86mmx &&
6484 DAG.getVectorIdxConstant(0, dl));
6498 .divideCoefficientBy(EltSize);
6503 DAG.getVectorIdxConstant(0, dl));
6508 return CreateStackStoreLoad(InOp, VT);
6512 EVT VT =
N->getValueType(0);
6514 EVT InVT =
N->getOperand(0).getValueType();
6519 unsigned NumOperands =
N->getNumOperands();
6522 for (i = 1; i < NumOperands; ++i)
6523 if (!
N->getOperand(i).isUndef())
6526 if (i == NumOperands)
6527 return GetWidenedVector(
N->getOperand(0));
6537 for (
unsigned i=0; i < NumOperands; ++i) {
6541 "Unexpected type action");
6542 InOp = GetWidenedVector(InOp);
6543 for (
unsigned j = 0;
j < NumInElts; ++
j)
6545 DAG.getVectorIdxConstant(j, dl));
6547 return DAG.getBuildVector(VT, dl, Ops);
6550SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
6551 EVT VT =
N->getValueType(0);
6556 SubVec = GetWidenedVector(SubVec);
6562 bool IndicesValid =
false;
6565 IndicesValid =
true;
6569 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
6570 Attribute::VScaleRange);
6575 IndicesValid =
true;
6581 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
6586 "INSERT_SUBVECTOR");
6589SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
6590 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6592 N->getValueType(0), InOp,
N->getOperand(1));
6595SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
6596 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6598 N->getValueType(0), InOp,
N->getOperand(1));
6601SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
6602 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6603 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
6611 if (!
ST->getMemoryVT().getScalarType().isByteSized())
6614 if (
ST->isTruncatingStore())
6633 StVal = GetWidenedVector(StVal);
6637 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
6638 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
6639 EVL, StVT,
ST->getMemOperand(),
6640 ST->getAddressingMode());
6644 if (GenWidenVectorStores(StChain, ST)) {
6645 if (StChain.
size() == 1)
6654SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
6655 assert((OpNo == 1 || OpNo == 3) &&
6656 "Can widen only data or mask operand of vp_store");
6664 StVal = GetWidenedVector(StVal);
6670 "Unable to widen VP store");
6671 Mask = GetWidenedVector(Mask);
6673 Mask = GetWidenedVector(Mask);
6679 "Unable to widen VP store");
6680 StVal = GetWidenedVector(StVal);
6683 assert(
Mask.getValueType().getVectorElementCount() ==
6685 "Mask and data vectors should have the same number of elements");
6686 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
6687 ST->getOffset(), Mask,
ST->getVectorLength(),
6688 ST->getMemoryVT(),
ST->getMemOperand(),
6689 ST->getAddressingMode(),
ST->isTruncatingStore(),
6690 ST->isCompressingStore());
6695 assert((OpNo == 1 || OpNo == 4) &&
6696 "Can widen only data or mask operand of vp_strided_store");
6705 "Unable to widen VP strided store");
6709 "Unable to widen VP strided store");
6711 StVal = GetWidenedVector(StVal);
6712 Mask = GetWidenedVector(Mask);
6715 Mask.getValueType().getVectorElementCount() &&
6716 "Data and mask vectors should have the same number of elements");
6718 return DAG.getStridedStoreVP(
6725SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
6726 assert((OpNo == 1 || OpNo == 4) &&
6727 "Can widen only data or mask operand of mstore");
6730 EVT MaskVT =
Mask.getValueType();
6736 StVal = GetWidenedVector(StVal);
6743 Mask = ModifyToType(Mask, WideMaskVT,
true);
6747 Mask = ModifyToType(Mask, WideMaskVT,
true);
6753 StVal = ModifyToType(StVal, WideVT);
6756 assert(
Mask.getValueType().getVectorNumElements() ==
6758 "Mask and data vectors should have the same number of elements");
6765SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
6766 assert(OpNo == 4 &&
"Can widen only the index of mgather");
6767 auto *MG = cast<MaskedGatherSDNode>(
N);
6768 SDValue DataOp = MG->getPassThru();
6770 SDValue Scale = MG->getScale();
6778 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
6779 MG->getMemOperand(), MG->getIndexType(),
6780 MG->getExtensionType());
6786SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
6795 DataOp = GetWidenedVector(DataOp);
6799 EVT IndexVT =
Index.getValueType();
6805 EVT MaskVT =
Mask.getValueType();
6808 Mask = ModifyToType(Mask, WideMaskVT,
true);
6813 }
else if (OpNo == 4) {
6821 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
6826SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
6835 DataOp = GetWidenedVector(DataOp);
6838 Mask = GetWidenedMask(Mask, WideEC);
6841 }
else if (OpNo == 3) {
6850 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
6855 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
6856 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6858 EVT VT =
N->getValueType(0);
6873 SVT, InOp0, InOp1,
N->getOperand(2));
6880 DAG.getVectorIdxConstant(0, dl));
6882 EVT OpVT =
N->getOperand(0).getValueType();
6885 return DAG.getNode(ExtendCode, dl, VT,
CC);
6895 EVT VT =
N->getValueType(0);
6897 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6904 for (
unsigned i = 0; i != NumElts; ++i) {
6906 DAG.getVectorIdxConstant(i, dl));
6908 DAG.getVectorIdxConstant(i, dl));
6910 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6911 {Chain, LHSElem, RHSElem, CC});
6912 Chains[i] = Scalars[i].getValue(1);
6913 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6914 DAG.getBoolConstant(
true, dl, EltVT, VT),
6915 DAG.getBoolConstant(
false, dl, EltVT, VT));
6919 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6921 return DAG.getBuildVector(VT, dl, Scalars);
6926 SDValue Op = GetWidenedVector(
N->getOperand(0));
6927 EVT OrigVT =
N->getOperand(0).getValueType();
6928 EVT WideVT =
Op.getValueType();
6932 unsigned Opc =
N->getOpcode();
6934 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6935 assert(NeutralElem &&
"Neutral element must exist");
6942 unsigned GCD = std::gcd(OrigElts, WideElts);
6945 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6946 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6948 DAG.getVectorIdxConstant(
Idx, dl));
6949 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6952 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6954 DAG.getVectorIdxConstant(
Idx, dl));
6956 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6966 EVT WideVT =
Op.getValueType();
6970 unsigned Opc =
N->getOpcode();
6972 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6979 unsigned GCD = std::gcd(OrigElts, WideElts);
6982 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6983 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6985 DAG.getVectorIdxConstant(
Idx, dl));
6986 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6989 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6991 DAG.getVectorIdxConstant(
Idx, dl));
6993 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6997 assert(
N->isVPOpcode() &&
"Expected VP opcode");
7000 SDValue Op = GetWidenedVector(
N->getOperand(1));
7002 Op.getValueType().getVectorElementCount());
7004 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
7005 {N->getOperand(0), Op, Mask, N->getOperand(3)},
7013 EVT VT =
N->getValueType(0);
7024 DAG.getVectorIdxConstant(0,
DL));
7034 return DAG.getNode(
N->getOpcode(),
DL,
N->getValueType(0),
7035 {Source, Mask, N->getOperand(2)},
N->getFlags());
7052 unsigned WidenEx = 0) {
7057 unsigned AlignInBits =
Align*8;
7060 EVT RetVT = WidenEltVT;
7061 if (!Scalable && Width == WidenEltWidth)
7075 (WidenWidth % MemVTWidth) == 0 &&
7077 (MemVTWidth <= Width ||
7078 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7079 if (MemVTWidth == WidenWidth)
7098 (WidenWidth % MemVTWidth) == 0 &&
7100 (MemVTWidth <= Width ||
7101 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7110 return std::nullopt;
7121 unsigned Start,
unsigned End) {
7122 SDLoc dl(LdOps[Start]);
7123 EVT LdTy = LdOps[Start].getValueType();
7131 for (
unsigned i = Start + 1; i !=
End; ++i) {
7132 EVT NewLdTy = LdOps[i].getValueType();
7133 if (NewLdTy != LdTy) {
7154 EVT LdVT =
LD->getMemoryVT();
7168 TypeSize WidthDiff = WidenWidth - LdWidth;
7175 std::optional<EVT> FirstVT =
7176 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7183 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7188 std::optional<EVT> NewVT = FirstVT;
7190 TypeSize NewVTWidth = FirstVTWidth;
7192 RemainingWidth -= NewVTWidth;
7199 NewVTWidth = NewVT->getSizeInBits();
7205 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7206 LD->getOriginalAlign(), MMOFlags, AAInfo);
7210 if (MemVTs.
empty()) {
7212 if (!FirstVT->isVector()) {
7219 if (FirstVT == WidenVT)
7224 unsigned NumConcat =
7227 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7228 ConcatOps[0] = LdOp;
7229 for (
unsigned i = 1; i != NumConcat; ++i)
7230 ConcatOps[i] = UndefVal;
7242 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7245 for (
EVT MemVT : MemVTs) {
7246 Align NewAlign = ScaledOffset == 0
7247 ?
LD->getOriginalAlign()
7250 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7254 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7259 if (!LdOps[0].getValueType().
isVector())
7269 EVT LdTy = LdOps[i].getValueType();
7272 for (--i; i >= 0; --i) {
7273 LdTy = LdOps[i].getValueType();
7280 ConcatOps[--
Idx] = LdOps[i];
7281 for (--i; i >= 0; --i) {
7282 EVT NewLdTy = LdOps[i].getValueType();
7283 if (NewLdTy != LdTy) {
7294 WidenOps[j] = ConcatOps[
Idx+j];
7295 for (;
j != NumOps; ++
j)
7296 WidenOps[j] = DAG.getUNDEF(LdTy);
7303 ConcatOps[--
Idx] = LdOps[i];
7314 SDValue UndefVal = DAG.getUNDEF(LdTy);
7317 for (; i !=
End-
Idx; ++i)
7318 WidenOps[i] = ConcatOps[
Idx+i];
7319 for (; i != NumOps; ++i)
7320 WidenOps[i] = UndefVal;
7332 EVT LdVT =
LD->getMemoryVT();
7345 "not yet supported");
7356 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7357 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7363 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7364 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7365 LD->getOriginalAlign(), MMOFlags, AAInfo);
7370 SDValue UndefVal = DAG.getUNDEF(EltVT);
7371 for (; i != WidenNumElts; ++i)
7374 return DAG.getBuildVector(WidenVT, dl, Ops);
7386 SDValue ValOp = GetWidenedVector(
ST->getValue());
7389 EVT StVT =
ST->getMemoryVT();
7397 "Mismatch between store and value types");
7411 std::optional<EVT> NewVT =
7416 TypeSize NewVTWidth = NewVT->getSizeInBits();
7419 StWidth -= NewVTWidth;
7420 MemVTs.
back().second++;
7424 for (
const auto &Pair : MemVTs) {
7425 EVT NewVT = Pair.first;
7426 unsigned Count = Pair.second;
7432 Align NewAlign = ScaledOffset == 0
7433 ?
ST->getOriginalAlign()
7436 DAG.getVectorIdxConstant(
Idx, dl));
7437 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7442 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
7454 DAG.getVectorIdxConstant(
Idx++, dl));
7456 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
7460 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
7474 bool FillWithZeroes) {
7479 "input and widen element type must match");
7481 "cannot modify scalable vectors in this way");
7493 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
7496 for (
unsigned i = 1; i != NumConcat; ++i)
7504 DAG.getVectorIdxConstant(0, dl));
7507 "Scalable vectors should have been handled already.");
7515 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
7519 DAG.getVectorIdxConstant(
Idx, dl));
7521 SDValue UndefVal = DAG.getUNDEF(EltVT);
7522 for (;
Idx < WidenNumElts; ++
Idx)
7523 Ops[
Idx] = UndefVal;
7525 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
7526 if (!FillWithZeroes)
7530 "We expect to never want to FillWithZeroes for non-integral types.");
7533 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
7534 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
7536 return DAG.getNode(
ISD::AND, dl, NVT, Widened,
7537 DAG.getBuildVector(NVT, dl,
MaskOps));
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent an VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.