35#define DEBUG_TYPE "legalize-types"
41void DAGTypeLegalizer::ScalarizeVectorResult(
SDNode *
N,
unsigned ResNo) {
46 switch (
N->getOpcode()) {
49 dbgs() <<
"ScalarizeVectorResult #" << ResNo <<
": ";
61 case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(
N);
break;
63 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(
N));
break;
69 case ISD::SETCC: R = ScalarizeVecRes_SETCC(
N);
break;
70 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(
N);
break;
76 R = ScalarizeVecRes_VecInregOp(
N);
118 R = ScalarizeVecRes_UnaryOp(
N);
121 R = ScalarizeVecRes_ADDRSPACECAST(
N);
124 R = ScalarizeVecRes_FFREXP(
N, ResNo);
169 R = ScalarizeVecRes_BinOp(
N);
174 R = ScalarizeVecRes_TernaryOp(
N);
177#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
178 case ISD::STRICT_##DAGN:
179#include "llvm/IR/ConstrainedOps.def"
180 R = ScalarizeVecRes_StrictFPOp(
N);
185 R = ScalarizeVecRes_FP_TO_XINT_SAT(
N);
194 R = ScalarizeVecRes_OverflowOp(
N, ResNo);
204 R = ScalarizeVecRes_FIX(
N);
210 SetScalarizedVector(
SDValue(
N, ResNo), R);
214 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
215 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
217 LHS.getValueType(), LHS, RHS,
N->getFlags());
221 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
222 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
223 SDValue Op2 = GetScalarizedVector(
N->getOperand(2));
229 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
230 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
236SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(
SDNode *
N,
unsigned ResNo) {
237 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
238 "Unexpected vector type!");
239 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
241 EVT VT0 =
N->getValueType(0);
242 EVT VT1 =
N->getValueType(1);
247 {VT0.getScalarType(), VT1.getScalarType()}, Elt)
251 unsigned OtherNo = 1 - ResNo;
252 EVT OtherVT =
N->getValueType(OtherNo);
254 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
258 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
261 return SDValue(ScalarNode, ResNo);
265 EVT VT =
N->getValueType(0).getVectorElementType();
266 unsigned NumOpers =
N->getNumOperands();
268 EVT ValueVTs[] = {VT, MVT::Other};
277 for (
unsigned i = 1; i < NumOpers; ++i) {
283 Oper = GetScalarizedVector(Oper);
294 Opers,
N->getFlags());
305 EVT ResVT =
N->getValueType(0);
306 EVT OvVT =
N->getValueType(1);
310 ScalarLHS = GetScalarizedVector(
N->getOperand(0));
311 ScalarRHS = GetScalarizedVector(
N->getOperand(1));
316 ScalarLHS = ElemsLHS[0];
317 ScalarRHS = ElemsRHS[0];
323 N->getOpcode(),
DL, ScalarVTs, ScalarLHS, ScalarRHS).
getNode();
327 unsigned OtherNo = 1 - ResNo;
328 EVT OtherVT =
N->getValueType(OtherNo);
330 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
334 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
337 return SDValue(ScalarNode, ResNo);
342 SDValue Op = DisintegrateMERGE_VALUES(
N, ResNo);
343 return GetScalarizedVector(
Op);
348 if (
Op.getValueType().isVector()
349 &&
Op.getValueType().getVectorNumElements() == 1
350 && !isSimpleLegalType(
Op.getValueType()))
351 Op = GetScalarizedVector(
Op);
352 EVT NewVT =
N->getValueType(0).getVectorElementType();
357SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(
SDNode *
N) {
358 EVT EltVT =
N->getValueType(0).getVectorElementType();
367SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
369 N->getValueType(0).getVectorElementType(),
370 N->getOperand(0),
N->getOperand(1));
376 EVT OpVT =
Op.getValueType();
380 Op = GetScalarizedVector(
Op);
387 N->getValueType(0).getVectorElementType(),
Op,
392 SDValue Op = GetScalarizedVector(
N->getOperand(0));
397SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
401 EVT EltVT =
N->getValueType(0).getVectorElementType();
402 if (
Op.getValueType() != EltVT)
409 assert(
N->isUnindexed() &&
"Indexed vector load?");
413 N->getValueType(0).getVectorElementType(),
SDLoc(
N),
N->getChain(),
414 N->getBasePtr(), DAG.
getUNDEF(
N->getBasePtr().getValueType()),
415 N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
416 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
N->getAAInfo());
426 EVT DestVT =
N->getValueType(0).getVectorElementType();
428 EVT OpVT =
Op.getValueType();
438 Op = GetScalarizedVector(
Op);
448 EVT EltVT =
N->getValueType(0).getVectorElementType();
450 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
459 EVT OpVT =
Op.getValueType();
461 EVT EltVT =
N->getValueType(0).getVectorElementType();
464 Op = GetScalarizedVector(
Op);
470 switch (
N->getOpcode()) {
482SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(
SDNode *
N) {
483 EVT DestVT =
N->getValueType(0).getVectorElementType();
485 EVT OpVT =
Op.getValueType();
495 Op = GetScalarizedVector(
Op);
501 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
502 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
503 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
507SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(
SDNode *
N) {
510 EVT EltVT =
N->getValueType(0).getVectorElementType();
519 EVT OpVT =
Cond.getValueType();
532 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
546 EVT OpVT =
Cond->getOperand(0).getValueType();
553 EVT CondVT =
Cond.getValueType();
554 if (ScalarBool != VecBool) {
555 switch (ScalarBool) {
576 auto BoolVT = getSetCCResultType(CondVT);
577 if (BoolVT.bitsLT(CondVT))
582 GetScalarizedVector(
N->getOperand(2)));
586 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
588 LHS.getValueType(),
N->getOperand(0), LHS,
589 GetScalarizedVector(
N->getOperand(2)));
593 SDValue LHS = GetScalarizedVector(
N->getOperand(2));
595 N->getOperand(0),
N->getOperand(1),
596 LHS, GetScalarizedVector(
N->getOperand(3)),
601 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
604SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(
SDNode *
N) {
606 SDValue Arg =
N->getOperand(2).getOperand(0);
608 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
609 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
610 return GetScalarizedVector(
N->getOperand(
Op));
613SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(
SDNode *
N) {
615 EVT SrcVT = Src.getValueType();
620 Src = GetScalarizedVector(Src);
626 EVT DstVT =
N->getValueType(0).getVectorElementType();
627 return DAG.
getNode(
N->getOpcode(), dl, DstVT, Src,
N->getOperand(1));
631 assert(
N->getValueType(0).isVector() &&
632 N->getOperand(0).getValueType().isVector() &&
633 "Operand types must be vectors");
636 EVT OpVT =
LHS.getValueType();
637 EVT NVT =
N->getValueType(0).getVectorElementType();
642 LHS = GetScalarizedVector(LHS);
643 RHS = GetScalarizedVector(RHS);
659 return DAG.
getNode(ExtendCode,
DL, NVT, Res);
667 EVT ResultVT =
N->getValueType(0).getVectorElementType();
670 Arg = GetScalarizedVector(Arg);
683 return DAG.
getNode(ExtendCode,
DL, ResultVT, Res);
690bool DAGTypeLegalizer::ScalarizeVectorOperand(
SDNode *
N,
unsigned OpNo) {
695 switch (
N->getOpcode()) {
698 dbgs() <<
"ScalarizeVectorOperand Op #" << OpNo <<
": ";
705 Res = ScalarizeVecOp_BITCAST(
N);
717 Res = ScalarizeVecOp_UnaryOp(
N);
723 Res = ScalarizeVecOp_UnaryOp_StrictFP(
N);
726 Res = ScalarizeVecOp_CONCAT_VECTORS(
N);
729 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(
N);
732 Res = ScalarizeVecOp_VSELECT(
N);
735 Res = ScalarizeVecOp_VSETCC(
N);
738 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
741 Res = ScalarizeVecOp_STRICT_FP_ROUND(
N, OpNo);
744 Res = ScalarizeVecOp_FP_ROUND(
N, OpNo);
747 Res = ScalarizeVecOp_STRICT_FP_EXTEND(
N);
750 Res = ScalarizeVecOp_FP_EXTEND(
N);
767 Res = ScalarizeVecOp_VECREDUCE(
N);
771 Res = ScalarizeVecOp_VECREDUCE_SEQ(
N);
776 if (!Res.
getNode())
return false;
784 "Invalid operand expansion");
786 ReplaceValueWith(
SDValue(
N, 0), Res);
793 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
795 N->getValueType(0), Elt);
801 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
802 "Unexpected vector type!");
803 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
805 N->getValueType(0).getScalarType(), Elt);
813SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(
SDNode *
N) {
814 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
815 "Unexpected vector type!");
816 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
818 {
N->getValueType(0).getScalarType(), MVT::Other },
819 {
N->getOperand(0), Elt });
829 ReplaceValueWith(
SDValue(
N, 0), Res);
834SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(
SDNode *
N) {
836 for (
unsigned i = 0, e =
N->getNumOperands(); i < e; ++i)
837 Ops[i] = GetScalarizedVector(
N->getOperand(i));
843SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
844 EVT VT =
N->getValueType(0);
845 SDValue Res = GetScalarizedVector(
N->getOperand(0));
857 SDValue ScalarCond = GetScalarizedVector(
N->getOperand(0));
858 EVT VT =
N->getValueType(0);
868 assert(
N->getValueType(0).isVector() &&
869 N->getOperand(0).getValueType().isVector() &&
870 "Operand types must be vectors");
871 assert(
N->getValueType(0) == MVT::v1i1 &&
"Expected v1i1 type");
873 EVT VT =
N->getValueType(0);
874 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
875 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
877 EVT OpVT =
N->getOperand(0).getValueType();
889 Res = DAG.
getNode(ExtendCode,
DL, NVT, Res);
897 assert(
N->isUnindexed() &&
"Indexed store of one-element vector?");
898 assert(OpNo == 1 &&
"Do not know how to scalarize this operand!");
901 if (
N->isTruncatingStore())
903 N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
904 N->getBasePtr(),
N->getPointerInfo(),
905 N->getMemoryVT().getVectorElementType(),
N->getOriginalAlign(),
906 N->getMemOperand()->getFlags(),
N->getAAInfo());
908 return DAG.
getStore(
N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
909 N->getBasePtr(),
N->getPointerInfo(),
910 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
916SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(
SDNode *
N,
unsigned OpNo) {
917 assert(OpNo == 0 &&
"Wrong operand for scalarization!");
918 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
920 N->getValueType(0).getVectorElementType(), Elt,
925SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(
SDNode *
N,
927 assert(OpNo == 1 &&
"Wrong operand for scalarization!");
928 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
932 {
N->getOperand(0), Elt,
N->getOperand(2) });
941 ReplaceValueWith(
SDValue(
N, 0), Res);
948 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
950 N->getValueType(0).getVectorElementType(), Elt);
956SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(
SDNode *
N) {
957 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
961 {
N->getOperand(0), Elt});
970 ReplaceValueWith(
SDValue(
N, 0), Res);
975 SDValue Res = GetScalarizedVector(
N->getOperand(0));
982SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(
SDNode *
N) {
990 AccOp,
Op,
N->getFlags());
1001void DAGTypeLegalizer::SplitVectorResult(
SDNode *
N,
unsigned ResNo) {
1006 if (CustomLowerNode(
N,
N->getValueType(ResNo),
true))
1009 switch (
N->getOpcode()) {
1012 dbgs() <<
"SplitVectorResult #" << ResNo <<
": ";
1024 case ISD::VP_SELECT: SplitRes_Select(
N,
Lo,
Hi);
break;
1039 SplitVecRes_ScalarOp(
N,
Lo,
Hi);
1042 SplitVecRes_STEP_VECTOR(
N,
Lo,
Hi);
1046 SplitVecRes_LOAD(cast<LoadSDNode>(
N),
Lo,
Hi);
1049 SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(
N),
Lo,
Hi);
1051 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1052 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N),
Lo,
Hi);
1055 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(
N),
Lo,
Hi);
1058 case ISD::VP_GATHER:
1059 SplitVecRes_Gather(cast<MemSDNode>(
N),
Lo,
Hi,
true);
1063 SplitVecRes_SETCC(
N,
Lo,
Hi);
1066 SplitVecRes_VECTOR_REVERSE(
N,
Lo,
Hi);
1069 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N),
Lo,
Hi);
1072 SplitVecRes_VECTOR_SPLICE(
N,
Lo,
Hi);
1075 SplitVecRes_VECTOR_DEINTERLEAVE(
N);
1078 SplitVecRes_VECTOR_INTERLEAVE(
N);
1081 SplitVecRes_VAARG(
N,
Lo,
Hi);
1087 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
1093 case ISD::VP_BITREVERSE:
1101 case ISD::VP_CTLZ_ZERO_UNDEF:
1103 case ISD::VP_CTTZ_ZERO_UNDEF:
1114 case ISD::VP_FFLOOR:
1119 case ISD::VP_FNEARBYINT:
1124 case ISD::VP_FP_EXTEND:
1126 case ISD::VP_FP_ROUND:
1128 case ISD::VP_FP_TO_SINT:
1130 case ISD::VP_FP_TO_UINT:
1136 case ISD::VP_LLRINT:
1138 case ISD::VP_FROUND:
1140 case ISD::VP_FROUNDEVEN:
1144 case ISD::VP_FROUNDTOZERO:
1146 case ISD::VP_SINT_TO_FP:
1148 case ISD::VP_TRUNCATE:
1150 case ISD::VP_UINT_TO_FP:
1152 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
1155 SplitVecRes_ADDRSPACECAST(
N,
Lo,
Hi);
1158 SplitVecRes_FFREXP(
N, ResNo,
Lo,
Hi);
1164 case ISD::VP_SIGN_EXTEND:
1165 case ISD::VP_ZERO_EXTEND:
1166 SplitVecRes_ExtendOp(
N,
Lo,
Hi);
1180 case ISD::VP_FMINIMUM:
1182 case ISD::VP_FMAXIMUM:
1188 case ISD::OR:
case ISD::VP_OR:
1208 case ISD::VP_FCOPYSIGN:
1209 SplitVecRes_BinOp(
N,
Lo,
Hi);
1216 SplitVecRes_TernaryOp(
N,
Lo,
Hi);
1219#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1220 case ISD::STRICT_##DAGN:
1221#include "llvm/IR/ConstrainedOps.def"
1222 SplitVecRes_StrictFPOp(
N,
Lo,
Hi);
1227 SplitVecRes_FP_TO_XINT_SAT(
N,
Lo,
Hi);
1236 SplitVecRes_OverflowOp(
N, ResNo,
Lo,
Hi);
1246 SplitVecRes_FIX(
N,
Lo,
Hi);
1248 case ISD::EXPERIMENTAL_VP_REVERSE:
1249 SplitVecRes_VP_REVERSE(
N,
Lo,
Hi);
1258void DAGTypeLegalizer::IncrementPointer(
MemSDNode *
N,
EVT MemVT,
1267 DL,
Ptr.getValueType(),
1268 APInt(
Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
1270 Flags.setNoUnsignedWrap(
true);
1272 *ScaledOffset += IncrementSize;
1276 MPI =
N->getPointerInfo().getWithOffset(IncrementSize);
1282std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask) {
1283 return SplitMask(Mask,
SDLoc(Mask));
1286std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask,
1289 EVT MaskVT =
Mask.getValueType();
1291 GetSplitVector(Mask, MaskLo, MaskHi);
1294 return std::make_pair(MaskLo, MaskHi);
1299 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1301 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1305 unsigned Opcode =
N->getOpcode();
1306 if (
N->getNumOperands() == 2) {
1312 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
1313 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1316 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
1319 std::tie(EVLLo, EVLHi) =
1320 DAG.
SplitEVL(
N->getOperand(3),
N->getValueType(0), dl);
1323 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1325 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1331 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
1333 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
1335 GetSplitVector(
N->getOperand(2), Op2Lo, Op2Hi);
1339 unsigned Opcode =
N->getOpcode();
1340 if (
N->getNumOperands() == 3) {
1346 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
1347 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1350 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
1353 std::tie(EVLLo, EVLHi) =
1354 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0), dl);
1357 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1359 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1364 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1366 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1370 unsigned Opcode =
N->getOpcode();
1389 switch (getTypeAction(InVT)) {
1404 GetExpandedOp(InOp,
Lo,
Hi);
1415 GetSplitVector(InOp,
Lo,
Hi);
1436 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT,
Lo,
Hi);
1459 assert(!(
N->getNumOperands() & 1) &&
"Unsupported CONCAT_VECTORS");
1461 unsigned NumSubvectors =
N->getNumOperands() / 2;
1462 if (NumSubvectors == 1) {
1463 Lo =
N->getOperand(0);
1464 Hi =
N->getOperand(1);
1478void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(
SDNode *
N,
SDValue &
Lo,
1500 GetSplitVector(Vec,
Lo,
Hi);
1503 EVT LoVT =
Lo.getValueType();
1512 unsigned IdxVal =
Idx->getAsZExtVal();
1513 if (IdxVal + SubElems <= LoElems) {
1521 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
1547 Lo = DAG.
getLoad(
Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
1551 auto *
Load = cast<LoadSDNode>(
Lo);
1553 IncrementPointer(Load, LoVT, MPI, StackPtr);
1556 Hi = DAG.
getLoad(
Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
1565 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1570 EVT RHSVT =
RHS.getValueType();
1573 GetSplitVector(RHS, RHSLo, RHSHi);
1590 SDValue FpValue =
N->getOperand(0);
1592 GetSplitVector(FpValue, ArgLo, ArgHi);
1605 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1609 std::tie(LoVT, HiVT) =
1620 unsigned Opcode =
N->getOpcode();
1627 GetSplitVector(N0, InLo, InHi);
1634 EVT OutLoVT, OutHiVT;
1637 assert((2 * OutNumElements) <= InNumElements &&
1638 "Illegal extend vector in reg split");
1648 for (
unsigned i = 0; i != OutNumElements; ++i)
1649 SplitHi[i] = i + OutNumElements;
1652 Lo = DAG.
getNode(Opcode, dl, OutLoVT, InLo);
1653 Hi = DAG.
getNode(Opcode, dl, OutHiVT, InHi);
1658 unsigned NumOps =
N->getNumOperands();
1672 for (
unsigned i = 1; i < NumOps; ++i) {
1677 EVT InVT =
Op.getValueType();
1682 GetSplitVector(
Op, OpLo, OpHi);
1691 EVT LoValueVTs[] = {LoVT, MVT::Other};
1692 EVT HiValueVTs[] = {HiVT, MVT::Other};
1701 Lo.getValue(1),
Hi.getValue(1));
1705 ReplaceValueWith(
SDValue(
N, 1), Chain);
1708SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(
SDNode *
N,
unsigned ResNE) {
1710 EVT VT =
N->getValueType(0);
1721 else if (NE > ResNE)
1725 EVT ChainVTs[] = {EltVT, MVT::Other};
1729 for (i = 0; i !=
NE; ++i) {
1731 for (
unsigned j = 1, e =
N->getNumOperands(); j != e; ++j) {
1732 SDValue Operand =
N->getOperand(j);
1743 Scalar.getNode()->setFlags(
N->getFlags());
1751 for (; i < ResNE; ++i)
1756 ReplaceValueWith(
SDValue(
N, 1), Chain);
1763void DAGTypeLegalizer::SplitVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo,
1766 EVT ResVT =
N->getValueType(0);
1767 EVT OvVT =
N->getValueType(1);
1768 EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
1772 SDValue LoLHS, HiLHS, LoRHS, HiRHS;
1774 GetSplitVector(
N->getOperand(0), LoLHS, HiLHS);
1775 GetSplitVector(
N->getOperand(1), LoRHS, HiRHS);
1781 unsigned Opcode =
N->getOpcode();
1793 unsigned OtherNo = 1 - ResNo;
1794 EVT OtherVT =
N->getValueType(OtherNo);
1796 SetSplitVector(
SDValue(
N, OtherNo),
1802 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
1806void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(
SDNode *
N,
SDValue &
Lo,
1812 GetSplitVector(Vec,
Lo,
Hi);
1815 unsigned IdxVal = CIdx->getZExtValue();
1816 unsigned LoNumElts =
Lo.getValueType().getVectorMinNumElements();
1817 if (IdxVal < LoNumElts) {
1819 Lo.getValueType(),
Lo, Elt,
Idx);
1829 if (CustomLowerNode(
N,
N->getValueType(0),
true))
1870 Lo = DAG.
getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
1873 auto Load = cast<LoadSDNode>(
Lo);
1875 IncrementPointer(Load, LoVT, MPI, StackPtr);
1877 Hi = DAG.
getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
1881 if (LoVT !=
Lo.getValueType())
1883 if (HiVT !=
Hi.getValueType())
1891 assert(
N->getValueType(0).isScalableVector() &&
1892 "Only scalable vectors are supported for STEP_VECTOR");
1915 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0));
1935 EVT MemoryVT =
LD->getMemoryVT();
1939 EVT LoMemVT, HiMemVT;
1946 ReplaceValueWith(
SDValue(LD, 1), NewChain);
1951 LD->getPointerInfo(), LoMemVT,
LD->getOriginalAlign(),
1955 IncrementPointer(LD, LoMemVT, MPI,
Ptr);
1958 HiMemVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
1967 ReplaceValueWith(
SDValue(LD, 1), Ch);
1972 assert(
LD->isUnindexed() &&
"Indexed VP load during type legalization!");
1981 assert(
Offset.isUndef() &&
"Unexpected indexed variable-length load offset");
1982 Align Alignment =
LD->getOriginalAlign();
1985 EVT MemoryVT =
LD->getMemoryVT();
1987 EVT LoMemVT, HiMemVT;
1988 bool HiIsEmpty =
false;
1989 std::tie(LoMemVT, HiMemVT) =
1995 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
1998 GetSplitVector(Mask, MaskLo, MaskHi);
2000 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2005 std::tie(EVLLo, EVLHi) = DAG.
SplitEVL(EVL,
LD->getValueType(0), dl);
2014 MaskLo, EVLLo, LoMemVT, MMO,
LD->isExpandingLoad());
2023 LD->isExpandingLoad());
2029 MPI =
LD->getPointerInfo().getWithOffset(
2034 Alignment,
LD->getAAInfo(),
LD->getRanges());
2037 Offset, MaskHi, EVLHi, HiMemVT, MMO,
2038 LD->isExpandingLoad());
2048 ReplaceValueWith(
SDValue(LD, 1), Ch);
2054 "Indexed VP strided load during type legalization!");
2056 "Unexpected indexed variable-length load offset");
2063 EVT LoMemVT, HiMemVT;
2064 bool HiIsEmpty =
false;
2065 std::tie(LoMemVT, HiMemVT) =
2071 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2074 GetSplitVector(Mask, LoMask, HiMask);
2080 std::tie(LoEVL, HiEVL) =
2118 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2129 ReplaceValueWith(
SDValue(SLD, 1), Ch);
2142 assert(
Offset.isUndef() &&
"Unexpected indexed masked load offset");
2151 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2154 GetSplitVector(Mask, MaskLo, MaskHi);
2156 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2160 EVT LoMemVT, HiMemVT;
2161 bool HiIsEmpty =
false;
2162 std::tie(LoMemVT, HiMemVT) =
2165 SDValue PassThruLo, PassThruHi;
2167 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2169 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2212 ReplaceValueWith(
SDValue(MLD, 1), Ch);
2229 if (
auto *MSC = dyn_cast<MaskedGatherSDNode>(
N)) {
2230 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
2232 auto *VPSC = cast<VPGatherSDNode>(
N);
2233 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
2236 EVT MemoryVT =
N->getMemoryVT();
2237 Align Alignment =
N->getOriginalAlign();
2241 if (SplitSETCC && Ops.Mask.getOpcode() ==
ISD::SETCC) {
2242 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
2244 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
2247 EVT LoMemVT, HiMemVT;
2252 if (getTypeAction(Ops.Index.getValueType()) ==
2254 GetSplitVector(Ops.Index, IndexLo, IndexHi);
2256 std::tie(IndexLo, IndexHi) = DAG.
SplitVector(Ops.Index, dl);
2263 if (
auto *MGT = dyn_cast<MaskedGatherSDNode>(
N)) {
2264 SDValue PassThru = MGT->getPassThru();
2265 SDValue PassThruLo, PassThruHi;
2268 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2270 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2275 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
2277 OpsLo, MMO, IndexTy, ExtType);
2279 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
2281 OpsHi, MMO, IndexTy, ExtType);
2283 auto *VPGT = cast<VPGatherSDNode>(
N);
2285 std::tie(EVLLo, EVLHi) =
2286 DAG.
SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
2288 SDValue OpsLo[] = {Ch,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
2290 MMO, VPGT->getIndexType());
2292 SDValue OpsHi[] = {Ch,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
2294 MMO, VPGT->getIndexType());
2304 ReplaceValueWith(
SDValue(
N, 1), Ch);
2308 assert(
N->getValueType(0).isVector() &&
2309 N->getOperand(0).getValueType().isVector() &&
2310 "Operand types must be vectors");
2318 if (getTypeAction(
N->getOperand(0).getValueType()) ==
2320 GetSplitVector(
N->getOperand(0), LL, LH);
2324 if (getTypeAction(
N->getOperand(1).getValueType()) ==
2326 GetSplitVector(
N->getOperand(1), RL, RH);
2331 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2));
2332 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2));
2334 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
2335 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2336 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
2337 std::tie(EVLLo, EVLHi) =
2338 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
2339 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2), MaskLo,
2341 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2), MaskHi,
2355 EVT InVT =
N->getOperand(0).getValueType();
2357 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2362 unsigned Opcode =
N->getOpcode();
2363 if (
N->getNumOperands() <= 2) {
2365 Lo = DAG.
getNode(Opcode, dl, LoVT,
Lo,
N->getOperand(1), Flags);
2366 Hi = DAG.
getNode(Opcode, dl, HiVT,
Hi,
N->getOperand(1), Flags);
2374 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
2375 assert(
N->isVPOpcode() &&
"Expected VP opcode");
2378 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2381 std::tie(EVLLo, EVLHi) =
2382 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2395 EVT InVT =
N->getOperand(0).getValueType();
2397 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2401 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
2402 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
2403 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
2408void DAGTypeLegalizer::SplitVecRes_FFREXP(
SDNode *
N,
unsigned ResNo,
2416 EVT InVT =
N->getOperand(0).getValueType();
2418 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2422 Lo = DAG.
getNode(
N->getOpcode(), dl, {LoVT, LoVT1},
Lo);
2423 Hi = DAG.
getNode(
N->getOpcode(), dl, {HiVT, HiVT1},
Hi);
2424 Lo->setFlags(
N->getFlags());
2425 Hi->setFlags(
N->getFlags());
2431 unsigned OtherNo = 1 - ResNo;
2432 EVT OtherVT =
N->getValueType(OtherNo);
2440 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
2447 EVT SrcVT =
N->getOperand(0).getValueType();
2448 EVT DestVT =
N->getValueType(0);
2471 EVT SplitLoVT, SplitHiVT;
2475 LLVM_DEBUG(
dbgs() <<
"Split vector extend via incremental extend:";
2476 N->dump(&DAG);
dbgs() <<
"\n");
2477 if (!
N->isVPOpcode()) {
2480 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0));
2491 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0),
2492 N->getOperand(1),
N->getOperand(2));
2497 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2500 std::tie(EVLLo, EVLHi) =
2501 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2503 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2504 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2509 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
2517 GetSplitVector(
N->getOperand(0), Inputs[0], Inputs[1]);
2518 GetSplitVector(
N->getOperand(1), Inputs[2], Inputs[3]);
2524 return N.getResNo() == 0 &&
2528 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &
DL](
SDValue &Input1,
2533 "Expected build vector node.");
2536 for (
unsigned I = 0;
I < NewElts; ++
I) {
2541 Ops[
I] = Input2.getOperand(
Idx - NewElts);
2543 Ops[
I] = Input1.getOperand(
Idx);
2545 if (Ops[
I].getValueType().bitsGT(EltVT))
2548 return DAG.getBuildVector(NewVT,
DL, Ops);
2556 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT,
this, NewElts,
2560 for (
unsigned Idx = 0;
Idx < std::size(Inputs); ++
Idx) {
2562 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.
getNode());
2571 for (
auto &
P : ShufflesIdxs) {
2572 if (
P.second.size() < 2)
2576 for (
int &
Idx : Mask) {
2579 unsigned SrcRegIdx =
Idx / NewElts;
2580 if (Inputs[SrcRegIdx].
isUndef()) {
2585 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
2588 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2593 Idx = MaskElt % NewElts +
2594 P.second[Shuffle->getOperand(MaskElt / NewElts) ==
P.first.first
2600 Inputs[
P.second[0]] =
P.first.first;
2601 Inputs[
P.second[1]] =
P.first.second;
2604 ShufflesIdxs[std::make_pair(
P.first.second,
P.first.first)].clear();
2608 for (
int &
Idx : Mask) {
2611 unsigned SrcRegIdx =
Idx / NewElts;
2612 if (Inputs[SrcRegIdx].
isUndef()) {
2617 getTypeAction(Inputs[SrcRegIdx].getValueType());
2619 Inputs[SrcRegIdx].getNumOperands() == 2 &&
2620 !Inputs[SrcRegIdx].getOperand(1).
isUndef() &&
2623 UsedSubVector.set(2 * SrcRegIdx + (
Idx % NewElts) / (NewElts / 2));
2625 if (UsedSubVector.count() > 1) {
2627 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2628 if (UsedSubVector.test(2 *
I) == UsedSubVector.test(2 *
I + 1))
2630 if (Pairs.
empty() || Pairs.
back().size() == 2)
2632 if (UsedSubVector.test(2 *
I)) {
2633 Pairs.
back().emplace_back(
I, 0);
2635 assert(UsedSubVector.test(2 *
I + 1) &&
2636 "Expected to be used one of the subvectors.");
2637 Pairs.
back().emplace_back(
I, 1);
2640 if (!Pairs.
empty() && Pairs.
front().size() > 1) {
2642 for (
int &
Idx : Mask) {
2645 unsigned SrcRegIdx =
Idx / NewElts;
2647 Pairs, [SrcRegIdx](
ArrayRef<std::pair<unsigned, int>> Idxs) {
2648 return Idxs.front().first == SrcRegIdx ||
2649 Idxs.back().first == SrcRegIdx;
2651 if (It == Pairs.
end())
2653 Idx = It->front().first * NewElts + (
Idx % NewElts) % (NewElts / 2) +
2654 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
2657 for (
ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
2658 Inputs[Idxs.front().first] = DAG.
getNode(
2660 Inputs[Idxs.front().first].getValueType(),
2661 Inputs[Idxs.front().first].
getOperand(Idxs.front().second),
2662 Inputs[Idxs.back().first].
getOperand(Idxs.back().second));
2671 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2672 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[
I].getNode());
2675 if (Shuffle->getOperand(0).getValueType() != NewVT)
2678 if (!Inputs[
I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
2679 !Shuffle->isSplat()) {
2681 }
else if (!Inputs[
I].hasOneUse() &&
2682 !Shuffle->getOperand(1).isUndef()) {
2684 for (
int &
Idx : Mask) {
2687 unsigned SrcRegIdx =
Idx / NewElts;
2690 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2695 int OpIdx = MaskElt / NewElts;
2708 for (
int OpIdx = 0; OpIdx < 2; ++OpIdx) {
2709 if (Shuffle->getOperand(OpIdx).isUndef())
2711 auto *It =
find(Inputs, Shuffle->getOperand(OpIdx));
2712 if (It == std::end(Inputs))
2714 int FoundOp = std::distance(std::begin(Inputs), It);
2717 for (
int &
Idx : Mask) {
2720 unsigned SrcRegIdx =
Idx / NewElts;
2723 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2728 int MaskIdx = MaskElt / NewElts;
2729 if (OpIdx == MaskIdx)
2730 Idx = MaskElt % NewElts + FoundOp * NewElts;
2733 Op = (OpIdx + 1) % 2;
2741 for (
int &
Idx : Mask) {
2744 unsigned SrcRegIdx =
Idx / NewElts;
2747 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2748 int OpIdx = MaskElt / NewElts;
2751 Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
2757 TryPeekThroughShufflesInputs(OrigMask);
2759 auto &&MakeUniqueInputs = [&Inputs, &
IsConstant,
2763 for (
const auto &
I : Inputs) {
2765 UniqueConstantInputs.
insert(
I);
2766 else if (!
I.isUndef())
2771 if (UniqueInputs.
size() != std::size(Inputs)) {
2772 auto &&UniqueVec = UniqueInputs.
takeVector();
2773 auto &&UniqueConstantVec = UniqueConstantInputs.
takeVector();
2774 unsigned ConstNum = UniqueConstantVec.size();
2775 for (
int &
Idx : Mask) {
2778 unsigned SrcRegIdx =
Idx / NewElts;
2779 if (Inputs[SrcRegIdx].
isUndef()) {
2783 const auto It =
find(UniqueConstantVec, Inputs[SrcRegIdx]);
2784 if (It != UniqueConstantVec.end()) {
2786 NewElts * std::distance(UniqueConstantVec.begin(), It);
2787 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2790 const auto RegIt =
find(UniqueVec, Inputs[SrcRegIdx]);
2791 assert(RegIt != UniqueVec.end() &&
"Cannot find non-const value.");
2793 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
2794 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2796 copy(UniqueConstantVec, std::begin(Inputs));
2797 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
2800 MakeUniqueInputs(OrigMask);
2802 copy(Inputs, std::begin(OrigInputs));
2808 unsigned FirstMaskIdx =
High * NewElts;
2811 assert(!Output &&
"Expected default initialized initial value.");
2812 TryPeekThroughShufflesInputs(Mask);
2813 MakeUniqueInputs(Mask);
2815 copy(Inputs, std::begin(TmpInputs));
2818 bool SecondIteration =
false;
2819 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](
unsigned Idx) {
2824 if (UsedIdx >= 0 &&
static_cast<unsigned>(UsedIdx) ==
Idx)
2825 SecondIteration =
true;
2826 return SecondIteration;
2829 Mask, std::size(Inputs), std::size(Inputs),
2831 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
2832 [&Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2835 Output = BuildVector(Inputs[
Idx], Inputs[
Idx], Mask);
2837 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[
Idx],
2838 DAG.getUNDEF(NewVT), Mask);
2839 Inputs[
Idx] = Output;
2841 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2844 if (AccumulateResults(Idx1)) {
2847 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
2849 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[Idx1],
2850 Inputs[Idx2], Mask);
2854 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
2856 Output = DAG.getVectorShuffle(NewVT,
DL, TmpInputs[Idx1],
2857 TmpInputs[Idx2], Mask);
2859 Inputs[Idx1] = Output;
2861 copy(OrigInputs, std::begin(Inputs));
2866 EVT OVT =
N->getValueType(0);
2873 const Align Alignment =
2874 DAG.getDataLayout().getABITypeAlign(NVT.
getTypeForEVT(*DAG.getContext()));
2876 Lo = DAG.getVAArg(NVT, dl, Chain,
Ptr, SV, Alignment.
value());
2877 Hi = DAG.getVAArg(NVT, dl,
Lo.getValue(1),
Ptr, SV, Alignment.
value());
2878 Chain =
Hi.getValue(1);
2882 ReplaceValueWith(
SDValue(
N, 1), Chain);
2887 EVT DstVTLo, DstVTHi;
2888 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(
N->getValueType(0));
2892 EVT SrcVT =
N->getOperand(0).getValueType();
2894 GetSplitVector(
N->getOperand(0), SrcLo, SrcHi);
2896 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(
N, 0);
2898 Lo = DAG.getNode(
N->getOpcode(), dl, DstVTLo, SrcLo,
N->getOperand(1));
2899 Hi = DAG.getNode(
N->getOpcode(), dl, DstVTHi, SrcHi,
N->getOperand(1));
2905 GetSplitVector(
N->getOperand(0), InLo, InHi);
2914 EVT VT =
N->getValueType(0);
2918 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
2922 DAG.getVectorIdxConstant(0,
DL));
2930 EVT VT =
N->getValueType(0);
2937 Align Alignment = DAG.getReducedAlign(VT,
false);
2943 auto &MF = DAG.getMachineFunction();
2957 DAG.getConstant(1,
DL, PtrVT));
2959 DAG.getConstant(EltWidth,
DL, PtrVT));
2961 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth,
DL, PtrVT);
2963 SDValue TrueMask = DAG.getBoolConstant(
true,
DL,
Mask.getValueType(), VT);
2964 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(),
DL, Val, StorePtr,
2965 DAG.getUNDEF(PtrVT), Stride, TrueMask,
2968 SDValue Load = DAG.getLoadVP(VT,
DL, Store, StackPtr, Mask, EVL, LoadMMO);
2970 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT);
2972 DAG.getVectorIdxConstant(0,
DL));
// Split the two vector results of an ISD::VECTOR_DEINTERLEAVE node by
// splitting both of its vector operands into Lo/Hi halves.
// NOTE(review): this listing is an elided extraction — interior source lines
// (including the DAG.getNode(ISD::VECTOR_DEINTERLEAVE, ...) calls that the
// trailing argument lines below belong to) are missing. Do not edit the
// logic without the complete file.
2978void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(
SDNode *
N) {
2980 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
// Recover the previously split halves of both deinterleave operands.
2981 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2982 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
// Trailing argument lines of two elided node-creation calls that presumably
// rebuild the deinterleave on each half with a (VT, VT) value list — confirm
// against the full source.
2986 DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
2988 DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
// Split the two vector results of an ISD::VECTOR_INTERLEAVE node: interleave
// the Lo halves of both operands and, separately, the Hi halves, then record
// the two half-results for each of N's results via SetSplitVector.
// NOTE(review): elided extraction — the DAG.getNode calls that produce Res[]
// are missing; only their trailing argument lines (3001/3003) are visible.
2994void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(
SDNode *
N) {
2995 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
// Recover the previously split halves of both interleave operands.
2996 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2997 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
// Trailing argument lines of two elided interleave node-creation calls:
// Lo halves together, Hi halves together.
3001 DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
3003 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
// Register the split halves for result 0 and result 1 of N.
3005 SetSplitVector(
SDValue(
N, 0), Res[0].getValue(0), Res[0].getValue(1));
3006 SetSplitVector(
SDValue(
N, 1), Res[1].getValue(0), Res[1].getValue(1));
3017bool DAGTypeLegalizer::SplitVectorOperand(
SDNode *
N,
unsigned OpNo) {
3022 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
3025 switch (
N->getOpcode()) {
3028 dbgs() <<
"SplitVectorOperand Op #" << OpNo <<
": ";
3036 case ISD::SETCC: Res = SplitVecOp_VSETCC(
N);
break;
3042 case ISD::VP_TRUNCATE:
3044 Res = SplitVecOp_TruncateHelper(
N);
3047 case ISD::VP_FP_ROUND:
3051 Res = SplitVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
3054 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(
N), OpNo);
3056 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
3057 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(
N), OpNo);
3060 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(
N), OpNo);
3063 case ISD::VP_SCATTER:
3064 Res = SplitVecOp_Scatter(cast<MemSDNode>(
N), OpNo);
3067 case ISD::VP_GATHER:
3068 Res = SplitVecOp_Gather(cast<MemSDNode>(
N), OpNo);
3071 Res = SplitVecOp_VSELECT(
N, OpNo);
3077 case ISD::VP_SINT_TO_FP:
3078 case ISD::VP_UINT_TO_FP:
3079 if (
N->getValueType(0).bitsLT(
3080 N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).getValueType()))
3081 Res = SplitVecOp_TruncateHelper(
N);
3083 Res = SplitVecOp_UnaryOp(
N);
3087 Res = SplitVecOp_FP_TO_XINT_SAT(
N);
3091 case ISD::VP_FP_TO_SINT:
3092 case ISD::VP_FP_TO_UINT:
3103 Res = SplitVecOp_UnaryOp(
N);
3106 Res = SplitVecOp_FPOpDifferentTypes(
N);
3112 Res = SplitVecOp_ExtVecInRegOp(
N);
3130 Res = SplitVecOp_VECREDUCE(
N, OpNo);
3134 Res = SplitVecOp_VECREDUCE_SEQ(
N);
3136 case ISD::VP_REDUCE_FADD:
3137 case ISD::VP_REDUCE_SEQ_FADD:
3138 case ISD::VP_REDUCE_FMUL:
3139 case ISD::VP_REDUCE_SEQ_FMUL:
3140 case ISD::VP_REDUCE_ADD:
3141 case ISD::VP_REDUCE_MUL:
3142 case ISD::VP_REDUCE_AND:
3143 case ISD::VP_REDUCE_OR:
3144 case ISD::VP_REDUCE_XOR:
3145 case ISD::VP_REDUCE_SMAX:
3146 case ISD::VP_REDUCE_SMIN:
3147 case ISD::VP_REDUCE_UMAX:
3148 case ISD::VP_REDUCE_UMIN:
3149 case ISD::VP_REDUCE_FMAX:
3150 case ISD::VP_REDUCE_FMIN:
3151 Res = SplitVecOp_VP_REDUCE(
N, OpNo);
3153 case ISD::VP_CTTZ_ELTS:
3154 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
3155 Res = SplitVecOp_VP_CttzElements(
N);
3160 if (!Res.
getNode())
return false;
3167 if (
N->isStrictFPOpcode())
3169 "Invalid operand expansion");
3172 "Invalid operand expansion");
3174 ReplaceValueWith(
SDValue(
N, 0), Res);
// Split operand OpNo (which must be the mask, operand 0) of a VSELECT whose
// mask type is illegal: split the mask and both data operands and presumably
// rebuild two half-width VSELECTs joined by CONCAT_VECTORS.
// NOTE(review): elided extraction — the declarations of Mask/Src0/Src1/Src0VT
// (original lines ~3182-3187) and the final node construction/return are
// missing from this listing; verify against the full source before editing.
3178SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(
SDNode *
N,
unsigned OpNo) {
// Only the mask operand may be illegal here.
3181 assert(OpNo == 0 &&
"Illegal operand must be mask");
3188 assert(
Mask.getValueType().isVector() &&
"VSELECT without a vector mask?");
// The data operand's split halves must agree in type.
3191 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3192 assert(
Lo.getValueType() ==
Hi.getValueType() &&
3193 "Lo and Hi have differing types");
// Compute symmetric destination types for the split source vector.
3196 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3197 assert(LoOpVT == HiOpVT &&
"Asymmetric vector split?");
// Split both select inputs and the mask into matching halves.
3199 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3200 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0,
DL);
3201 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1,
DL);
3202 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
// Split the vector operand of a VECREDUCE_* node: reduce the two halves
// pairwise (the elided lines presumably combine Lo and Hi into 'Partial')
// and then apply the scalar reduction to the partial result.
// NOTE(review): elided extraction — the computation of 'Partial' and the
// declarations of Lo/Hi/VecVT/LoOpVT/HiOpVT are missing from this listing.
3212SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(
SDNode *
N,
unsigned OpNo) {
3213 EVT ResVT =
N->getValueType(0);
3217 SDValue VecOp =
N->getOperand(OpNo);
3219 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
// Recover the previously split halves of the reduced vector.
3220 GetSplitVector(VecOp,
Lo,
Hi);
3222 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
// Final reduction over the partially-combined halves, preserving flags.
3228 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
N->getFlags());
3232 EVT ResVT =
N->getValueType(0);
3241 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3242 GetSplitVector(VecOp,
Lo,
Hi);
3244 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3250 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
Hi, Flags);
// Split the vector operand (operand 1) of a VP_REDUCE_* node. The mask and
// EVL operands are split alongside the data; the Lo half is reduced first
// using the original start value, and its scalar result seeds the reduction
// of the Hi half, chaining the two partial reductions.
// NOTE(review): elided extraction — declarations of Lo/Hi/VecVT/MaskLo/
// MaskHi/EVLLo/EVLHi/Flags/ResLo are missing from this listing.
3253SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(
SDNode *
N,
unsigned OpNo) {
3254 assert(
N->isVPOpcode() &&
"Expected VP opcode");
// VP reductions carry (start, vec, mask, evl); only the vector may split.
3255 assert(OpNo == 1 &&
"Can only split reduce vector operand");
3257 unsigned Opc =
N->getOpcode();
3258 EVT ResVT =
N->getValueType(0);
3262 SDValue VecOp =
N->getOperand(OpNo);
3264 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3265 GetSplitVector(VecOp,
Lo,
Hi);
// Split mask (operand 2) and explicit vector length (operand 3) to match.
3268 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
3271 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(
N->getOperand(3), VecVT, dl);
// Reduce Lo with the original start value, then feed ResLo as the start
// value of the Hi reduction.
3276 DAG.
getNode(Opc, dl, ResVT, {
N->getOperand(0),
Lo, MaskLo, EVLLo},
Flags);
3277 return DAG.getNode(Opc, dl, ResVT, {ResLo,
Hi, MaskHi, EVLHi},
Flags);
3282 EVT ResVT =
N->getValueType(0);
3285 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3286 EVT InVT =
Lo.getValueType();
3291 if (
N->isStrictFPOpcode()) {
3292 Lo = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3293 { N->getOperand(0), Lo });
3294 Hi = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3295 { N->getOperand(0), Hi });
3304 ReplaceValueWith(
SDValue(
N, 1), Ch);
3305 }
else if (
N->getNumOperands() == 3) {
3306 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3307 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3308 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
3309 std::tie(EVLLo, EVLHi) =
3310 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
3311 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo, MaskLo, EVLLo);
3312 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi, MaskHi, EVLHi);
3314 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo);
3315 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi);
3325 EVT ResVT =
N->getValueType(0);
3327 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3331 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3337 Lo = BitConvertToInteger(
Lo);
3338 Hi = BitConvertToInteger(
Hi);
3340 if (DAG.getDataLayout().isBigEndian())
3348 assert(OpNo == 1 &&
"Invalid OpNo; can only split SubVec.");
3350 EVT ResVT =
N->getValueType(0);
3358 GetSplitVector(SubVec,
Lo,
Hi);
3361 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3367 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3369 return SecondInsertion;
// Split the source operand of EXTRACT_SUBVECTOR. If the requested subvector
// lies entirely in the Lo half it is extracted from there; otherwise the
// code falls back to spilling the whole source vector to a stack temporary
// and loading the subvector back (the scalable/fixed-width mixed cases are
// rejected by the elided asserts whose message strings are visible below).
// NOTE(review): elided extraction — several statements (the in-register
// extract paths, PtrInfo setup, and parts of the load) are missing.
3372SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
3374 EVT SubVT =
N->getValueType(0);
3379 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
// Minimum element count of the Lo half; used to decide which half holds
// the extraction point.
3381 uint64_t LoEltsMin =
Lo.getValueType().getVectorMinNumElements();
3384 if (IdxVal < LoEltsMin) {
3386 "Extracted subvector crosses vector split!");
3389 N->getOperand(0).getValueType().isScalableVector())
// Re-index relative to the Hi half when extracting past the split point.
3391 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
3396 "Extracting scalable subvector from fixed-width unsupported");
3404 "subvector from a scalable predicate vector");
// Fallback: spill the full vector to a stack slot aligned for the type...
3410 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3412 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3413 auto &MF = DAG.getMachineFunction();
3417 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
// ...and reload just the requested subvector from the adjusted pointer.
3424 SubVT, dl, Store, StackPtr,
// Split the source operand of EXTRACT_VECTOR_ELT. A constant index that
// falls in the Lo half extracts from Lo; an index past the split is re-based
// into the Hi half. Otherwise (after giving targets a custom-lowering
// chance) the vector is spilled to a stack temporary and a single element is
// loaded back, zero-extending or truncating when the result type is narrower
// than the in-memory element type.
// NOTE(review): elided extraction — declarations of Vec/Idx/Lo/Hi/VecVT/
// EltVT and several statements between the visible lines are missing.
3428SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
3437 GetSplitVector(Vec,
Lo,
Hi);
3439 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
// Constant index in the Lo half: extract directly (elided branch body).
3441 if (IdxVal < LoElts)
// Index in the Hi half: rebase the index by the Lo element count.
3445 DAG.getConstant(IdxVal - LoElts,
SDLoc(
N),
3446 Idx.getValueType())), 0);
// Give the target a chance to custom-lower before the stack fallback.
3450 if (CustomLowerNode(
N,
N->getValueType(0),
true))
// Stack fallback: store the whole vector to a reduced-alignment slot...
3466 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3468 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3469 auto &MF = DAG.getMachineFunction();
3472 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
// ...then load one element; narrow result types load the full element and
// zext/truncate it down.
3480 if (
N->getValueType(0).bitsLT(EltVT)) {
3481 SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
3483 return DAG.getZExtOrTrunc(Load, dl,
N->getValueType(0));
// Wider result types use an extending load instead.
3486 return DAG.getExtLoad(
3497 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
3505 SplitVecRes_Gather(
N,
Lo,
Hi);
3508 ReplaceValueWith(
SDValue(
N, 0), Res);
3513 assert(
N->isUnindexed() &&
"Indexed vp_store of vector?");
3517 assert(
Offset.isUndef() &&
"Unexpected VP store offset");
3519 SDValue EVL =
N->getVectorLength();
3521 Align Alignment =
N->getOriginalAlign();
3527 GetSplitVector(
Data, DataLo, DataHi);
3529 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3534 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3537 GetSplitVector(Mask, MaskLo, MaskHi);
3539 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3542 EVT MemoryVT =
N->getMemoryVT();
3543 EVT LoMemVT, HiMemVT;
3544 bool HiIsEmpty =
false;
3545 std::tie(LoMemVT, HiMemVT) =
3546 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3550 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL,
Data.getValueType(),
DL);
3558 Lo = DAG.getStoreVP(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, EVLLo, LoMemVT, MMO,
3559 N->getAddressingMode(),
N->isTruncatingStore(),
3560 N->isCompressingStore());
3567 N->isCompressingStore());
3575 MPI =
N->getPointerInfo().getWithOffset(
3578 MMO = DAG.getMachineFunction().getMachineMemOperand(
3580 Alignment,
N->getAAInfo(),
N->getRanges());
3582 Hi = DAG.getStoreVP(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
3583 N->getAddressingMode(),
N->isTruncatingStore(),
3584 N->isCompressingStore());
3593 assert(
N->isUnindexed() &&
"Indexed vp_strided_store of a vector?");
3594 assert(
N->getOffset().isUndef() &&
"Unexpected VP strided store offset");
3601 GetSplitVector(
Data, LoData, HiData);
3603 std::tie(LoData, HiData) = DAG.SplitVector(
Data,
DL);
3605 EVT LoMemVT, HiMemVT;
3606 bool HiIsEmpty =
false;
3607 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
3613 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
3614 else if (getTypeAction(
Mask.getValueType()) ==
3616 GetSplitVector(Mask, LoMask, HiMask);
3618 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3621 std::tie(LoEVL, HiEVL) =
3622 DAG.SplitEVL(
N->getVectorLength(),
Data.getValueType(),
DL);
3626 N->getChain(),
DL, LoData,
N->getBasePtr(),
N->getOffset(),
3627 N->getStride(), LoMask, LoEVL, LoMemVT,
N->getMemOperand(),
3628 N->getAddressingMode(),
N->isTruncatingStore(),
N->isCompressingStore());
3639 EVT PtrVT =
N->getBasePtr().getValueType();
3642 DAG.getSExtOrTrunc(
N->getStride(),
DL, PtrVT));
3645 Align Alignment =
N->getOriginalAlign();
3653 Alignment,
N->getAAInfo(),
N->getRanges());
3656 N->getChain(),
DL, HiData,
Ptr,
N->getOffset(),
N->getStride(), HiMask,
3657 HiEVL, HiMemVT, MMO,
N->getAddressingMode(),
N->isTruncatingStore(),
3658 N->isCompressingStore());
3667 assert(
N->isUnindexed() &&
"Indexed masked store of vector?");
3671 assert(
Offset.isUndef() &&
"Unexpected indexed masked store offset");
3674 Align Alignment =
N->getOriginalAlign();
3680 GetSplitVector(
Data, DataLo, DataHi);
3682 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3687 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3690 GetSplitVector(Mask, MaskLo, MaskHi);
3692 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3695 EVT MemoryVT =
N->getMemoryVT();
3696 EVT LoMemVT, HiMemVT;
3697 bool HiIsEmpty =
false;
3698 std::tie(LoMemVT, HiMemVT) =
3699 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3707 Lo = DAG.getMaskedStore(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, LoMemVT, MMO,
3708 N->getAddressingMode(),
N->isTruncatingStore(),
3709 N->isCompressingStore());
3718 N->isCompressingStore());
3726 MPI =
N->getPointerInfo().getWithOffset(
3729 MMO = DAG.getMachineFunction().getMachineMemOperand(
3731 Alignment,
N->getAAInfo(),
N->getRanges());
3733 Hi = DAG.getMaskedStore(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, HiMemVT, MMO,
3734 N->getAddressingMode(),
N->isTruncatingStore(),
3735 N->isCompressingStore());
3748 EVT MemoryVT =
N->getMemoryVT();
3749 Align Alignment =
N->getOriginalAlign();
3757 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3758 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
3761 auto *VPSC = cast<VPScatterSDNode>(
N);
3762 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
3767 EVT LoMemVT, HiMemVT;
3768 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3773 GetSplitVector(Ops.Data, DataLo, DataHi);
3775 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data,
DL);
3779 if (OpNo == 1 && Ops.Mask.getOpcode() ==
ISD::SETCC) {
3780 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
3782 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask,
DL);
3786 if (getTypeAction(Ops.Index.getValueType()) ==
3788 GetSplitVector(Ops.Index, IndexLo, IndexHi);
3790 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index,
DL);
3798 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3799 SDValue OpsLo[] = {Ch, DataLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
3801 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3802 MSC->getIndexType(), MSC->isTruncatingStore());
3807 SDValue OpsHi[] = {
Lo, DataHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
3808 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi,
3809 MMO, MSC->getIndexType(),
3810 MSC->isTruncatingStore());
3812 auto *VPSC = cast<VPScatterSDNode>(
N);
3814 std::tie(EVLLo, EVLHi) =
3815 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(),
DL);
3817 SDValue OpsLo[] = {Ch, DataLo,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
3818 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3819 VPSC->getIndexType());
3824 SDValue OpsHi[] = {
Lo, DataHi,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
3825 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi, MMO,
3826 VPSC->getIndexType());
3830 assert(
N->isUnindexed() &&
"Indexed store of vector?");
3831 assert(OpNo == 1 &&
"Can only split the stored value");
3834 bool isTruncating =
N->isTruncatingStore();
3837 EVT MemoryVT =
N->getMemoryVT();
3838 Align Alignment =
N->getOriginalAlign();
3842 GetSplitVector(
N->getOperand(1),
Lo,
Hi);
3844 EVT LoMemVT, HiMemVT;
3845 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3852 Lo = DAG.getTruncStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), LoMemVT,
3853 Alignment, MMOFlags, AAInfo);
3855 Lo = DAG.getStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), Alignment, MMOFlags,
3859 IncrementPointer(
N, LoMemVT, MPI,
Ptr);
3862 Hi = DAG.getTruncStore(Ch,
DL,
Hi,
Ptr, MPI,
3863 HiMemVT, Alignment, MMOFlags, AAInfo);
3865 Hi = DAG.getStore(Ch,
DL,
Hi,
Ptr, MPI, Alignment, MMOFlags, AAInfo);
3879 EVT EltVT =
N->getValueType(0).getVectorElementType();
3881 for (
unsigned i = 0, e =
Op.getValueType().getVectorNumElements();
3884 DAG.getVectorIdxConstant(i,
DL)));
3888 return DAG.getBuildVector(
N->getValueType(0),
DL, Elts);
3909 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
3910 SDValue InVec =
N->getOperand(OpNo);
3912 EVT OutVT =
N->getValueType(0);
3920 EVT LoOutVT, HiOutVT;
3921 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
3922 assert(LoOutVT == HiOutVT &&
"Unequal split?");
3927 if (isTypeLegal(LoOutVT) ||
3928 InElementSize <= OutElementSize * 2)
3929 return SplitVecOp_UnaryOp(
N);
3938 return SplitVecOp_UnaryOp(
N);
3942 GetSplitVector(InVec, InLoVec, InHiVec);
3948 EVT HalfElementVT = IsFloat ?
3950 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
3957 if (
N->isStrictFPOpcode()) {
3958 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3959 {N->getOperand(0), InLoVec});
3960 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3961 {N->getOperand(0), InHiVec});
3967 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InLoVec);
3968 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InHiVec);
3980 if (
N->isStrictFPOpcode()) {
3984 DAG.getTargetConstant(0,
DL, TLI.
getPointerTy(DAG.getDataLayout()))});
3992 DAG.getTargetConstant(
3998 assert(
N->getValueType(0).isVector() &&
3999 N->getOperand(0).getValueType().isVector() &&
4000 "Operand types must be vectors");
4002 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
4004 GetSplitVector(
N->getOperand(0), Lo0, Hi0);
4005 GetSplitVector(
N->getOperand(1), Lo1, Hi1);
4016 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
4017 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4018 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
4019 std::tie(EVLLo, EVLHi) =
4020 DAG.SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
4021 LoRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Lo0, Lo1,
4022 N->getOperand(2), MaskLo, EVLLo);
4023 HiRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Hi0, Hi1,
4024 N->getOperand(2), MaskHi, EVLHi);
4028 EVT OpVT =
N->getOperand(0).getValueType();
4031 return DAG.getNode(ExtendCode,
DL,
N->getValueType(0), Con);
4037 EVT ResVT =
N->getValueType(0);
4040 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
4041 EVT InVT =
Lo.getValueType();
4046 if (
N->isStrictFPOpcode()) {
4047 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4048 { N->getOperand(0), Lo, N->getOperand(2) });
4049 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4050 { N->getOperand(0), Hi, N->getOperand(2) });
4054 Lo.getValue(1),
Hi.getValue(1));
4055 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4056 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4057 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4058 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4059 std::tie(EVLLo, EVLHi) =
4060 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4061 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4062 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
// Split a binary FP operation whose two operands have different vector types
// (e.g. the result/LHS type differs from the RHS type). The result type is
// split to obtain the LHS half-types; if either half-type is illegal the
// whole node is unrolled element-by-element instead.
// NOTE(review): elided extraction — declarations of LHSLo/LHSHi/RHSLo/RHSHi
// and the final combine/return are missing from this listing.
4076SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4079 EVT LHSLoVT, LHSHiVT;
4080 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
// Bail out to scalarization if the split produces illegal types.
4082 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4083 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
// Split the LHS using the explicit half-types, the RHS with its natural
// split, then apply the operation to each half pair.
4086 std::tie(LHSLo, LHSHi) =
4087 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4090 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4092 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4093 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4099 EVT ResVT =
N->getValueType(0);
4102 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4103 EVT InVT =
Lo.getValueType();
4109 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4110 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
4117 EVT ResVT =
N->getValueType(0);
4121 GetSplitVector(VecOp,
Lo,
Hi);
4123 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
4124 auto [EVLLo, EVLHi] =
4126 SDValue VLo = DAG.getZExtOrTrunc(EVLLo,
DL, ResVT);
4132 DAG.getSetCC(
DL, getSetCCResultType(ResVT), ResLo, VLo,
ISD::SETNE);
4134 return DAG.getSelect(
DL, ResVT, ResLoNotEVL, ResLo,
4135 DAG.getNode(
ISD::ADD,
DL, ResVT, VLo, ResHi));
4142void DAGTypeLegalizer::WidenVectorResult(
SDNode *
N,
unsigned ResNo) {
4143 LLVM_DEBUG(
dbgs() <<
"Widen node result " << ResNo <<
": ";
N->dump(&DAG));
4146 if (CustomWidenLowerNode(
N,
N->getValueType(ResNo)))
4151 auto unrollExpandedOp = [&]() {
4156 EVT VT =
N->getValueType(0);
4166 switch (
N->getOpcode()) {
4169 dbgs() <<
"WidenVectorResult #" << ResNo <<
": ";
4177 Res = WidenVecRes_ADDRSPACECAST(
N);
4184 Res = WidenVecRes_INSERT_SUBVECTOR(
N);
4188 case ISD::LOAD: Res = WidenVecRes_LOAD(
N);
break;
4192 Res = WidenVecRes_ScalarOp(
N);
4197 case ISD::VP_SELECT:
4199 Res = WidenVecRes_Select(
N);
4203 case ISD::SETCC: Res = WidenVecRes_SETCC(
N);
break;
4204 case ISD::UNDEF: Res = WidenVecRes_UNDEF(
N);
break;
4206 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N));
4209 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(
N));
4211 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
4212 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N));
4215 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(
N));
4218 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(
N));
4220 case ISD::VP_GATHER:
4221 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(
N));
4224 Res = WidenVecRes_VECTOR_REVERSE(
N);
4232 case ISD::OR:
case ISD::VP_OR:
4241 case ISD::VP_FMINIMUM:
4243 case ISD::VP_FMAXIMUM:
4274 case ISD::VP_FCOPYSIGN:
4275 Res = WidenVecRes_Binary(
N);
4280 if (unrollExpandedOp())
4295 Res = WidenVecRes_BinaryCanTrap(
N);
4304 Res = WidenVecRes_BinaryWithExtraScalarOp(
N);
4307#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4308 case ISD::STRICT_##DAGN:
4309#include "llvm/IR/ConstrainedOps.def"
4310 Res = WidenVecRes_StrictFP(
N);
4319 Res = WidenVecRes_OverflowOp(
N, ResNo);
4323 Res = WidenVecRes_FCOPYSIGN(
N);
4328 Res = WidenVecRes_UnarySameEltsWithScalarArg(
N);
4333 if (!unrollExpandedOp())
4334 Res = WidenVecRes_ExpOp(
N);
4340 Res = WidenVecRes_EXTEND_VECTOR_INREG(
N);
4345 case ISD::VP_FP_EXTEND:
4347 case ISD::VP_FP_ROUND:
4349 case ISD::VP_FP_TO_SINT:
4351 case ISD::VP_FP_TO_UINT:
4353 case ISD::VP_SIGN_EXTEND:
4355 case ISD::VP_SINT_TO_FP:
4356 case ISD::VP_TRUNCATE:
4359 case ISD::VP_UINT_TO_FP:
4361 case ISD::VP_ZERO_EXTEND:
4362 Res = WidenVecRes_Convert(
N);
4367 Res = WidenVecRes_FP_TO_XINT_SAT(
N);
4373 case ISD::VP_LLRINT:
4374 Res = WidenVecRes_XRINT(
N);
4394 if (unrollExpandedOp())
4404 case ISD::VP_BITREVERSE:
4410 case ISD::VP_CTLZ_ZERO_UNDEF:
4416 case ISD::VP_CTTZ_ZERO_UNDEF:
4421 case ISD::VP_FFLOOR:
4423 case ISD::VP_FNEARBYINT:
4424 case ISD::VP_FROUND:
4425 case ISD::VP_FROUNDEVEN:
4426 case ISD::VP_FROUNDTOZERO:
4430 Res = WidenVecRes_Unary(
N);
4437 Res = WidenVecRes_Ternary(
N);
4443 SetWidenedVector(
SDValue(
N, ResNo), Res);
4450 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4451 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4452 SDValue InOp3 = GetWidenedVector(
N->getOperand(2));
4453 if (
N->getNumOperands() == 3)
4454 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4456 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
4457 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4461 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4462 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
4469 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4470 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4471 if (
N->getNumOperands() == 2)
4472 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2,
4475 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
4476 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4480 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4481 {InOp1, InOp2, Mask, N->getOperand(3)},
N->getFlags());
// Widen a binary vector operation that also carries a trailing scalar
// operand (e.g. shift-with-rounding style nodes): widen both vector inputs
// and rebuild the node, passing the scalar operand (InOp3, declared on an
// elided line) through unchanged.
// NOTE(review): elided extraction — the WidenVT/dl/InOp3 declarations and
// the tail of the return statement are missing from this listing.
4484SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(
SDNode *
N) {
4488 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4489 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4491 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
4500 unsigned ConcatEnd,
EVT VT,
EVT MaxVT,
4503 if (ConcatEnd == 1) {
4504 VT = ConcatOps[0].getValueType();
4506 return ConcatOps[0];
4509 SDLoc dl(ConcatOps[0]);
4516 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
4517 int Idx = ConcatEnd - 1;
4518 VT = ConcatOps[
Idx--].getValueType();
4519 while (
Idx >= 0 && ConcatOps[
Idx].getValueType() == VT)
4532 unsigned NumToInsert = ConcatEnd -
Idx - 1;
4533 for (
unsigned i = 0, OpIdx =
Idx+1; i < NumToInsert; i++, OpIdx++) {
4537 ConcatOps[
Idx+1] = VecOp;
4538 ConcatEnd =
Idx + 2;
4544 unsigned RealVals = ConcatEnd -
Idx - 1;
4545 unsigned SubConcatEnd = 0;
4546 unsigned SubConcatIdx =
Idx + 1;
4547 while (SubConcatEnd < RealVals)
4548 SubConcatOps[SubConcatEnd++] = ConcatOps[++
Idx];
4549 while (SubConcatEnd < OpsToConcat)
4550 SubConcatOps[SubConcatEnd++] = undefVec;
4552 NextVT, SubConcatOps);
4553 ConcatEnd = SubConcatIdx + 1;
4558 if (ConcatEnd == 1) {
4559 VT = ConcatOps[0].getValueType();
4561 return ConcatOps[0];
4566 if (NumOps != ConcatEnd ) {
4568 for (
unsigned j = ConcatEnd; j < NumOps; ++j)
4569 ConcatOps[j] = UndefVal;
4577 unsigned Opcode =
N->getOpcode();
4585 NumElts = NumElts / 2;
4589 if (NumElts != 1 && !TLI.
canOpTrap(
N->getOpcode(), VT)) {
4591 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4592 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4593 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
4605 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4606 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4607 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4610 unsigned ConcatEnd = 0;
4618 while (CurNumElts != 0) {
4619 while (CurNumElts >= NumElts) {
4621 DAG.getVectorIdxConstant(
Idx, dl));
4623 DAG.getVectorIdxConstant(
Idx, dl));
4624 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
4626 CurNumElts -= NumElts;
4629 NumElts = NumElts / 2;
4634 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4636 InOp1, DAG.getVectorIdxConstant(
Idx, dl));
4638 InOp2, DAG.getVectorIdxConstant(
Idx, dl));
4639 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
4650 switch (
N->getOpcode()) {
4653 return WidenVecRes_STRICT_FSETCC(
N);
4660 return WidenVecRes_Convert_StrictFP(
N);
4666 unsigned NumOpers =
N->getNumOperands();
4667 unsigned Opcode =
N->getOpcode();
4674 NumElts = NumElts / 2;
4685 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4689 unsigned ConcatEnd = 0;
4696 for (
unsigned i = 1; i < NumOpers; ++i) {
4702 Oper = GetWidenedVector(Oper);
4708 DAG.getUNDEF(WideOpVT), Oper,
4709 DAG.getVectorIdxConstant(0, dl));
4721 while (CurNumElts != 0) {
4722 while (CurNumElts >= NumElts) {
4725 for (
unsigned i = 0; i < NumOpers; ++i) {
4728 EVT OpVT =
Op.getValueType();
4734 DAG.getVectorIdxConstant(
Idx, dl));
4740 EVT OperVT[] = {VT, MVT::Other};
4742 ConcatOps[ConcatEnd++] = Oper;
4745 CurNumElts -= NumElts;
4748 NumElts = NumElts / 2;
4753 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4756 for (
unsigned i = 0; i < NumOpers; ++i) {
4759 EVT OpVT =
Op.getValueType();
4763 DAG.getVectorIdxConstant(
Idx, dl));
4768 EVT WidenVT[] = {WidenEltVT, MVT::Other};
4770 ConcatOps[ConcatEnd++] = Oper;
4779 if (Chains.
size() == 1)
4780 NewChain = Chains[0];
4783 ReplaceValueWith(
SDValue(
N, 1), NewChain);
// Widen one result of a two-result overflow-producing arithmetic node
// (value + overflow flag). Depending on which result (ResNo) needed
// widening, the operands are either fetched as already-widened vectors or
// widened in place (elided branches); a wide clone of the node is built and
// the sibling result is extracted back down and replaced so both of N's
// results stay consistent.
// NOTE(review): elided extraction — the type-computation branches, the Zero
// constant, and the OtherVal extraction are missing from this listing.
4788SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo) {
4790 EVT ResVT =
N->getValueType(0);
4791 EVT OvVT =
N->getValueType(1);
4792 EVT WideResVT, WideOvVT;
// Path where the operands already have widened equivalents registered.
4802 WideLHS = GetWidenedVector(
N->getOperand(0));
4803 WideRHS = GetWidenedVector(
N->getOperand(1));
// Trailing argument lines of elided insert/pad calls on each operand.
4813 N->getOperand(0), Zero);
4816 N->getOperand(1), Zero);
// Rebuild the overflow op at the widened types.
4819 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
4820 SDNode *WideNode = DAG.getNode(
4821 N->getOpcode(),
DL, WideVTs, WideLHS, WideRHS).getNode();
// Replace the result we were NOT asked to widen with a value extracted
// from the wide node, so its users see the original narrow type.
4824 unsigned OtherNo = 1 - ResNo;
4825 EVT OtherVT =
N->getValueType(OtherNo);
4832 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
4835 return SDValue(WideNode, ResNo);
4848 unsigned Opcode =
N->getOpcode();
4857 InOp = ZExtPromotedInteger(InOp);
4868 InOp = GetWidenedVector(
N->getOperand(0));
4871 if (InVTEC == WidenEC) {
4872 if (
N->getNumOperands() == 1)
4873 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
4874 if (
N->getNumOperands() == 3) {
4875 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4878 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
4880 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
4903 unsigned NumConcat =
4908 if (
N->getNumOperands() == 1)
4909 return DAG.getNode(Opcode,
DL, WidenVT, InVec);
4910 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
4915 DAG.getVectorIdxConstant(0,
DL));
4917 if (
N->getNumOperands() == 1)
4918 return DAG.getNode(Opcode,
DL, WidenVT, InVal);
4919 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
4928 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
4929 for (
unsigned i=0; i < MinElts; ++i) {
4931 DAG.getVectorIdxConstant(i,
DL));
4932 if (
N->getNumOperands() == 1)
4933 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val);
4935 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
4938 return DAG.getBuildVector(WidenVT,
DL, Ops);
4947 EVT SrcVT = Src.getValueType();
4951 Src = GetWidenedVector(Src);
4952 SrcVT = Src.getValueType();
4959 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
4968 EVT SrcVT = Src.getValueType();
4972 Src = GetWidenedVector(Src);
4973 SrcVT = Src.getValueType();
4980 if (
N->getNumOperands() == 1)
4981 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
4983 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
4984 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4988 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
// Widen a strict-FP conversion by scalarizing: unroll the conversion over
// the original element count, producing one (element, chain) pair per lane,
// merge the per-lane chains (elided) and replace N's chain result, then
// rebuild the widened vector from the converted elements.
// NOTE(review): elided extraction — declarations of WidenVT/EltVT/DL/Ops/
// NewOps/NewChain and the chain-merging code are missing from this listing.
4991SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
5002 unsigned Opcode =
N->getOpcode();
// Each scalarized strict op yields {element type, MVT::Other} (chain).
5008 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
// Unroll only over the ORIGINAL element count; the widened tail is handled
// by elided code (presumably undef padding) — confirm in the full source.
5013 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5014 for (
unsigned i=0; i < MinElts; ++i) {
5016 DAG.getVectorIdxConstant(i,
DL));
5017 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
// Publish the merged chain as N's result 1.
5021 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5023 return DAG.getBuildVector(WidenVT,
DL, Ops);
5026SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
5027 unsigned Opcode =
N->getOpcode();
5040 InOp = GetWidenedVector(InOp);
5047 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5054 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
5056 DAG.getVectorIdxConstant(i,
DL));
5073 while (Ops.
size() != WidenNumElts)
5076 return DAG.getBuildVector(WidenVT,
DL, Ops);
5082 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5083 return WidenVecRes_BinaryCanTrap(
N);
5093SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5094 SDValue FpValue =
N->getOperand(0);
5098 SDValue Arg = GetWidenedVector(FpValue);
5099 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5105 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5107 SDValue ExpOp =
RHS.getValueType().isVector() ? GetWidenedVector(RHS) :
RHS;
5109 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5115 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5116 if (
N->getNumOperands() == 1)
5117 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5119 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5120 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5124 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5125 {InOp,
Mask,
N->getOperand(2)});
5131 cast<VTSDNode>(
N->getOperand(1))->getVT()
5132 .getVectorElementType(),
5134 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5135 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5136 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5139SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5140 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5141 return GetWidenedVector(WidenVec);
5146 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5147 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
5149 return DAG.getAddrSpaceCast(
SDLoc(
N), WidenVT, InOp,
5150 AddrSpaceCastN->getSrcAddressSpace(),
5151 AddrSpaceCastN->getDestAddressSpace());
5157 EVT VT =
N->getValueType(0);
5161 switch (getTypeAction(InVT)) {
5175 SDValue NInOp = GetPromotedInteger(InOp);
5177 if (WidenVT.
bitsEq(NInVT)) {
5180 if (DAG.getDataLayout().isBigEndian()) {
5185 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5204 InOp = GetWidenedVector(InOp);
5206 if (WidenVT.
bitsEq(InVT))
5216 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5221 unsigned NewNumParts = WidenSize / InSize;
5234 EVT OrigInVT =
N->getOperand(0).getValueType();
5247 if (WidenSize % InSize == 0) {
5254 DAG.ExtractVectorElements(InOp, Ops);
5255 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5267 return CreateStackStoreLoad(InOp, WidenVT);
5273 EVT VT =
N->getValueType(0);
5277 EVT EltVT =
N->getOperand(0).getValueType();
5284 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5285 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5287 return DAG.getBuildVector(WidenVT, dl, NewOps);
5291 EVT InVT =
N->getOperand(0).getValueType();
5294 unsigned NumOperands =
N->getNumOperands();
5296 bool InputWidened =
false;
5300 if (WidenNumElts % NumInElts == 0) {
5302 unsigned NumConcat = WidenNumElts / NumInElts;
5303 SDValue UndefVal = DAG.getUNDEF(InVT);
5305 for (
unsigned i=0; i < NumOperands; ++i)
5306 Ops[i] =
N->getOperand(i);
5307 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5312 InputWidened =
true;
5316 for (i=1; i < NumOperands; ++i)
5317 if (!
N->getOperand(i).isUndef())
5320 if (i == NumOperands)
5323 return GetWidenedVector(
N->getOperand(0));
5325 if (NumOperands == 2) {
5327 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5333 for (
unsigned i = 0; i < NumInElts; ++i) {
5335 MaskOps[i + NumInElts] = i + WidenNumElts;
5337 return DAG.getVectorShuffle(WidenVT, dl,
5338 GetWidenedVector(
N->getOperand(0)),
5339 GetWidenedVector(
N->getOperand(1)),
5346 "Cannot use build vectors to widen CONCAT_VECTOR result");
5354 for (
unsigned i=0; i < NumOperands; ++i) {
5357 InOp = GetWidenedVector(InOp);
5358 for (
unsigned j = 0;
j < NumInElts; ++
j)
5360 DAG.getVectorIdxConstant(j, dl));
5362 SDValue UndefVal = DAG.getUNDEF(EltVT);
5363 for (;
Idx < WidenNumElts; ++
Idx)
5364 Ops[
Idx] = UndefVal;
5365 return DAG.getBuildVector(WidenVT, dl, Ops);
5368SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5369 EVT VT =
N->getValueType(0);
5371 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5378SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5379 EVT VT =
N->getValueType(0);
5386 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5388 InOp = GetWidenedVector(InOp);
5394 if (IdxVal == 0 && InVT == WidenVT)
5401 assert(IdxVal % VTNumElts == 0 &&
5402 "Expected Idx to be a multiple of subvector minimum vector length");
5403 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5416 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5417 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5418 "down type's element count");
5425 for (;
I < VTNumElts / GCD; ++
I)
5428 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5429 for (;
I < WidenNumElts / GCD; ++
I)
5436 "EXTRACT_SUBVECTOR for scalable vectors");
5443 for (i = 0; i < VTNumElts; ++i)
5445 DAG.getVectorIdxConstant(IdxVal + i, dl));
5447 SDValue UndefVal = DAG.getUNDEF(EltVT);
5448 for (; i < WidenNumElts; ++i)
5450 return DAG.getBuildVector(WidenVT, dl, Ops);
5461SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5462 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5465 N->getOperand(1),
N->getOperand(2));
5478 if (!
LD->getMemoryVT().isByteSized()) {
5482 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5491 EVT LdVT =
LD->getMemoryVT();
5502 const auto *MMO =
LD->getMemOperand();
5504 DAG.getLoadVP(WideVT,
DL,
LD->getChain(),
LD->getBasePtr(), Mask, EVL,
5518 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5520 Result = GenWidenVectorLoads(LdChain, LD);
5527 if (LdChain.
size() == 1)
5528 NewChain = LdChain[0];
5534 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5545 SDValue EVL =
N->getVectorLength();
5552 "Unable to widen binary VP op");
5553 Mask = GetWidenedVector(Mask);
5554 assert(
Mask.getValueType().getVectorElementCount() ==
5557 "Unable to widen vector load");
5560 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5561 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5562 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
5576 "Unable to widen VP strided load");
5577 Mask = GetWidenedVector(Mask);
5580 assert(
Mask.getValueType().getVectorElementCount() ==
5582 "Data and mask vectors should have the same number of elements");
5584 SDValue Res = DAG.getStridedLoadVP(
5585 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
5586 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
5587 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
5588 N->isExpandingLoad());
5600 EVT MaskVT =
Mask.getValueType();
5601 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5609 Mask = ModifyToType(Mask, WideMaskVT,
true);
5611 SDValue Res = DAG.getMaskedLoad(
5612 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
5613 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
5614 ExtType,
N->isExpandingLoad());
5625 EVT MaskVT =
Mask.getValueType();
5626 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5635 Mask = ModifyToType(Mask, WideMaskVT,
true);
5640 Index.getValueType().getScalarType(),
5648 N->getMemoryVT().getScalarType(), NumElts);
5649 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
5650 WideMemVT, dl, Ops,
N->getMemOperand(),
5651 N->getIndexType(),
N->getExtensionType());
5668 N->getMemoryVT().getScalarType(), WideEC);
5669 Mask = GetWidenedMask(Mask, WideEC);
5672 Mask,
N->getVectorLength()};
5673 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
5674 dl, Ops,
N->getMemOperand(),
N->getIndexType());
5684 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
5712 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
5713 return N->getOperand(OpNo).getValueType();
5721 N =
N.getOperand(0);
5723 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
5724 if (!
N->getOperand(i)->isUndef())
5726 N =
N.getOperand(0);
5730 N =
N.getOperand(0);
5732 N =
N.getOperand(0);
5759 { MaskVT, MVT::Other }, Ops);
5760 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
5770 if (MaskScalarBits < ToMaskScalBits) {
5774 }
else if (MaskScalarBits > ToMaskScalBits) {
5780 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
5782 "Mask should have the right element size by now.");
5785 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
5787 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
5792 EVT SubVT =
Mask->getValueType(0);
5798 assert((
Mask->getValueType(0) == ToMaskVT) &&
5799 "A mask of ToMaskVT should have been produced by now.");
5820 EVT CondVT =
Cond->getValueType(0);
5824 EVT VSelVT =
N->getValueType(0);
5836 EVT FinalVT = VSelVT;
5848 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
5866 EVT ToMaskVT = VSelVT;
5873 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5889 if (ScalarBits0 != ScalarBits1) {
5890 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
5891 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
5903 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
5904 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
5908 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5921 unsigned Opcode =
N->getOpcode();
5923 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
5924 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5925 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5927 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
5933 Cond1 = GetWidenedVector(Cond1);
5941 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
5942 SDValue Res = ModifyToType(SplitSelect, WidenVT);
5947 Cond1 = ModifyToType(Cond1, CondWidenVT);
5950 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5951 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5953 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
5954 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
5956 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
5960 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
5961 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
5964 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
5969 return DAG.getUNDEF(WidenVT);
5973 EVT VT =
N->getValueType(0);
5980 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5981 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
5985 for (
unsigned i = 0; i != NumElts; ++i) {
5986 int Idx =
N->getMaskElt(i);
5987 if (
Idx < (
int)NumElts)
5992 for (
unsigned i = NumElts; i != WidenNumElts; ++i)
5994 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
5998 EVT VT =
N->getValueType(0);
6003 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
6009 unsigned IdxVal = WidenNumElts - VTNumElts;
6022 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
6025 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
6026 "down type's element count");
6029 for (; i < VTNumElts / GCD; ++i)
6032 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
6033 for (; i < WidenNumElts / GCD; ++i)
6042 for (
unsigned i = 0; i != VTNumElts; ++i) {
6043 Mask.push_back(IdxVal + i);
6045 for (
unsigned i = VTNumElts; i != WidenNumElts; ++i)
6048 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
6053 assert(
N->getValueType(0).isVector() &&
6054 N->getOperand(0).getValueType().isVector() &&
6055 "Operands must be vectors");
6069 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
6070 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
6077 InOp1 = GetWidenedVector(InOp1);
6078 InOp2 = GetWidenedVector(InOp2);
6080 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
6081 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
6088 "Input not widened to expected type!");
6090 if (
N->getOpcode() == ISD::VP_SETCC) {
6093 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6094 N->getOperand(2), Mask,
N->getOperand(4));
6101 assert(
N->getValueType(0).isVector() &&
6102 N->getOperand(1).getValueType().isVector() &&
6103 "Operands must be vectors");
6104 EVT VT =
N->getValueType(0);
6115 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6120 for (
unsigned i = 0; i != NumElts; ++i) {
6122 DAG.getVectorIdxConstant(i, dl));
6124 DAG.getVectorIdxConstant(i, dl));
6126 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6127 {Chain, LHSElem, RHSElem, CC});
6128 Chains[i] = Scalars[i].getValue(1);
6129 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6130 DAG.getBoolConstant(
true, dl, EltVT, VT),
6131 DAG.getBoolConstant(
false, dl, EltVT, VT));
6135 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6137 return DAG.getBuildVector(WidenVT, dl, Scalars);
6143bool DAGTypeLegalizer::WidenVectorOperand(
SDNode *
N,
unsigned OpNo) {
6144 LLVM_DEBUG(
dbgs() <<
"Widen node operand " << OpNo <<
": ";
N->dump(&DAG));
6148 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
6151 switch (
N->getOpcode()) {
6154 dbgs() <<
"WidenVectorOperand op #" << OpNo <<
": ";
6165 case ISD::STORE: Res = WidenVecOp_STORE(
N);
break;
6166 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(
N, OpNo);
break;
6167 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6168 Res = WidenVecOp_VP_STRIDED_STORE(
N, OpNo);
6173 Res = WidenVecOp_EXTEND_VECTOR_INREG(
N);
6175 case ISD::MSTORE: Res = WidenVecOp_MSTORE(
N, OpNo);
break;
6176 case ISD::MGATHER: Res = WidenVecOp_MGATHER(
N, OpNo);
break;
6178 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(
N, OpNo);
break;
6179 case ISD::SETCC: Res = WidenVecOp_SETCC(
N);
break;
6187 Res = WidenVecOp_UnrollVectorOp(
N);
6194 Res = WidenVecOp_EXTEND(
N);
6210 Res = WidenVecOp_Convert(
N);
6215 Res = WidenVecOp_FP_TO_XINT_SAT(
N);
6233 Res = WidenVecOp_VECREDUCE(
N);
6237 Res = WidenVecOp_VECREDUCE_SEQ(
N);
6239 case ISD::VP_REDUCE_FADD:
6240 case ISD::VP_REDUCE_SEQ_FADD:
6241 case ISD::VP_REDUCE_FMUL:
6242 case ISD::VP_REDUCE_SEQ_FMUL:
6243 case ISD::VP_REDUCE_ADD:
6244 case ISD::VP_REDUCE_MUL:
6245 case ISD::VP_REDUCE_AND:
6246 case ISD::VP_REDUCE_OR:
6247 case ISD::VP_REDUCE_XOR:
6248 case ISD::VP_REDUCE_SMAX:
6249 case ISD::VP_REDUCE_SMIN:
6250 case ISD::VP_REDUCE_UMAX:
6251 case ISD::VP_REDUCE_UMIN:
6252 case ISD::VP_REDUCE_FMAX:
6253 case ISD::VP_REDUCE_FMIN:
6254 Res = WidenVecOp_VP_REDUCE(
N);
6256 case ISD::VP_CTTZ_ELTS:
6257 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
6258 Res = WidenVecOp_VP_CttzElements(
N);
6263 if (!Res.
getNode())
return false;
6271 if (
N->isStrictFPOpcode())
6273 "Invalid operand expansion");
6276 "Invalid operand expansion");
6278 ReplaceValueWith(
SDValue(
N, 0), Res);
6284 EVT VT =
N->getValueType(0);
6289 "Unexpected type action");
6290 InOp = GetWidenedVector(InOp);
6293 "Input wasn't widened!");
6304 FixedEltVT == InEltVT) {
6306 "Not enough elements in the fixed type for the operand!");
6308 "We can't have the same type as we started with!");
6311 DAG.getUNDEF(FixedVT), InOp,
6312 DAG.getVectorIdxConstant(0,
DL));
6315 DAG.getVectorIdxConstant(0,
DL));
6324 return WidenVecOp_Convert(
N);
6329 switch (
N->getOpcode()) {
6345 return DAG.UnrollVectorOp(
N);
6350 EVT ResultVT =
N->getValueType(0);
6352 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6361 {WideArg,
Test},
N->getFlags());
6368 DAG.getVectorIdxConstant(0,
DL));
6370 EVT OpVT =
N->getOperand(0).getValueType();
6373 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6378 EVT VT =
N->getValueType(0);
6381 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6384 "Unexpected type action");
6385 InOp = GetWidenedVector(InOp);
6387 unsigned Opcode =
N->getOpcode();
6393 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6395 if (
N->isStrictFPOpcode()) {
6397 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6398 {
N->getOperand(0), InOp,
N->getOperand(2) });
6400 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6401 {
N->getOperand(0), InOp });
6407 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6409 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6412 DAG.getVectorIdxConstant(0, dl));
6420 if (
N->isStrictFPOpcode()) {
6423 for (
unsigned i=0; i < NumElts; ++i) {
6425 DAG.getVectorIdxConstant(i, dl));
6426 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6430 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6432 for (
unsigned i = 0; i < NumElts; ++i)
6433 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6435 InOp, DAG.getVectorIdxConstant(i, dl)));
6438 return DAG.getBuildVector(VT, dl, Ops);
6442 EVT DstVT =
N->getValueType(0);
6443 SDValue Src = GetWidenedVector(
N->getOperand(0));
6444 EVT SrcVT = Src.getValueType();
6453 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6456 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6460 return DAG.UnrollVectorOp(
N);
6464 EVT VT =
N->getValueType(0);
6465 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6473 if (!VT.
isVector() && VT != MVT::x86mmx &&
6480 DAG.getVectorIdxConstant(0, dl));
6494 .divideCoefficientBy(EltSize);
6499 DAG.getVectorIdxConstant(0, dl));
6504 return CreateStackStoreLoad(InOp, VT);
6508 EVT VT =
N->getValueType(0);
6510 EVT InVT =
N->getOperand(0).getValueType();
6515 unsigned NumOperands =
N->getNumOperands();
6518 for (i = 1; i < NumOperands; ++i)
6519 if (!
N->getOperand(i).isUndef())
6522 if (i == NumOperands)
6523 return GetWidenedVector(
N->getOperand(0));
6533 for (
unsigned i=0; i < NumOperands; ++i) {
6537 "Unexpected type action");
6538 InOp = GetWidenedVector(InOp);
6539 for (
unsigned j = 0;
j < NumInElts; ++
j)
6541 DAG.getVectorIdxConstant(j, dl));
6543 return DAG.getBuildVector(VT, dl, Ops);
6546SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
6547 EVT VT =
N->getValueType(0);
6552 SubVec = GetWidenedVector(SubVec);
6558 bool IndicesValid =
false;
6561 IndicesValid =
true;
6565 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
6566 Attribute::VScaleRange);
6571 IndicesValid =
true;
6577 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
6582 "INSERT_SUBVECTOR");
6585SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
6586 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6588 N->getValueType(0), InOp,
N->getOperand(1));
6591SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
6592 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6594 N->getValueType(0), InOp,
N->getOperand(1));
6597SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
6598 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6599 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
6607 if (!
ST->getMemoryVT().getScalarType().isByteSized())
6610 if (
ST->isTruncatingStore())
6629 StVal = GetWidenedVector(StVal);
6633 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
6634 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
6635 EVL, StVT,
ST->getMemOperand(),
6636 ST->getAddressingMode());
6640 if (GenWidenVectorStores(StChain, ST)) {
6641 if (StChain.
size() == 1)
6650SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
6651 assert((OpNo == 1 || OpNo == 3) &&
6652 "Can widen only data or mask operand of vp_store");
6660 StVal = GetWidenedVector(StVal);
6666 "Unable to widen VP store");
6667 Mask = GetWidenedVector(Mask);
6669 Mask = GetWidenedVector(Mask);
6675 "Unable to widen VP store");
6676 StVal = GetWidenedVector(StVal);
6679 assert(
Mask.getValueType().getVectorElementCount() ==
6681 "Mask and data vectors should have the same number of elements");
6682 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
6683 ST->getOffset(), Mask,
ST->getVectorLength(),
6684 ST->getMemoryVT(),
ST->getMemOperand(),
6685 ST->getAddressingMode(),
ST->isTruncatingStore(),
6686 ST->isCompressingStore());
6691 assert((OpNo == 1 || OpNo == 4) &&
6692 "Can widen only data or mask operand of vp_strided_store");
6701 "Unable to widen VP strided store");
6705 "Unable to widen VP strided store");
6707 StVal = GetWidenedVector(StVal);
6708 Mask = GetWidenedVector(Mask);
6711 Mask.getValueType().getVectorElementCount() &&
6712 "Data and mask vectors should have the same number of elements");
6714 return DAG.getStridedStoreVP(
6721SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
6722 assert((OpNo == 1 || OpNo == 4) &&
6723 "Can widen only data or mask operand of mstore");
6726 EVT MaskVT =
Mask.getValueType();
6732 StVal = GetWidenedVector(StVal);
6739 Mask = ModifyToType(Mask, WideMaskVT,
true);
6743 Mask = ModifyToType(Mask, WideMaskVT,
true);
6749 StVal = ModifyToType(StVal, WideVT);
6752 assert(
Mask.getValueType().getVectorNumElements() ==
6754 "Mask and data vectors should have the same number of elements");
6761SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
6762 assert(OpNo == 4 &&
"Can widen only the index of mgather");
6763 auto *MG = cast<MaskedGatherSDNode>(
N);
6764 SDValue DataOp = MG->getPassThru();
6766 SDValue Scale = MG->getScale();
6774 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
6775 MG->getMemOperand(), MG->getIndexType(),
6776 MG->getExtensionType());
6782SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
6791 DataOp = GetWidenedVector(DataOp);
6795 EVT IndexVT =
Index.getValueType();
6801 EVT MaskVT =
Mask.getValueType();
6804 Mask = ModifyToType(Mask, WideMaskVT,
true);
6809 }
else if (OpNo == 4) {
6817 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
6822SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
6831 DataOp = GetWidenedVector(DataOp);
6834 Mask = GetWidenedMask(Mask, WideEC);
6837 }
else if (OpNo == 3) {
6846 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
6851 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
6852 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6854 EVT VT =
N->getValueType(0);
6869 SVT, InOp0, InOp1,
N->getOperand(2));
6876 DAG.getVectorIdxConstant(0, dl));
6878 EVT OpVT =
N->getOperand(0).getValueType();
6881 return DAG.getNode(ExtendCode, dl, VT,
CC);
6891 EVT VT =
N->getValueType(0);
6893 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6900 for (
unsigned i = 0; i != NumElts; ++i) {
6902 DAG.getVectorIdxConstant(i, dl));
6904 DAG.getVectorIdxConstant(i, dl));
6906 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6907 {Chain, LHSElem, RHSElem, CC});
6908 Chains[i] = Scalars[i].getValue(1);
6909 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6910 DAG.getBoolConstant(
true, dl, EltVT, VT),
6911 DAG.getBoolConstant(
false, dl, EltVT, VT));
6915 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6917 return DAG.getBuildVector(VT, dl, Scalars);
6922 SDValue Op = GetWidenedVector(
N->getOperand(0));
6923 EVT OrigVT =
N->getOperand(0).getValueType();
6924 EVT WideVT =
Op.getValueType();
6928 unsigned Opc =
N->getOpcode();
6930 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6931 assert(NeutralElem &&
"Neutral element must exist");
6938 unsigned GCD = std::gcd(OrigElts, WideElts);
6941 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6942 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6944 DAG.getVectorIdxConstant(
Idx, dl));
6945 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6948 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6950 DAG.getVectorIdxConstant(
Idx, dl));
6952 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6962 EVT WideVT =
Op.getValueType();
6966 unsigned Opc =
N->getOpcode();
6968 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6975 unsigned GCD = std::gcd(OrigElts, WideElts);
6978 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6979 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6981 DAG.getVectorIdxConstant(
Idx, dl));
6982 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6985 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6987 DAG.getVectorIdxConstant(
Idx, dl));
6989 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6993 assert(
N->isVPOpcode() &&
"Expected VP opcode");
6996 SDValue Op = GetWidenedVector(
N->getOperand(1));
6998 Op.getValueType().getVectorElementCount());
7000 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
7001 {N->getOperand(0), Op, Mask, N->getOperand(3)},
7009 EVT VT =
N->getValueType(0);
7020 DAG.getVectorIdxConstant(0,
DL));
7030 return DAG.getNode(
N->getOpcode(),
DL,
N->getValueType(0),
7031 {Source, Mask, N->getOperand(2)},
N->getFlags());
7048 unsigned WidenEx = 0) {
7053 unsigned AlignInBits =
Align*8;
7056 EVT RetVT = WidenEltVT;
7057 if (!Scalable && Width == WidenEltWidth)
7071 (WidenWidth % MemVTWidth) == 0 &&
7073 (MemVTWidth <= Width ||
7074 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7075 if (MemVTWidth == WidenWidth)
7094 (WidenWidth % MemVTWidth) == 0 &&
7096 (MemVTWidth <= Width ||
7097 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7106 return std::nullopt;
7117 unsigned Start,
unsigned End) {
7118 SDLoc dl(LdOps[Start]);
7119 EVT LdTy = LdOps[Start].getValueType();
7127 for (
unsigned i = Start + 1; i !=
End; ++i) {
7128 EVT NewLdTy = LdOps[i].getValueType();
7129 if (NewLdTy != LdTy) {
7150 EVT LdVT =
LD->getMemoryVT();
7164 TypeSize WidthDiff = WidenWidth - LdWidth;
7171 std::optional<EVT> FirstVT =
7172 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7179 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7184 std::optional<EVT> NewVT = FirstVT;
7186 TypeSize NewVTWidth = FirstVTWidth;
7188 RemainingWidth -= NewVTWidth;
7195 NewVTWidth = NewVT->getSizeInBits();
7201 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7202 LD->getOriginalAlign(), MMOFlags, AAInfo);
7206 if (MemVTs.
empty()) {
7208 if (!FirstVT->isVector()) {
7215 if (FirstVT == WidenVT)
7220 unsigned NumConcat =
7223 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7224 ConcatOps[0] = LdOp;
7225 for (
unsigned i = 1; i != NumConcat; ++i)
7226 ConcatOps[i] = UndefVal;
7238 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7241 for (
EVT MemVT : MemVTs) {
7242 Align NewAlign = ScaledOffset == 0
7243 ?
LD->getOriginalAlign()
7246 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7250 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7255 if (!LdOps[0].getValueType().
isVector())
7265 EVT LdTy = LdOps[i].getValueType();
7268 for (--i; i >= 0; --i) {
7269 LdTy = LdOps[i].getValueType();
7276 ConcatOps[--
Idx] = LdOps[i];
7277 for (--i; i >= 0; --i) {
7278 EVT NewLdTy = LdOps[i].getValueType();
7279 if (NewLdTy != LdTy) {
7290 WidenOps[j] = ConcatOps[
Idx+j];
7291 for (;
j != NumOps; ++
j)
7292 WidenOps[j] = DAG.getUNDEF(LdTy);
7299 ConcatOps[--
Idx] = LdOps[i];
7310 SDValue UndefVal = DAG.getUNDEF(LdTy);
7313 for (; i !=
End-
Idx; ++i)
7314 WidenOps[i] = ConcatOps[
Idx+i];
7315 for (; i != NumOps; ++i)
7316 WidenOps[i] = UndefVal;
7328 EVT LdVT =
LD->getMemoryVT();
7341 "not yet supported");
7352 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7353 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7359 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7360 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7361 LD->getOriginalAlign(), MMOFlags, AAInfo);
7366 SDValue UndefVal = DAG.getUNDEF(EltVT);
7367 for (; i != WidenNumElts; ++i)
7370 return DAG.getBuildVector(WidenVT, dl, Ops);
7382 SDValue ValOp = GetWidenedVector(
ST->getValue());
7385 EVT StVT =
ST->getMemoryVT();
7393 "Mismatch between store and value types");
7407 std::optional<EVT> NewVT =
7412 TypeSize NewVTWidth = NewVT->getSizeInBits();
7415 StWidth -= NewVTWidth;
7416 MemVTs.
back().second++;
7420 for (
const auto &Pair : MemVTs) {
7421 EVT NewVT = Pair.first;
7422 unsigned Count = Pair.second;
7428 Align NewAlign = ScaledOffset == 0
7429 ?
ST->getOriginalAlign()
7432 DAG.getVectorIdxConstant(
Idx, dl));
7433 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7438 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
7450 DAG.getVectorIdxConstant(
Idx++, dl));
7452 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
7456 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
7470 bool FillWithZeroes) {
7475 "input and widen element type must match");
7477 "cannot modify scalable vectors in this way");
7489 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
7492 for (
unsigned i = 1; i != NumConcat; ++i)
7500 DAG.getVectorIdxConstant(0, dl));
7503 "Scalable vectors should have been handled already.");
7511 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
7515 DAG.getVectorIdxConstant(
Idx, dl));
7517 SDValue UndefVal = DAG.getUNDEF(EltVT);
7518 for (;
Idx < WidenNumElts; ++
Idx)
7519 Ops[
Idx] = UndefVal;
7521 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
7522 if (!FillWithZeroes)
7526 "We expect to never want to FillWithZeroes for non-integral types.");
7529 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
7530 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
7532 return DAG.getNode(
ISD::AND, dl, NVT, Widened,
7533 DAG.getBuildVector(NVT, dl,
MaskOps));
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to ...
@ TypeScalarizeScalableVector
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent an VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.