#define DEBUG_TYPE "legalize-types"

void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
  switch (N->getOpcode()) {
    dbgs() << "ScalarizeVectorResult #" << ResNo << ": ";
  case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(N); break;
  case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N)); break;
  case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
  case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
    R = ScalarizeVecRes_VecInregOp(N);
    R = ScalarizeVecRes_UnaryOp(N);
    R = ScalarizeVecRes_ADDRSPACECAST(N);
    R = ScalarizeVecRes_FFREXP(N, ResNo);
    R = ScalarizeVecRes_BinOp(N);
    R = ScalarizeVecRes_CMP(N);
    R = ScalarizeVecRes_TernaryOp(N);
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    R = ScalarizeVecRes_StrictFPOp(N);
    R = ScalarizeVecRes_FP_TO_XINT_SAT(N);
    R = ScalarizeVecRes_OverflowOp(N, ResNo);
    R = ScalarizeVecRes_FIX(N);

  SetScalarizedVector(SDValue(N, ResNo), R);

  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
                     LHS.getValueType(), LHS, RHS, N->getFlags());

  if (getTypeAction(LHS.getValueType()) ==
    LHS = GetScalarizedVector(LHS);
    RHS = GetScalarizedVector(RHS);
    EVT VT = LHS.getValueType().getVectorElementType();
                     N->getValueType(0).getVectorElementType(), LHS, RHS);

  SDValue Op0 = GetScalarizedVector(N->getOperand(0));
  SDValue Op1 = GetScalarizedVector(N->getOperand(1));
  SDValue Op2 = GetScalarizedVector(N->getOperand(2));

  SDValue Op0 = GetScalarizedVector(N->getOperand(0));
  SDValue Op1 = GetScalarizedVector(N->getOperand(1));
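// FFREXP produces two results (the fraction and the exponent). Scalarize the
// node once and record the other result as well, so it is not legalized a
// second time when its users are visited.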
SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(SDNode *N, unsigned ResNo) {
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));

  EVT VT0 = N->getValueType(0);
  EVT VT1 = N->getValueType(1);
                  {VT0.getScalarType(), VT1.getScalarType()}, Elt)

  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);

  return SDValue(ScalarNode, ResNo);
  EVT VT = N->getValueType(0).getVectorElementType();
  unsigned NumOpers = N->getNumOperands();
  EVT ValueVTs[] = {VT, MVT::Other};

  for (unsigned i = 1; i < NumOpers; ++i) {
      Oper = GetScalarizedVector(Oper);
                                Opers, N->getFlags());

  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
    ScalarLHS = GetScalarizedVector(N->getOperand(0));
    ScalarRHS = GetScalarizedVector(N->getOperand(1));
    ScalarLHS = ElemsLHS[0];
    ScalarRHS = ElemsRHS[0];
      N->getOpcode(), DL, ScalarVTs, ScalarLHS, ScalarRHS).getNode();

  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);

  return SDValue(ScalarNode, ResNo);
  SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
  return GetScalarizedVector(Op);

  if (Op.getValueType().isVector()
      && Op.getValueType().getVectorNumElements() == 1
      && !isSimpleLegalType(Op.getValueType()))
    Op = GetScalarizedVector(Op);
  EVT NewVT = N->getValueType(0).getVectorElementType();
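// Scalarizing a one-element BUILD_VECTOR yields its single operand, converted
// back to the element type if that operand was promoted.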
SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) {
  EVT EltVT = N->getValueType(0).getVectorElementType();

SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
                     N->getValueType(0).getVectorElementType(),
                     N->getOperand(0), N->getOperand(1));

  EVT OpVT = Op.getValueType();
    Op = GetScalarizedVector(Op);
                     N->getValueType(0).getVectorElementType(), Op,

  SDValue Op = GetScalarizedVector(N->getOperand(0));
SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
  EVT EltVT = N->getValueType(0).getVectorElementType();
  if (Op.getValueType() != EltVT)

  assert(N->isUnindexed() && "Indexed vector load?");
      N->getValueType(0).getVectorElementType(), SDLoc(N), N->getChain(),
      N->getBasePtr(), DAG.getUNDEF(N->getBasePtr().getValueType()),
      N->getPointerInfo(), N->getMemoryVT().getVectorElementType(),
      N->getOriginalAlign(), N->getMemOperand()->getFlags(), N->getAAInfo());

  EVT DestVT = N->getValueType(0).getVectorElementType();
  EVT OpVT = Op.getValueType();
    Op = GetScalarizedVector(Op);

  EVT EltVT = N->getValueType(0).getVectorElementType();
  SDValue LHS = GetScalarizedVector(N->getOperand(0));

  EVT OpVT = Op.getValueType();
  EVT EltVT = N->getValueType(0).getVectorElementType();
    Op = GetScalarizedVector(Op);
  switch (N->getOpcode()) {

SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(SDNode *N) {
  EVT DestVT = N->getValueType(0).getVectorElementType();
  EVT OpVT = Op.getValueType();
    Op = GetScalarizedVector(Op);

  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
  unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
  unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();

SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
  EVT EltVT = N->getValueType(0).getVectorElementType();

  EVT OpVT = Cond.getValueType();
  SDValue LHS = GetScalarizedVector(N->getOperand(1));
  EVT OpVT = Cond->getOperand(0).getValueType();
  EVT CondVT = Cond.getValueType();
  if (ScalarBool != VecBool) {
    switch (ScalarBool) {
  auto BoolVT = getSetCCResultType(CondVT);
  if (BoolVT.bitsLT(CondVT))
                     GetScalarizedVector(N->getOperand(2)));

  SDValue LHS = GetScalarizedVector(N->getOperand(1));
                     LHS.getValueType(), N->getOperand(0), LHS,
                     GetScalarizedVector(N->getOperand(2)));

  SDValue LHS = GetScalarizedVector(N->getOperand(2));
                     N->getOperand(0), N->getOperand(1),
                     LHS, GetScalarizedVector(N->getOperand(3)),

  return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
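// A one-element shuffle reads element 0 of whichever input the single mask
// entry selects, or is undef when that mask entry is undef.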
SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
  SDValue Arg = N->getOperand(2).getOperand(0);
    return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
  unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
  return GetScalarizedVector(N->getOperand(Op));
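// Saturating FP-to-int conversion: operate on the single source element and
// carry the saturation-width operand through unchanged.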
SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N) {
  EVT SrcVT = Src.getValueType();
    Src = GetScalarizedVector(Src);

  EVT DstVT = N->getValueType(0).getVectorElementType();
  return DAG.getNode(N->getOpcode(), dl, DstVT, Src, N->getOperand(1));

  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  EVT OpVT = LHS.getValueType();
  EVT NVT = N->getValueType(0).getVectorElementType();
    LHS = GetScalarizedVector(LHS);
    RHS = GetScalarizedVector(RHS);
  return DAG.getNode(ExtendCode, DL, NVT, Res);

  EVT ResultVT = N->getValueType(0).getVectorElementType();
    Arg = GetScalarizedVector(Arg);
  return DAG.getNode(ExtendCode, DL, ResultVT, Res);
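// Entry point for operand scalarization: operand OpNo of N is a <1 x Ty>
// vector that needs to be rewritten in terms of its single scalar element.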
bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
  switch (N->getOpcode()) {
    dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
    Res = ScalarizeVecOp_BITCAST(N);
    Res = ScalarizeVecOp_UnaryOp(N);
    Res = ScalarizeVecOp_UnaryOp_StrictFP(N);
    Res = ScalarizeVecOp_CONCAT_VECTORS(N);
    Res = ScalarizeVecOp_INSERT_SUBVECTOR(N, OpNo);
    Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
    Res = ScalarizeVecOp_VSELECT(N);
    Res = ScalarizeVecOp_VSETCC(N);
    Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
    Res = ScalarizeVecOp_STRICT_FP_ROUND(N, OpNo);
    Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
    Res = ScalarizeVecOp_STRICT_FP_EXTEND(N);
    Res = ScalarizeVecOp_FP_EXTEND(N);
    Res = ScalarizeVecOp_VECREDUCE(N);
    Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
    Res = ScalarizeVecOp_CMP(N);

  if (!Res.getNode())
    return false;

         "Invalid operand expansion");

  ReplaceValueWith(SDValue(N, 0), Res);
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                     N->getValueType(0), Elt);

  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                     N->getValueType(0).getScalarType(), Elt);

SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(SDNode *N) {
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
                    { N->getValueType(0).getScalarType(), MVT::Other },
                    { N->getOperand(0), Elt });
  ReplaceValueWith(SDValue(N, 0), Res);
SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    Ops[i] = GetScalarizedVector(N->getOperand(i));

SDValue DAGTypeLegalizer::ScalarizeVecOp_INSERT_SUBVECTOR(SDNode *N,
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  SDValue ContainingVec = N->getOperand(0);

SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue Res = GetScalarizedVector(N->getOperand(0));

  SDValue ScalarCond = GetScalarizedVector(N->getOperand(0));
  EVT VT = N->getValueType(0);

  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");
  assert(N->getValueType(0) == MVT::v1i1 && "Expected v1i1 type");
  EVT VT = N->getValueType(0);
  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
  EVT OpVT = N->getOperand(0).getValueType();
  Res = DAG.getNode(ExtendCode, DL, NVT, Res);
  assert(N->isUnindexed() && "Indexed store of one-element vector?");
  assert(OpNo == 1 && "Do not know how to scalarize this operand!");

  if (N->isTruncatingStore())
        N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
        N->getBasePtr(), N->getPointerInfo(),
        N->getMemoryVT().getVectorElementType(), N->getOriginalAlign(),
        N->getMemOperand()->getFlags(), N->getAAInfo());

  return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
                      N->getBasePtr(), N->getPointerInfo(),
                      N->getOriginalAlign(), N->getMemOperand()->getFlags(),
SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
  assert(OpNo == 0 && "Wrong operand for scalarization!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                    N->getValueType(0).getVectorElementType(), Elt,

SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N,
  assert(OpNo == 1 && "Wrong operand for scalarization!");
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
                    { N->getOperand(0), Elt, N->getOperand(2) });
  ReplaceValueWith(SDValue(N, 0), Res);

  SDValue Elt = GetScalarizedVector(N->getOperand(0));
                     N->getValueType(0).getVectorElementType(), Elt);

SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N) {
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
                    {N->getOperand(0), Elt});
  ReplaceValueWith(SDValue(N, 0), Res);

  SDValue Res = GetScalarizedVector(N->getOperand(0));

SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
  SDValue Op = GetScalarizedVector(VecOp);
                     AccOp, Op, N->getFlags());

  SDValue LHS = GetScalarizedVector(N->getOperand(0));
  SDValue RHS = GetScalarizedVector(N->getOperand(1));
  EVT ResVT = N->getValueType(0).getVectorElementType();
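// Entry point for result splitting: result ResNo of N has a vector type that
// is not legal and is split into two half-sized vectors. The handlers compute
// the Lo/Hi halves, which are recorded with SetSplitVector. For example, an
// <8 x i64> ADD on a target whose widest legal type is <4 x i64> becomes two
// <4 x i64> ADDs.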
void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
  if (CustomLowerNode(N, N->getValueType(ResNo), true))

  switch (N->getOpcode()) {
    dbgs() << "SplitVectorResult #" << ResNo << ": ";
  case ISD::VP_SELECT: SplitRes_Select(N, Lo, Hi); break;
  case ISD::EXPERIMENTAL_VP_SPLAT: SplitVecRes_VP_SPLAT(N, Lo, Hi); break;
    SplitVecRes_ScalarOp(N, Lo, Hi);
    SplitVecRes_STEP_VECTOR(N, Lo, Hi);
    SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
    SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(N), Lo, Hi);
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N), Lo, Hi);
    SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(N), Lo, Hi);
  case ISD::VP_GATHER:
    SplitVecRes_Gather(cast<MemSDNode>(N), Lo, Hi, true);
    SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi);
    SplitVecRes_SETCC(N, Lo, Hi);
    SplitVecRes_VECTOR_REVERSE(N, Lo, Hi);
    SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
    SplitVecRes_VECTOR_SPLICE(N, Lo, Hi);
    SplitVecRes_VECTOR_DEINTERLEAVE(N);
    SplitVecRes_VECTOR_INTERLEAVE(N);
    SplitVecRes_VAARG(N, Lo, Hi);
    SplitVecRes_ExtVecInRegOp(N, Lo, Hi);
  case ISD::VP_BITREVERSE:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::VP_FFLOOR:
  case ISD::VP_FNEARBYINT:
  case ISD::VP_FP_EXTEND:
  case ISD::VP_FP_ROUND:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::VP_LLRINT:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_TRUNCATE:
  case ISD::VP_UINT_TO_FP:
    SplitVecRes_UnaryOp(N, Lo, Hi);
    SplitVecRes_ADDRSPACECAST(N, Lo, Hi);
    SplitVecRes_FFREXP(N, ResNo, Lo, Hi);
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_ZERO_EXTEND:
    SplitVecRes_ExtendOp(N, Lo, Hi);
  case ISD::VP_FMINNUM:
  case ISD::VP_FMAXNUM:
  case ISD::VP_FMINIMUM:
  case ISD::VP_FMAXIMUM:
  case ISD::OR: case ISD::VP_OR:
  case ISD::VP_FCOPYSIGN:
    SplitVecRes_BinOp(N, Lo, Hi);
    SplitVecRes_TernaryOp(N, Lo, Hi);
    SplitVecRes_CMP(N, Lo, Hi);
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    SplitVecRes_StrictFPOp(N, Lo, Hi);
    SplitVecRes_FP_TO_XINT_SAT(N, Lo, Hi);
    SplitVecRes_OverflowOp(N, ResNo, Lo, Hi);
    SplitVecRes_FIX(N, Lo, Hi);
  case ISD::EXPERIMENTAL_VP_REVERSE:
    SplitVecRes_VP_REVERSE(N, Lo, Hi);
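// Advance Ptr past the low half of a split memory access (by the store size
// of MemVT) and update the MachinePointerInfo accordingly; the add is marked
// no-unsigned-wrap since it stays within the original object.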
void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,
                  DL, Ptr.getValueType(),
                  APInt(Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
    Flags.setNoUnsignedWrap(true);
      *ScaledOffset += IncrementSize;
    MPI = N->getPointerInfo().getWithOffset(IncrementSize);
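// Helper: split a mask vector into its low and high halves, reusing an
// already-split value when one is available.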
std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask) {
  return SplitMask(Mask, SDLoc(Mask));

std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask,
  EVT MaskVT = Mask.getValueType();
    GetSplitVector(Mask, MaskLo, MaskHi);
  return std::make_pair(MaskLo, MaskHi);
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);

  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() == 2) {

  assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");

  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(3), N->getValueType(0), dl);

                   {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
                   {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);

  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
  GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi);

  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() == 3) {

  assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");

  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(4), N->getValueType(0), dl);

                   {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
                   {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);

  SDValue LHSLo, LHSHi, RHSLo, RHSHi;
    GetSplitVector(LHS, LHSLo, LHSHi);
    GetSplitVector(RHS, RHSLo, RHSHi);
    std::tie(LHSLo, LHSHi) = DAG.SplitVector(LHS, dl);
    std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, dl);

  EVT SplitResVT = N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
  Lo = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
  Hi = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);

  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
  unsigned Opcode = N->getOpcode();
  switch (getTypeAction(InVT)) {
    GetExpandedOp(InOp, Lo, Hi);
    GetSplitVector(InOp, Lo, Hi);
    SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);

  assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS");
  unsigned NumSubvectors = N->getNumOperands() / 2;
  if (NumSubvectors == 1) {
    Lo = N->getOperand(0);
    Hi = N->getOperand(1);

void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
    GetSplitVector(Vec, Lo, Hi);
    EVT LoVT = Lo.getValueType();
    unsigned IdxVal = Idx->getAsZExtVal();
    if (IdxVal + SubElems <= LoElems) {
        IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {

  Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, PtrInfo,

  auto *Load = cast<LoadSDNode>(Lo);
  IncrementPointer(Load, LoVT, MPI, StackPtr);

  Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);

  EVT RHSVT = RHS.getValueType();
    GetSplitVector(RHS, RHSLo, RHSHi);

  SDValue FpValue = N->getOperand(0);
    GetSplitVector(FpValue, ArgLo, ArgHi);

  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  std::tie(LoVT, HiVT) =

  unsigned Opcode = N->getOpcode();
  GetSplitVector(N0, InLo, InHi);

  EVT OutLoVT, OutHiVT;
  assert((2 * OutNumElements) <= InNumElements &&
         "Illegal extend vector in reg split");

  for (unsigned i = 0; i != OutNumElements; ++i)
    SplitHi[i] = i + OutNumElements;

  Lo = DAG.getNode(Opcode, dl, OutLoVT, InLo);
  Hi = DAG.getNode(Opcode, dl, OutHiVT, InHi);

  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 1; i < NumOps; ++i) {
    EVT InVT = Op.getValueType();
      GetSplitVector(Op, OpLo, OpHi);

  EVT LoValueVTs[] = {LoVT, MVT::Other};
  EVT HiValueVTs[] = {HiVT, MVT::Other};
                      Lo.getValue(1), Hi.getValue(1));
  ReplaceValueWith(SDValue(N, 1), Chain);
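// Unroll a strict FP vector operation into per-element scalar operations,
// threading the chain through each element op and merging the chains at the
// end so the original chain result can be replaced.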
SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(SDNode *N, unsigned ResNE) {
  EVT VT = N->getValueType(0);
  else if (NE > ResNE)
  EVT ChainVTs[] = {EltVT, MVT::Other};

  for (i = 0; i != NE; ++i) {
    for (unsigned j = 1, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
    Scalar.getNode()->setFlags(N->getFlags());

  for (; i < ResNE; ++i)

  ReplaceValueWith(SDValue(N, 1), Chain);
void DAGTypeLegalizer::SplitVecRes_OverflowOp(SDNode *N, unsigned ResNo,
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT LoResVT, HiResVT, LoOvVT, HiOvVT;

  SDValue LoLHS, HiLHS, LoRHS, HiRHS;
    GetSplitVector(N->getOperand(0), LoLHS, HiLHS);
    GetSplitVector(N->getOperand(1), LoRHS, HiRHS);

  unsigned Opcode = N->getOpcode();

  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    SetSplitVector(SDValue(N, OtherNo),
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);

void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
  GetSplitVector(Vec, Lo, Hi);
    unsigned IdxVal = CIdx->getZExtValue();
    unsigned LoNumElts = Lo.getValueType().getVectorMinNumElements();
    if (IdxVal < LoNumElts) {
                       Lo.getValueType(), Lo, Elt, Idx);

  Lo = DAG.getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);

  auto Load = cast<LoadSDNode>(Lo);
  IncrementPointer(Load, LoVT, MPI, StackPtr);

  Hi = DAG.getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);

  if (LoVT != Lo.getValueType())
  if (HiVT != Hi.getValueType())
  assert(N->getValueType(0).isScalableVector() &&
         "Only scalable vectors are supported for STEP_VECTOR");

  Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0));

  auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1));
  auto [EVLLo, EVLHi] = DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
  Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0), MaskLo, EVLLo);
  Hi = DAG.getNode(N->getOpcode(), dl, HiVT, N->getOperand(0), MaskHi, EVLHi);

  EVT MemoryVT = LD->getMemoryVT();
  EVT LoMemVT, HiMemVT;
    ReplaceValueWith(SDValue(LD, 1), NewChain);
                   LD->getPointerInfo(), LoMemVT, LD->getOriginalAlign(),
  IncrementPointer(LD, LoMemVT, MPI, Ptr);
                   HiMemVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
  ReplaceValueWith(SDValue(LD, 1), Ch);
  assert(LD->isUnindexed() && "Indexed VP load during type legalization!");
  assert(Offset.isUndef() && "Unexpected indexed variable-length load offset");
  Align Alignment = LD->getOriginalAlign();
  EVT MemoryVT = LD->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, LD->getValueType(0), dl);
                      MaskLo, EVLLo, LoMemVT, MMO, LD->isExpandingLoad());
                      LD->isExpandingLoad());
    MPI = LD->getPointerInfo().getWithOffset(
        Alignment, LD->getAAInfo(), LD->getRanges());
                      Offset, MaskHi, EVLHi, HiMemVT, MMO,
                      LD->isExpandingLoad());
  ReplaceValueWith(SDValue(LD, 1), Ch);

         "Indexed VP strided load during type legalization!");
         "Unexpected indexed variable-length load offset");
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
    SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask);
    GetSplitVector(Mask, LoMask, HiMask);
  std::tie(LoEVL, HiEVL) =
                  SLD->getStride(), HiMask, HiEVL, HiMemVT, MMO,
  ReplaceValueWith(SDValue(SLD, 1), Ch);

  assert(Offset.isUndef() && "Unexpected indexed masked load offset");
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);

  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =

  SDValue PassThruLo, PassThruHi;
    GetSplitVector(PassThru, PassThruLo, PassThruHi);
    std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
  ReplaceValueWith(SDValue(MLD, 1), Ch);
  if (auto *MSC = dyn_cast<MaskedGatherSDNode>(N)) {
    return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
  auto *VPSC = cast<VPGatherSDNode>(N);
  return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};

  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  if (SplitSETCC && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);

  EVT LoMemVT, HiMemVT;
  if (getTypeAction(Ops.Index.getValueType()) ==
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, dl);

  if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) {
    SDValue PassThru = MGT->getPassThru();
    SDValue PassThruLo, PassThruHi;
      GetSplitVector(PassThru, PassThruLo, PassThruHi);
      std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);

    SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Ops.Scale};
                             OpsLo, MMO, IndexTy, ExtType);
    SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Ops.Scale};
                             OpsHi, MMO, IndexTy, ExtType);
    auto *VPGT = cast<VPGatherSDNode>(N);
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);

    SDValue OpsLo[] = {Ch, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
                           MMO, VPGT->getIndexType());
    SDValue OpsHi[] = {Ch, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
                           MMO, VPGT->getIndexType());
  ReplaceValueWith(SDValue(N, 1), Ch);

  EVT VecVT = N->getValueType(0);
  bool HasCustomLowering = false;
      HasCustomLowering = true;
  SDValue Passthru = N->getOperand(2);
  if (!HasCustomLowering || !Passthru.isUndef()) {
    std::tie(LoMask, HiMask) = SplitMask(N->getOperand(1));
        MF, cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex());
    Chain = DAG.getStore(Chain, DL, Lo, StackPtr, PtrInfo);
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operand types must be vectors");

  if (getTypeAction(N->getOperand(0).getValueType()) ==
    GetSplitVector(N->getOperand(0), LL, LH);
  if (getTypeAction(N->getOperand(1).getValueType()) ==
    GetSplitVector(N->getOperand(1), RL, RH);

    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
    assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2), MaskLo,
    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2), MaskHi,
  EVT InVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), Lo, Hi);

  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() <= 2) {
    Lo = DAG.getNode(Opcode, dl, LoVT, Lo, N->getOperand(1), Flags);
    Hi = DAG.getNode(Opcode, dl, HiVT, Hi, N->getOperand(1), Flags);

  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");

  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);

  EVT InVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), Lo, Hi);

  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
  unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
  unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();

void DAGTypeLegalizer::SplitVecRes_FFREXP(SDNode *N, unsigned ResNo,
  EVT InVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), Lo, Hi);

  Lo = DAG.getNode(N->getOpcode(), dl, {LoVT, LoVT1}, Lo);
  Hi = DAG.getNode(N->getOpcode(), dl, {HiVT, HiVT1}, Hi);
  Lo->setFlags(N->getFlags());
  Hi->setFlags(N->getFlags());

  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  EVT SrcVT = N->getOperand(0).getValueType();
  EVT DestVT = N->getValueType(0);
    EVT SplitLoVT, SplitHiVT;
    LLVM_DEBUG(dbgs() << "Split vector extend via incremental extend:";
               N->dump(&DAG); dbgs() << "\n");
    if (!N->isVPOpcode()) {
          DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0));
          DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0),
                      N->getOperand(1), N->getOperand(2));
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
    Lo = DAG.getNode(N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
    Hi = DAG.getNode(N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
  SplitVecRes_UnaryOp(N, Lo, Hi);
  GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
  GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
    return N.getResNo() == 0 &&
  auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &DL](SDValue &Input1,
           "Expected build vector node.");
    for (unsigned I = 0; I < NewElts; ++I) {
        Ops[I] = Input2.getOperand(Idx - NewElts);
        Ops[I] = Input1.getOperand(Idx);
      if (Ops[I].getValueType().bitsGT(EltVT))
    return DAG.getBuildVector(NewVT, DL, Ops);

  auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT, this, NewElts,
    for (unsigned Idx = 0; Idx < std::size(Inputs); ++Idx) {
      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode());
    for (auto &P : ShufflesIdxs) {
      if (P.second.size() < 2)
      for (int &Idx : Mask) {
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
            dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
        int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
        Idx = MaskElt % NewElts +
              P.second[Shuffle->getOperand(MaskElt / NewElts) == P.first.first
      Inputs[P.second[0]] = P.first.first;
      Inputs[P.second[1]] = P.first.second;
      ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear();
    for (int &Idx : Mask) {
      unsigned SrcRegIdx = Idx / NewElts;
      if (Inputs[SrcRegIdx].isUndef()) {
          getTypeAction(Inputs[SrcRegIdx].getValueType());
          Inputs[SrcRegIdx].getNumOperands() == 2 &&
          !Inputs[SrcRegIdx].getOperand(1).isUndef() &&
        UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
    if (UsedSubVector.count() > 1) {
      for (unsigned I = 0; I < std::size(Inputs); ++I) {
        if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1))
        if (Pairs.empty() || Pairs.back().size() == 2)
        if (UsedSubVector.test(2 * I)) {
          Pairs.back().emplace_back(I, 0);
          assert(UsedSubVector.test(2 * I + 1) &&
                 "Expected to be used one of the subvectors.");
          Pairs.back().emplace_back(I, 1);
      if (!Pairs.empty() && Pairs.front().size() > 1) {
        for (int &Idx : Mask) {
          unsigned SrcRegIdx = Idx / NewElts;
              Pairs, [SrcRegIdx](ArrayRef<std::pair<unsigned, int>> Idxs) {
                return Idxs.front().first == SrcRegIdx ||
                       Idxs.back().first == SrcRegIdx;
          if (It == Pairs.end())
          Idx = It->front().first * NewElts + (Idx % NewElts) % (NewElts / 2) +
                (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
        for (ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
          Inputs[Idxs.front().first] = DAG.getNode(
              Inputs[Idxs.front().first].getValueType(),
              Inputs[Idxs.front().first].getOperand(Idxs.front().second),
              Inputs[Idxs.back().first].getOperand(Idxs.back().second));
    for (unsigned I = 0; I < std::size(Inputs); ++I) {
      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode());
      if (Shuffle->getOperand(0).getValueType() != NewVT)
      if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
          !Shuffle->isSplat()) {
      } else if (!Inputs[I].hasOneUse() &&
                 !Shuffle->getOperand(1).isUndef()) {
        for (int &Idx : Mask) {
          unsigned SrcRegIdx = Idx / NewElts;
          int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
          int OpIdx = MaskElt / NewElts;
        for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
          if (Shuffle->getOperand(OpIdx).isUndef())
          auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
          if (It == std::end(Inputs))
          int FoundOp = std::distance(std::begin(Inputs), It);
          for (int &Idx : Mask) {
            unsigned SrcRegIdx = Idx / NewElts;
            int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
            int MaskIdx = MaskElt / NewElts;
            if (OpIdx == MaskIdx)
              Idx = MaskElt % NewElts + FoundOp * NewElts;
          Op = (OpIdx + 1) % 2;
        for (int &Idx : Mask) {
          unsigned SrcRegIdx = Idx / NewElts;
          int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
          int OpIdx = MaskElt / NewElts;
          Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
  TryPeekThroughShufflesInputs(OrigMask);

  auto &&MakeUniqueInputs = [&Inputs, &IsConstant,
    for (const auto &I : Inputs) {
        UniqueConstantInputs.insert(I);
      else if (!I.isUndef())
    if (UniqueInputs.size() != std::size(Inputs)) {
      auto &&UniqueVec = UniqueInputs.takeVector();
      auto &&UniqueConstantVec = UniqueConstantInputs.takeVector();
      unsigned ConstNum = UniqueConstantVec.size();
      for (int &Idx : Mask) {
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
        const auto It = find(UniqueConstantVec, Inputs[SrcRegIdx]);
        if (It != UniqueConstantVec.end()) {
                NewElts * std::distance(UniqueConstantVec.begin(), It);
          assert(Idx >= 0 && "Expected defined mask idx.");
        const auto RegIt = find(UniqueVec, Inputs[SrcRegIdx]);
        assert(RegIt != UniqueVec.end() && "Cannot find non-const value.");
              NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
        assert(Idx >= 0 && "Expected defined mask idx.");
      copy(UniqueConstantVec, std::begin(Inputs));
      copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
  MakeUniqueInputs(OrigMask);
  copy(Inputs, std::begin(OrigInputs));

    unsigned FirstMaskIdx = High * NewElts;
    assert(!Output && "Expected default initialized initial value.");
    TryPeekThroughShufflesInputs(Mask);
    MakeUniqueInputs(Mask);
    copy(Inputs, std::begin(TmpInputs));

    bool SecondIteration = false;
    auto &&AccumulateResults = [&UsedIdx, &SecondIteration](unsigned Idx) {
      if (UsedIdx >= 0 && static_cast<unsigned>(UsedIdx) == Idx)
        SecondIteration = true;
      return SecondIteration;

        Mask, std::size(Inputs), std::size(Inputs),
        [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
        [&Output, &DAG = DAG, NewVT, &DL, &Inputs,
            Output = BuildVector(Inputs[Idx], Inputs[Idx], Mask);
            Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx],
                                          DAG.getUNDEF(NewVT), Mask);
          Inputs[Idx] = Output;
        [&AccumulateResults, &Output, &DAG = DAG, NewVT, &DL, &Inputs,
          if (AccumulateResults(Idx1)) {
              Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
              Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1],
                                            Inputs[Idx2], Mask);
              Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
              Output = DAG.getVectorShuffle(NewVT, DL, TmpInputs[Idx1],
                                            TmpInputs[Idx2], Mask);
          Inputs[Idx1] = Output;
  copy(OrigInputs, std::begin(Inputs));
  EVT OVT = N->getValueType(0);
  const Align Alignment =
      DAG.getDataLayout().getABITypeAlign(NVT.getTypeForEVT(*DAG.getContext()));
  Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, SV, Alignment.value());
  Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, SV, Alignment.value());
  Chain = Hi.getValue(1);

  ReplaceValueWith(SDValue(N, 1), Chain);

  EVT DstVTLo, DstVTHi;
  std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(N->getValueType(0));
  EVT SrcVT = N->getOperand(0).getValueType();
    GetSplitVector(N->getOperand(0), SrcLo, SrcHi);
    std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(N, 0);
  Lo = DAG.getNode(N->getOpcode(), dl, DstVTLo, SrcLo, N->getOperand(1));
  Hi = DAG.getNode(N->getOpcode(), dl, DstVTHi, SrcHi, N->getOperand(1));
  GetSplitVector(N->getOperand(0), InLo, InHi);
  std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL);

  EVT VT = N->getValueType(0);
  Align Alignment = DAG.getReducedAlign(VT, false);
  auto &MF = DAG.getMachineFunction();
                               DAG.getConstant(1, DL, PtrVT));
                               DAG.getConstant(EltWidth, DL, PtrVT));
  SDValue Stride = DAG.getConstant(-(int64_t)EltWidth, DL, PtrVT);

  SDValue TrueMask = DAG.getBoolConstant(true, DL, Mask.getValueType(), VT);
  SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(), DL, Val, StorePtr,
                                        DAG.getUNDEF(PtrVT), Stride, TrueMask,

  SDValue Load = DAG.getLoadVP(VT, DL, Store, StackPtr, Mask, EVL, LoadMMO);
  std::tie(Lo, Hi) = DAG.SplitVector(Load, DL);
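// (De)interleave nodes are split by splitting both operands and emitting one
// (de)interleave per half; both results of the original node are registered
// with SetSplitVector.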
void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) {
  SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
                               DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
                               DAG.getVTList(VT, VT), Op1Lo, Op1Hi);

void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(SDNode *N) {
  SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
  GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
  GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
                   DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
                   DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};

  SetSplitVector(SDValue(N, 0), Res[0].getValue(0), Res[0].getValue(1));
  SetSplitVector(SDValue(N, 1), Res[1].getValue(0), Res[1].getValue(1));
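// Entry point for operand splitting: operand OpNo of N has an illegal vector
// type and must be rewritten in terms of its two half-sized pieces.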
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))

  switch (N->getOpcode()) {
    dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
  case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
  case ISD::VP_TRUNCATE:
    Res = SplitVecOp_TruncateHelper(N);
  case ISD::VP_FP_ROUND:
    Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
    Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(N), OpNo);
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(N), OpNo);
    Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
  case ISD::VP_SCATTER:
    Res = SplitVecOp_Scatter(cast<MemSDNode>(N), OpNo);
  case ISD::VP_GATHER:
    Res = SplitVecOp_Gather(cast<MemSDNode>(N), OpNo);
    Res = SplitVecOp_VSELECT(N, OpNo);
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_UINT_TO_FP:
    if (N->getValueType(0).bitsLT(
            N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType()))
      Res = SplitVecOp_TruncateHelper(N);
      Res = SplitVecOp_UnaryOp(N);
    Res = SplitVecOp_FP_TO_XINT_SAT(N);
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
    Res = SplitVecOp_UnaryOp(N);
    Res = SplitVecOp_FPOpDifferentTypes(N);
    Res = SplitVecOp_CMP(N);
    Res = SplitVecOp_ExtVecInRegOp(N);
    Res = SplitVecOp_VECREDUCE(N, OpNo);
    Res = SplitVecOp_VECREDUCE_SEQ(N);
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = SplitVecOp_VP_REDUCE(N, OpNo);
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = SplitVecOp_VP_CttzElements(N);

  if (!Res.getNode())
    return false;

  if (N->isStrictFPOpcode())
           "Invalid operand expansion");
           "Invalid operand expansion");

  ReplaceValueWith(SDValue(N, 0), Res);
SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
  assert(OpNo == 0 && "Illegal operand must be mask");
  assert(Mask.getValueType().isVector() && "VSELECT without a vector mask?");

  GetSplitVector(N->getOperand(0), Lo, Hi);
  assert(Lo.getValueType() == Hi.getValueType() &&
         "Lo and Hi have differing types");

  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
  assert(LoOpVT == HiOpVT && "Asymmetric vector split?");

  SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
  std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL);
  std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL);
  std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);

SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) {
  EVT ResVT = N->getValueType(0);
  SDValue VecOp = N->getOperand(OpNo);
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
  return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, N->getFlags());

  EVT ResVT = N->getValueType(0);
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);
  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
  return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);

SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo) {
  assert(N->isVPOpcode() && "Expected VP opcode");
  assert(OpNo == 1 && "Can only split reduce vector operand");

  unsigned Opc = N->getOpcode();
  EVT ResVT = N->getValueType(0);
  SDValue VecOp = N->getOperand(OpNo);
  assert(VecVT.isVector() && "Can only split reduce vector operand");
  GetSplitVector(VecOp, Lo, Hi);

  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(N->getOperand(3), VecVT, dl);

      DAG.getNode(Opc, dl, ResVT, {N->getOperand(0), Lo, MaskLo, EVLLo}, Flags);
  return DAG.getNode(Opc, dl, ResVT, {ResLo, Hi, MaskHi, EVLHi}, Flags);
  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi);
  EVT InVT = Lo.getValueType();

  if (N->isStrictFPOpcode()) {
    Lo = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other },
                     { N->getOperand(0), Lo });
    Hi = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other },
                     { N->getOperand(0), Hi });
    ReplaceValueWith(SDValue(N, 1), Ch);
  } else if (N->getNumOperands() == 3) {
    assert(N->isVPOpcode() && "Expected VP opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo, MaskLo, EVLLo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi, MaskHi, EVLHi);
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi);

  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(0), Lo, Hi);
  auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
  Lo = BitConvertToInteger(Lo);
  Hi = BitConvertToInteger(Hi);
  if (DAG.getDataLayout().isBigEndian())

  assert(OpNo == 1 && "Invalid OpNo; can only split SubVec.");
  EVT ResVT = N->getValueType(0);
  GetSplitVector(SubVec, Lo, Hi);

  uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
                  DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
  return SecondInsertion;
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  EVT SubVT = N->getValueType(0);
  GetSplitVector(N->getOperand(0), Lo, Hi);

  uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
  if (IdxVal < LoEltsMin) {
           "Extracted subvector crosses vector split!");
        N->getOperand(0).getValueType().isScalableVector())
                       DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
         "Extracting scalable subvector from fixed-width unsupported");
         "subvector from a scalable predicate vector");

  Align SmallestAlign = DAG.getReducedAlign(VecVT, false);
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
      SubVT, dl, Store, StackPtr,

SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  GetSplitVector(Vec, Lo, Hi);

  uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
  if (IdxVal < LoElts)
                       DAG.getConstant(IdxVal - LoElts, SDLoc(N),
                                       Idx.getValueType())), 0);

  if (CustomLowerNode(N, N->getValueType(0), true))
    return DAG.getAnyExtOrTrunc(NewExtract, dl, N->getValueType(0));

  Align SmallestAlign = DAG.getReducedAlign(VecVT, false);
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,

  assert(N->getValueType(0).bitsGE(EltVT) && "Illegal EXTRACT_VECTOR_ELT.");
  return DAG.getExtLoad(

  SplitVecRes_ExtVecInRegOp(N, Lo, Hi);

  SplitVecRes_Gather(N, Lo, Hi);
  ReplaceValueWith(SDValue(N, 0), Res);
  assert(N->isUnindexed() && "Indexed vp_store of vector?");
  assert(Offset.isUndef() && "Unexpected VP store offset");
  SDValue EVL = N->getVectorLength();
  Align Alignment = N->getOriginalAlign();

    GetSplitVector(Data, DataLo, DataHi);
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);

    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);

  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);

  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, Data.getValueType(), DL);

  Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO,
                      N->getAddressingMode(), N->isTruncatingStore(),
                      N->isCompressingStore());
                        N->isCompressingStore());
  MPI = N->getPointerInfo().getWithOffset(
  MMO = DAG.getMachineFunction().getMachineMemOperand(
      Alignment, N->getAAInfo(), N->getRanges());

  Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT, MMO,
                      N->getAddressingMode(), N->isTruncatingStore(),
                      N->isCompressingStore());

  assert(N->isUnindexed() && "Indexed vp_strided_store of a vector?");
  assert(N->getOffset().isUndef() && "Unexpected VP strided store offset");

    GetSplitVector(Data, LoData, HiData);
    std::tie(LoData, HiData) = DAG.SplitVector(Data, DL);

  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
    SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask);
  else if (getTypeAction(Mask.getValueType()) ==
    GetSplitVector(Mask, LoMask, HiMask);
    std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);

  std::tie(LoEVL, HiEVL) =
      DAG.SplitEVL(N->getVectorLength(), Data.getValueType(), DL);

      N->getChain(), DL, LoData, N->getBasePtr(), N->getOffset(),
      N->getStride(), LoMask, LoEVL, LoMemVT, N->getMemOperand(),
      N->getAddressingMode(), N->isTruncatingStore(), N->isCompressingStore());

  EVT PtrVT = N->getBasePtr().getValueType();
      DAG.getSExtOrTrunc(N->getStride(), DL, PtrVT));

  Align Alignment = N->getOriginalAlign();
      Alignment, N->getAAInfo(), N->getRanges());

      N->getChain(), DL, HiData, Ptr, N->getOffset(), N->getStride(), HiMask,
      HiEVL, HiMemVT, MMO, N->getAddressingMode(), N->isTruncatingStore(),
      N->isCompressingStore());
  assert(N->isUnindexed() && "Indexed masked store of vector?");
  assert(Offset.isUndef() && "Unexpected indexed masked store offset");
  Align Alignment = N->getOriginalAlign();

    GetSplitVector(Data, DataLo, DataHi);
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);

    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
      GetSplitVector(Mask, MaskLo, MaskHi);
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);

  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);

  Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO,
                          N->getAddressingMode(), N->isTruncatingStore(),
                          N->isCompressingStore());
                            N->isCompressingStore());
    MPI = N->getPointerInfo().getWithOffset(
    MMO = DAG.getMachineFunction().getMachineMemOperand(
        Alignment, N->getAAInfo(), N->getRanges());

    Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO,
                            N->getAddressingMode(), N->isTruncatingStore(),
                            N->isCompressingStore());
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
    return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
  auto *VPSC = cast<VPScatterSDNode>(N);
  return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),

  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
    GetSplitVector(Ops.Data, DataLo, DataHi);
    std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data, DL);

  if (OpNo == 1 && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, DL);

  if (getTypeAction(Ops.Index.getValueType()) ==
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, DL);

  if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
    SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale};
        DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                             MSC->getIndexType(), MSC->isTruncatingStore());

    SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Ops.Scale};
    return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi,
                                MMO, MSC->getIndexType(),
                                MSC->isTruncatingStore());
  auto *VPSC = cast<VPScatterSDNode>(N);
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(), DL);

  SDValue OpsLo[] = {Ch, DataLo, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
  Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                        VPSC->getIndexType());

  SDValue OpsHi[] = {Lo, DataHi, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
  return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi, MMO,
                          VPSC->getIndexType());
  assert(N->isUnindexed() && "Indexed store of vector?");
  assert(OpNo == 1 && "Can only split the stored value");

  bool isTruncating = N->isTruncatingStore();
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getOriginalAlign();
  GetSplitVector(N->getOperand(1), Lo, Hi);
  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

    Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT,
                           Alignment, MMOFlags, AAInfo);
    Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags,

  IncrementPointer(N, LoMemVT, MPI, Ptr);

    Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, MPI,
                           HiMemVT, Alignment, MMOFlags, AAInfo);
    Hi = DAG.getStore(Ch, DL, Hi, Ptr, MPI, Alignment, MMOFlags, AAInfo);

  EVT EltVT = N->getValueType(0).getVectorElementType();
  for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
                              DAG.getVectorIdxConstant(i, DL)));
  return DAG.getBuildVector(N->getValueType(0), DL, Elts);
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SDValue InVec = N->getOperand(OpNo);
  EVT OutVT = N->getValueType(0);

  EVT LoOutVT, HiOutVT;
  std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
  assert(LoOutVT == HiOutVT && "Unequal split?");

  if (isTypeLegal(LoOutVT) ||
      InElementSize <= OutElementSize * 2)
    return SplitVecOp_UnaryOp(N);
    return SplitVecOp_UnaryOp(N);
  GetSplitVector(InVec, InLoVec, InHiVec);

  EVT HalfElementVT = IsFloat ?
    EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);

  if (N->isStrictFPOpcode()) {
    HalfLo = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InLoVec});
    HalfHi = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InHiVec});
    HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
    HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);

  if (N->isStrictFPOpcode()) {
         DAG.getTargetConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()))});
        DAG.getTargetConstant(
  assert(N->getValueType(0).isVector() &&
         N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
         "Operand types must be vectors");

  SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
  GetSplitVector(N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
  GetSplitVector(N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);

                        DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Lo0, Lo1, N->getOperand(3));
                        DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Hi0, Hi1, N->getOperand(3));
    ReplaceValueWith(SDValue(N, 1), NewChain);
    assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
    LoRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Lo0, Lo1,
                        N->getOperand(2), MaskLo, EVLLo);
    HiRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Hi0, Hi1,
                        N->getOperand(2), MaskHi, EVLHi);

  EVT OpVT = N->getOperand(0).getValueType();
  return DAG.getNode(ExtendCode, DL, N->getValueType(0), Con);
  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi);
  EVT InVT = Lo.getValueType();

  if (N->isStrictFPOpcode()) {
    Lo = DAG.getNode(N->getOpcode(), DL, { OutVT, MVT::Other },
                     { N->getOperand(0), Lo, N->getOperand(2) });
    Hi = DAG.getNode(N->getOpcode(), DL, { OutVT, MVT::Other },
                     { N->getOperand(0), Hi, N->getOperand(2) });
                               Lo.getValue(1), Hi.getValue(1));
    ReplaceValueWith(SDValue(N, 1), NewChain);
  } else if (N->getOpcode() == ISD::VP_FP_ROUND) {
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), DL);
    Lo = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Lo, MaskLo, EVLLo);
    Hi = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Hi, MaskHi, EVLHi);

SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(SDNode *N) {
  EVT LHSLoVT, LHSHiVT;
  std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(N->getValueType(0));

  if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
    return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());

  std::tie(LHSLo, LHSHi) =
      DAG.SplitVector(N->getOperand(0), DL, LHSLoVT, LHSHiVT);
  std::tie(RHSLo, RHSHi) = DAG.SplitVector(N->getOperand(1), DL);

  SDValue Lo = DAG.getNode(N->getOpcode(), DL, LHSLoVT, LHSLo, RHSLo);
  SDValue Hi = DAG.getNode(N->getOpcode(), DL, LHSHiVT, LHSHi, RHSHi);
  SDValue LHSLo, LHSHi, RHSLo, RHSHi;
  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);

  EVT ResVT = N->getValueType(0);
  SDValue Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
  SDValue Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);

  EVT ResVT = N->getValueType(0);
  GetSplitVector(N->getOperand(0), Lo, Hi);
  EVT InVT = Lo.getValueType();

  Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, Lo, N->getOperand(1));
  Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, Hi, N->getOperand(1));

  EVT ResVT = N->getValueType(0);
  GetSplitVector(VecOp, Lo, Hi);

  auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1));
  auto [EVLLo, EVLHi] =
  SDValue VLo = DAG.getZExtOrTrunc(EVLLo, DL, ResVT);
      DAG.getSetCC(DL, getSetCCResultType(ResVT), ResLo, VLo, ISD::SETNE);
  return DAG.getSelect(DL, ResVT, ResLoNotEVL, ResLo,
                       DAG.getNode(ISD::ADD, DL, ResVT, VLo, ResHi));
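// Entry point for result widening: result ResNo of N has a vector type that
// is not legal and is widened to the next larger legal vector type, with the
// extra elements typically left undefined.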
void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
  LLVM_DEBUG(dbgs() << "Widen node result " << ResNo << ": "; N->dump(&DAG));

  if (CustomWidenLowerNode(N, N->getValueType(ResNo)))

  auto unrollExpandedOp = [&]() {
  EVT VT = N->getValueType(0);

  switch (N->getOpcode()) {
    dbgs() << "WidenVectorResult #" << ResNo << ": ";
    Res = WidenVecRes_ADDRSPACECAST(N);
    Res = WidenVecRes_INSERT_SUBVECTOR(N);
  case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecRes_ScalarOp(N);
  case ISD::VP_SELECT:
    Res = WidenVecRes_Select(N);
  case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
  case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
    Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
    Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(N));
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N));
    Res = WidenVecRes_VECTOR_COMPRESS(N);
    Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(N));
    Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(N));
  case ISD::VP_GATHER:
    Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(N));
    Res = WidenVecRes_VECTOR_REVERSE(N);
  case ISD::OR: case ISD::VP_OR:
  case ISD::VP_FMINNUM:
  case ISD::VP_FMAXNUM:
  case ISD::VP_FMINIMUM:
  case ISD::VP_FMAXIMUM:
  case ISD::VP_FCOPYSIGN:
    Res = WidenVecRes_Binary(N);
    Res = WidenVecRes_CMP(N);
    if (unrollExpandedOp())
    Res = WidenVecRes_BinaryCanTrap(N);
    Res = WidenVecRes_BinaryWithExtraScalarOp(N);
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    Res = WidenVecRes_StrictFP(N);
    Res = WidenVecRes_OverflowOp(N, ResNo);
    Res = WidenVecRes_FCOPYSIGN(N);
    Res = WidenVecRes_UnarySameEltsWithScalarArg(N);
    if (!unrollExpandedOp())
      Res = WidenVecRes_ExpOp(N);
    Res = WidenVecRes_EXTEND_VECTOR_INREG(N);
  case ISD::VP_FP_EXTEND:
  case ISD::VP_FP_ROUND:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_TRUNCATE:
  case ISD::VP_UINT_TO_FP:
  case ISD::VP_ZERO_EXTEND:
    Res = WidenVecRes_Convert(N);
    Res = WidenVecRes_FP_TO_XINT_SAT(N);
  case ISD::VP_LLRINT:
    Res = WidenVecRes_XRINT(N);
    if (unrollExpandedOp())
  case ISD::VP_BITREVERSE:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::VP_FFLOOR:
  case ISD::VP_FNEARBYINT:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
    Res = WidenVecRes_Unary(N);
    Res = WidenVecRes_Ternary(N);

  SetWidenedVector(SDValue(N, ResNo), Res);
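// Widening of simple arithmetic nodes: every vector operand is widened to
// the wide result type and the operation is re-emitted on the wide type.
// The VP forms carry an additional mask and explicit-vector-length operand,
// which are passed through with the mask widened to match.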
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  SDValue InOp3 = GetWidenedVector(N->getOperand(2));
  if (N->getNumOperands() == 3)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);

  assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");

  return DAG.getNode(N->getOpcode(), dl, WidenVT,
                     {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});

  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  if (N->getNumOperands() == 2)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2,

  assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");

  return DAG.getNode(N->getOpcode(), dl, WidenVT,
                     {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags());

  EVT OpVT = LHS.getValueType();
  LHS = GetWidenedVector(LHS);
  RHS = GetWidenedVector(RHS);
  OpVT = LHS.getValueType();

  return DAG.getNode(N->getOpcode(), dl, WidenResVT, LHS, RHS);
SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) {
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
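// CollectOpsToWiden stitches the partial results produced by the unrolling
// loops below back into a single value: equally sized pieces are merged,
// undef operands are used as padding, and the final CONCAT_VECTORS yields
// the widened vector type.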
                                 unsigned ConcatEnd, EVT VT, EVT MaxVT,
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
      return ConcatOps[0];

  SDLoc dl(ConcatOps[0]);

  while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
    int Idx = ConcatEnd - 1;
    VT = ConcatOps[Idx--].getValueType();
    while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT)

      unsigned NumToInsert = ConcatEnd - Idx - 1;
      for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
      ConcatOps[Idx+1] = VecOp;
      ConcatEnd = Idx + 2;

      unsigned RealVals = ConcatEnd - Idx - 1;
      unsigned SubConcatEnd = 0;
      unsigned SubConcatIdx = Idx + 1;
      while (SubConcatEnd < RealVals)
        SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
      while (SubConcatEnd < OpsToConcat)
        SubConcatOps[SubConcatEnd++] = undefVec;
                                            NextVT, SubConcatOps);
      ConcatEnd = SubConcatIdx + 1;

  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
      return ConcatOps[0];

  if (NumOps != ConcatEnd) {
    for (unsigned j = ConcatEnd; j < NumOps; ++j)
      ConcatOps[j] = UndefVal;
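// Binary operations that may trap (integer division, for instance) must not
// be evaluated on the undefined padding lanes introduced by widening.  If
// the wide operation cannot trap it is emitted directly; otherwise the input
// is processed in progressively smaller legal pieces.  For example, a v3i32
// sdiv widened to v4i32 would divide by a garbage lane, so it is instead
// emitted as a v2i32 sdiv plus a scalar sdiv and then reassembled.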
  unsigned Opcode = N->getOpcode();
    NumElts = NumElts / 2;

  if (NumElts != 1 && !TLI.canOpTrap(N->getOpcode(), VT)) {
    SDValue InOp1 = GetWidenedVector(N->getOperand(0));
    SDValue InOp2 = GetWidenedVector(N->getOperand(1));
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);

    SDValue InOp1 = GetWidenedVector(N->getOperand(0));
    SDValue InOp2 = GetWidenedVector(N->getOperand(1));
    SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
                              N->getValueType(0).getVectorElementCount());
    return DAG.getNode(*VPOpcode, dl, WidenVT, InOp1, InOp2, Mask, EVL,

  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  unsigned CurNumElts = N->getValueType(0).getVectorNumElements();

  unsigned ConcatEnd = 0;
  while (CurNumElts != 0) {
    while (CurNumElts >= NumElts) {
                                 DAG.getVectorIdxConstant(Idx, dl));
                                 DAG.getVectorIdxConstant(Idx, dl));
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
      CurNumElts -= NumElts;
      NumElts = NumElts / 2;
    for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
                                 InOp1, DAG.getVectorIdxConstant(Idx, dl));
                                 InOp2, DAG.getVectorIdxConstant(Idx, dl));
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
  switch (N->getOpcode()) {
    return WidenVecRes_STRICT_FSETCC(N);
    return WidenVecRes_Convert_StrictFP(N);

  unsigned NumOpers = N->getNumOperands();
  unsigned Opcode = N->getOpcode();
    NumElts = NumElts / 2;

  unsigned CurNumElts = N->getValueType(0).getVectorNumElements();

  unsigned ConcatEnd = 0;
  for (unsigned i = 1; i < NumOpers; ++i) {
      Oper = GetWidenedVector(Oper);
                         DAG.getUNDEF(WideOpVT), Oper,
                         DAG.getVectorIdxConstant(0, dl));

  while (CurNumElts != 0) {
    while (CurNumElts >= NumElts) {
      for (unsigned i = 0; i < NumOpers; ++i) {
        EVT OpVT = Op.getValueType();
                            DAG.getVectorIdxConstant(Idx, dl));

      EVT OperVT[] = {VT, MVT::Other};
      ConcatOps[ConcatEnd++] = Oper;
      CurNumElts -= NumElts;
      NumElts = NumElts / 2;
    for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
      for (unsigned i = 0; i < NumOpers; ++i) {
        EVT OpVT = Op.getValueType();
                            DAG.getVectorIdxConstant(Idx, dl));

      EVT WidenVT[] = {WidenEltVT, MVT::Other};
      ConcatOps[ConcatEnd++] = Oper;

  if (Chains.size() == 1)
    NewChain = Chains[0];

  ReplaceValueWith(SDValue(N, 1), NewChain);
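// Overflow-producing nodes have two results that must be kept in sync: the
// widened arithmetic result and the widened overflow value.  Whichever
// result is not the one being widened here is replaced via ReplaceValueWith
// so that all users see the same wide node.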
SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(SDNode *N, unsigned ResNo) {
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT WideResVT, WideOvVT;

    WideLHS = GetWidenedVector(N->getOperand(0));
    WideRHS = GetWidenedVector(N->getOperand(1));
                          N->getOperand(0), Zero);
                          N->getOperand(1), Zero);

  SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
  SDNode *WideNode = DAG.getNode(
      N->getOpcode(), DL, WideVTs, WideLHS, WideRHS).getNode();

  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);

  return SDValue(WideNode, ResNo);
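// Conversions (integer/FP casts, extensions, truncations) must keep the
// source and destination element counts identical.  When the widened source
// has the same element count as the widened result the conversion is emitted
// directly; otherwise the input is concatenated or extracted to a matching
// width, and as a last resort the conversion is unrolled element by element.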
  unsigned Opcode = N->getOpcode();
    InOp = ZExtPromotedInteger(InOp);
    InOp = GetWidenedVector(N->getOperand(0));

  if (InVTEC == WidenEC) {
    if (N->getNumOperands() == 1)
      return DAG.getNode(Opcode, DL, WidenVT, InOp);
    if (N->getNumOperands() == 3) {
      assert(N->isVPOpcode() && "Expected VP opcode");
      return DAG.getNode(Opcode, DL, WidenVT, InOp, Mask, N->getOperand(2));
    return DAG.getNode(Opcode, DL, WidenVT, InOp, N->getOperand(1), Flags);

    unsigned NumConcat =
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InVec);
      return DAG.getNode(Opcode, DL, WidenVT, InVec, N->getOperand(1), Flags);

                          DAG.getVectorIdxConstant(0, DL));
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InVal);
      return DAG.getNode(Opcode, DL, WidenVT, InVal, N->getOperand(1), Flags);

  unsigned MinElts = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < MinElts; ++i) {
                          DAG.getVectorIdxConstant(i, DL));
    if (N->getNumOperands() == 1)
      Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val);
      Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, N->getOperand(1), Flags);

  return DAG.getBuildVector(WidenVT, DL, Ops);
  EVT SrcVT = Src.getValueType();
    Src = GetWidenedVector(Src);
    SrcVT = Src.getValueType();

  return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, N->getOperand(1));

  EVT SrcVT = Src.getValueType();
    Src = GetWidenedVector(Src);
    SrcVT = Src.getValueType();

  if (N->getNumOperands() == 1)
    return DAG.getNode(N->getOpcode(), dl, WidenVT, Src);

  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");

  return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, Mask, N->getOperand(2));

SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(SDNode *N) {
  unsigned Opcode = N->getOpcode();
  std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};

  unsigned MinElts = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < MinElts; ++i) {
                          DAG.getVectorIdxConstant(i, DL));
    Ops[i] = DAG.getNode(Opcode, DL, EltVTs, NewOps);

  ReplaceValueWith(SDValue(N, 1), NewChain);

  return DAG.getBuildVector(WidenVT, DL, Ops);

SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(SDNode *N) {
  unsigned Opcode = N->getOpcode();
    InOp = GetWidenedVector(InOp);
    return DAG.getNode(Opcode, DL, WidenVT, InOp);

  for (unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i != e; ++i) {
                      DAG.getVectorIdxConstant(i, DL));

  while (Ops.size() != WidenNumElts)

  return DAG.getBuildVector(WidenVT, DL, Ops);

  if (N->getOperand(0).getValueType() == N->getOperand(1).getValueType())
    return WidenVecRes_BinaryCanTrap(N);

SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(SDNode *N) {
  SDValue FpValue = N->getOperand(0);
  SDValue Arg = GetWidenedVector(FpValue);
  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, {Arg, N->getOperand(1)},

  SDValue InOp = GetWidenedVector(N->getOperand(0));
  EVT ExpVT = RHS.getValueType();
    ExpOp = ModifyToType(RHS, WideExpVT);

  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, ExpOp);

  SDValue InOp = GetWidenedVector(N->getOperand(0));
  if (N->getNumOperands() == 1)
    return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, N->getFlags());

  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  assert(N->isVPOpcode() && "Expected VP opcode");

  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT,
                     {InOp, Mask, N->getOperand(2)});

                 cast<VTSDNode>(N->getOperand(1))->getVT().getVectorElementType(),
  SDValue WidenLHS = GetWidenedVector(N->getOperand(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N),
                     WidenVT, WidenLHS, DAG.getValueType(ExtVT));

SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo) {
  SDValue WidenVec = DisintegrateMERGE_VALUES(N, ResNo);
  return GetWidenedVector(WidenVec);
  SDValue InOp = GetWidenedVector(N->getOperand(0));
  auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);

  return DAG.getAddrSpaceCast(SDLoc(N), WidenVT, InOp,
                              AddrSpaceCastN->getSrcAddressSpace(),
                              AddrSpaceCastN->getDestAddressSpace());
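// Bitcasts are widened by first looking at how the input itself was
// legalized: a promoted or widened input that already has the right number
// of bits can be bitcast directly, otherwise the value is rebuilt from
// extracted pieces or, as a fallback, pushed through a stack temporary.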
  EVT VT = N->getValueType(0);

  switch (getTypeAction(InVT)) {
    SDValue NInOp = GetPromotedInteger(InOp);
    if (WidenVT.bitsEq(NInVT)) {
      if (DAG.getDataLayout().isBigEndian()) {
                           DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
    InOp = GetWidenedVector(InOp);
    if (WidenVT.bitsEq(InVT))

  if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
    unsigned NewNumParts = WidenSize / InSize;

  EVT OrigInVT = N->getOperand(0).getValueType();
    if (WidenSize % InSize == 0) {
      DAG.ExtractVectorElements(InOp, Ops);
      Ops.append(WidenSize / InScalarSize - Ops.size(),

  return CreateStackStoreLoad(InOp, WidenVT);

  EVT VT = N->getValueType(0);
  EVT EltVT = N->getOperand(0).getValueType();
  assert(WidenNumElts >= NumElts && "Shrinking vector instead of widening!");
  NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));

  return DAG.getBuildVector(WidenVT, dl, NewOps);

  EVT InVT = N->getOperand(0).getValueType();
  unsigned NumOperands = N->getNumOperands();
  bool InputWidened = false;
  if (WidenNumElts % NumInElts == 0) {
    unsigned NumConcat = WidenNumElts / NumInElts;
    SDValue UndefVal = DAG.getUNDEF(InVT);
    for (unsigned i = 0; i < NumOperands; ++i)
      Ops[i] = N->getOperand(i);
    for (unsigned i = NumOperands; i != NumConcat; ++i)
    InputWidened = true;
    for (i = 1; i < NumOperands; ++i)
      if (!N->getOperand(i).isUndef())
    if (i == NumOperands)
      return GetWidenedVector(N->getOperand(0));
    if (NumOperands == 2) {
             "Cannot use vector shuffles to widen CONCAT_VECTOR result");
      for (unsigned i = 0; i < NumInElts; ++i) {
        MaskOps[i + NumInElts] = i + WidenNumElts;
      return DAG.getVectorShuffle(WidenVT, dl,
                                  GetWidenedVector(N->getOperand(0)),
                                  GetWidenedVector(N->getOperand(1)),
         "Cannot use build vectors to widen CONCAT_VECTOR result");

  for (unsigned i = 0; i < NumOperands; ++i) {
      InOp = GetWidenedVector(InOp);
    for (unsigned j = 0; j < NumInElts; ++j)
                           DAG.getVectorIdxConstant(j, dl));

  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;
  return DAG.getBuildVector(WidenVT, dl, Ops);

SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));

SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  auto InOpTypeAction = getTypeAction(InOp.getValueType());
    InOp = GetWidenedVector(InOp);
  if (IdxVal == 0 && InVT == WidenVT)

  assert(IdxVal % VTNumElts == 0 &&
         "Expected Idx to be a multiple of subvector minimum vector length");
  if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)

    unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
    assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken "
                                  "down type's element count");
    for (; I < VTNumElts / GCD; ++I)
                      DAG.getVectorIdxConstant(IdxVal + I * GCD, dl)));
    for (; I < WidenNumElts / GCD; ++I)
                     "EXTRACT_SUBVECTOR for scalable vectors");

  for (i = 0; i < VTNumElts; ++i)
                         DAG.getVectorIdxConstant(IdxVal + i, dl));

  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; i < WidenNumElts; ++i)
  return DAG.getBuildVector(WidenVT, dl, Ops);
SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
                     N->getOperand(1), N->getOperand(2));
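// Loads are widened by loading the original memory type in legal chunks
// (GenWidenVectorLoads / GenWidenVectorExtLoads) and concatenating the
// pieces; the chain results of the partial loads are merged and the original
// load's chain use is redirected to the new token.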
  if (!LD->getMemoryVT().isByteSized()) {
    ReplaceValueWith(SDValue(LD, 1), NewChain);

  EVT LdVT = LD->getMemoryVT();
    const auto *MMO = LD->getMemOperand();
        DAG.getLoadVP(WideVT, DL, LD->getChain(), LD->getBasePtr(), Mask, EVL,

    Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
    Result = GenWidenVectorLoads(LdChain, LD);

  if (LdChain.size() == 1)
    NewChain = LdChain[0];

  ReplaceValueWith(SDValue(N, 1), NewChain);

  SDValue EVL = N->getVectorLength();
         "Unable to widen binary VP op");
  Mask = GetWidenedVector(Mask);
  assert(Mask.getValueType().getVectorElementCount() ==
         "Unable to widen vector load");

      DAG.getLoadVP(N->getAddressingMode(), ExtType, WidenVT, dl, N->getChain(),
                    N->getBasePtr(), N->getOffset(), Mask, EVL,
                    N->getMemoryVT(), N->getMemOperand(), N->isExpandingLoad());

         "Unable to widen VP strided load");
  Mask = GetWidenedVector(Mask);

  assert(Mask.getValueType().getVectorElementCount() ==
         "Data and mask vectors should have the same number of elements");

  SDValue Res = DAG.getStridedLoadVP(
      N->getAddressingMode(), N->getExtensionType(), WidenVT, DL, N->getChain(),
      N->getBasePtr(), N->getOffset(), N->getStride(), Mask,
      N->getVectorLength(), N->getMemoryVT(), N->getMemOperand(),
      N->isExpandingLoad());

SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_COMPRESS(SDNode *N) {
  SDValue Passthru = N->getOperand(2);
                                    Mask.getValueType().getVectorElementType(),
  SDValue WideVec = ModifyToType(Vec, WideVecVT);
  SDValue WideMask = ModifyToType(Mask, WideMaskVT, true);
  SDValue WidePassthru = ModifyToType(Passthru, WideVecVT);
                     WideMask, WidePassthru);

  EVT MaskVT = Mask.getValueType();
  SDValue PassThru = GetWidenedVector(N->getPassThru());
  Mask = ModifyToType(Mask, WideMaskVT, true);

  SDValue Res = DAG.getMaskedLoad(
      WidenVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
      PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
      ExtType, N->isExpandingLoad());

  EVT MaskVT = Mask.getValueType();
  SDValue PassThru = GetWidenedVector(N->getPassThru());
  Mask = ModifyToType(Mask, WideMaskVT, true);
                                    Index.getValueType().getScalarType(),
                               N->getMemoryVT().getScalarType(), NumElts);
  SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
                                    WideMemVT, dl, Ops, N->getMemOperand(),
                                    N->getIndexType(), N->getExtensionType());

                               N->getMemoryVT().getScalarType(), WideEC);
  Mask = GetWidenedMask(Mask, WideEC);
                   Mask, N->getVectorLength()};
  SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
                                dl, Ops, N->getMemOperand(), N->getIndexType());

  if (N->isVPOpcode())
    return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0),
                       N->getOperand(1), N->getOperand(2));
  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0));

  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  return N->getOperand(OpNo).getValueType();

    N = N.getOperand(0);
    for (unsigned i = 1; i < N->getNumOperands(); ++i)
      if (!N->getOperand(i)->isUndef())
    N = N.getOperand(0);
    N = N.getOperand(0);
    N = N.getOperand(0);
                              { MaskVT, MVT::Other }, Ops);
    ReplaceValueWith(InMask.getValue(1), Mask.getValue(1));

  if (MaskScalarBits < ToMaskScalBits) {
  } else if (MaskScalarBits > ToMaskScalBits) {

  assert(Mask->getValueType(0).getScalarSizeInBits() ==
         "Mask should have the right element size by now.");

  unsigned CurrMaskNumEls = Mask->getValueType(0).getVectorNumElements();
    SDValue ZeroIdx = DAG.getVectorIdxConstant(0, SDLoc(Mask));
    EVT SubVT = Mask->getValueType(0);

  assert((Mask->getValueType(0) == ToMaskVT) &&
         "A mask of ToMaskVT should have been produced by now.");
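// WidenVSELECTMask tries to reuse a SETCC (or a logical combination of
// SETCCs) feeding a VSELECT as the widened select mask, using convertMask
// above to give it the element width and element count the target expects
// for vector selects.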
  EVT CondVT = Cond->getValueType(0);
  EVT VSelVT = N->getValueType(0);
  EVT FinalVT = VSelVT;
  EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
  EVT ToMaskVT = VSelVT;

    Mask = convertMask(Cond, MaskVT, ToMaskVT);

    if (ScalarBits0 != ScalarBits1) {
      EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
      EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);

    SETCC0 = convertMask(SETCC0, VT0, MaskVT);
    SETCC1 = convertMask(SETCC1, VT1, MaskVT);

    Mask = convertMask(Cond, MaskVT, ToMaskVT);

  unsigned Opcode = N->getOpcode();
  if (SDValue WideCond = WidenVSELECTMask(N)) {
    SDValue InOp1 = GetWidenedVector(N->getOperand(1));
    SDValue InOp2 = GetWidenedVector(N->getOperand(2));
    return DAG.getNode(Opcode, SDLoc(N), WidenVT, WideCond, InOp1, InOp2);

    Cond1 = GetWidenedVector(Cond1);
      SDValue SplitSelect = SplitVecOp_VSELECT(N, 0);
      SDValue Res = ModifyToType(SplitSelect, WidenVT);
    Cond1 = ModifyToType(Cond1, CondWidenVT);

  SDValue InOp1 = GetWidenedVector(N->getOperand(1));
  SDValue InOp2 = GetWidenedVector(N->getOperand(2));
  if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
    return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2,
  return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2);

  SDValue InOp1 = GetWidenedVector(N->getOperand(2));
  SDValue InOp2 = GetWidenedVector(N->getOperand(3));
                     N->getOperand(1), InOp1, InOp2, N->getOperand(4));

  return DAG.getUNDEF(WidenVT);
  EVT VT = N->getValueType(0);
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));

  for (unsigned i = 0; i != NumElts; ++i) {
    int Idx = N->getMaskElt(i);
    if (Idx < (int)NumElts)
  for (unsigned i = NumElts; i != WidenNumElts; ++i)
  return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);

  EVT VT = N->getValueType(0);
  SDValue OpValue = GetWidenedVector(N->getOperand(0));
  unsigned IdxVal = WidenNumElts - VTNumElts;

    unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
    assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken "
                                  "down type's element count");
    for (; i < VTNumElts / GCD; ++i)
                  DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
    for (; i < WidenNumElts / GCD; ++i)

  for (unsigned i = 0; i != VTNumElts; ++i) {
    Mask.push_back(IdxVal + i);
  for (unsigned i = VTNumElts; i != WidenNumElts; ++i)
  return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),

  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operands must be vectors");
    SDValue SplitVSetCC = SplitVecOp_VSETCC(N);
    SDValue Res = ModifyToType(SplitVSetCC, WidenVT);

    InOp1 = GetWidenedVector(InOp1);
    InOp2 = GetWidenedVector(InOp2);
    InOp1 = DAG.WidenVector(InOp1, SDLoc(N));
    InOp2 = DAG.WidenVector(InOp2, SDLoc(N));
         "Input not widened to expected type!");
  if (N->getOpcode() == ISD::VP_SETCC) {
    return DAG.getNode(ISD::VP_SETCC, SDLoc(N), WidenVT, InOp1, InOp2,
                       N->getOperand(2), Mask, N->getOperand(4));
  assert(N->getValueType(0).isVector() &&
         N->getOperand(1).getValueType().isVector() &&
         "Operands must be vectors");
  EVT VT = N->getValueType(0);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();

  for (unsigned i = 0; i != NumElts; ++i) {
                          DAG.getVectorIdxConstant(i, dl));
                          DAG.getVectorIdxConstant(i, dl));
    Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other},
                             {Chain, LHSElem, RHSElem, CC});
    Chains[i] = Scalars[i].getValue(1);
    Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
                               DAG.getBoolConstant(true, dl, EltVT, VT),
                               DAG.getBoolConstant(false, dl, EltVT, VT));

  ReplaceValueWith(SDValue(N, 1), NewChain);

  return DAG.getBuildVector(WidenVT, dl, Scalars);
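// Widen Vector Operands.
//
// WidenVectorOperand handles nodes whose result type is already legal but
// whose operand OpNo has been widened.  For example, storing a v3i32 value
// that was widened to v4i32 must only write the original three lanes, so
// the store is rewritten rather than simply stored from the wide register.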
bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
  LLVM_DEBUG(dbgs() << "Widen node operand " << OpNo << ": "; N->dump(&DAG));

  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))

  switch (N->getOpcode()) {
    dbgs() << "WidenVectorOperand op #" << OpNo << ": ";
  case ISD::STORE: Res = WidenVecOp_STORE(N); break;
  case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(N, OpNo); break;
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = WidenVecOp_VP_STRIDED_STORE(N, OpNo);
    Res = WidenVecOp_EXTEND_VECTOR_INREG(N);
  case ISD::MSTORE: Res = WidenVecOp_MSTORE(N, OpNo); break;
  case ISD::MGATHER: Res = WidenVecOp_MGATHER(N, OpNo); break;
  case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(N, OpNo); break;
  case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
    Res = WidenVecOp_UnrollVectorOp(N);
    Res = WidenVecOp_EXTEND(N);
    Res = WidenVecOp_CMP(N);
    Res = WidenVecOp_Convert(N);
    Res = WidenVecOp_FP_TO_XINT_SAT(N);
  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecOp_VP_SPLAT(N, OpNo);
    Res = WidenVecOp_VECREDUCE(N);
    Res = WidenVecOp_VECREDUCE_SEQ(N);
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = WidenVecOp_VP_REDUCE(N);
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = WidenVecOp_VP_CttzElements(N);

  if (!Res.getNode())
    return false;

  if (N->isStrictFPOpcode())
           "Invalid operand expansion");
         "Invalid operand expansion");

  ReplaceValueWith(SDValue(N, 0), Res);
  EVT VT = N->getValueType(0);
         "Unexpected type action");
  InOp = GetWidenedVector(InOp);
         "Input wasn't widened!");
      FixedEltVT == InEltVT) {
             "Not enough elements in the fixed type for the operand!");
             "We can't have the same type as we started with!");
                         DAG.getUNDEF(FixedVT), InOp,
                         DAG.getVectorIdxConstant(0, DL));
                       DAG.getVectorIdxConstant(0, DL));

  return WidenVecOp_Convert(N);

  switch (N->getOpcode()) {

  EVT OpVT = N->getOperand(0).getValueType();
  EVT ResVT = N->getValueType(0);
                    DAG.getVectorIdxConstant(0, dl));
                    DAG.getVectorIdxConstant(0, dl));

  LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
  RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);

  return DAG.getNode(N->getOpcode(), dl, ResVT, LHS, RHS);

  return DAG.UnrollVectorOp(N);

  EVT ResultVT = N->getValueType(0);
  SDValue WideArg = GetWidenedVector(N->getOperand(0));
                            {WideArg, Test}, N->getFlags());
                           DAG.getVectorIdxConstant(0, DL));

  EVT OpVT = N->getOperand(0).getValueType();
  return DAG.getNode(ExtendCode, DL, ResultVT, CC);

  EVT VT = N->getValueType(0);
  SDValue InOp = N->getOperand(N->isStrictFPOpcode() ? 1 : 0);
         "Unexpected type action");
  InOp = GetWidenedVector(InOp);

  unsigned Opcode = N->getOpcode();
  if (TLI.isTypeLegal(WideVT) && !N->isStrictFPOpcode()) {
    if (N->isStrictFPOpcode()) {
      Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other },
                        { N->getOperand(0), InOp, N->getOperand(2) });
      Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other },
                        { N->getOperand(0), InOp });
      Res = DAG.getNode(Opcode, dl, WideVT, InOp, N->getOperand(1));
      Res = DAG.getNode(Opcode, dl, WideVT, InOp);
                      DAG.getVectorIdxConstant(0, dl));

  if (N->isStrictFPOpcode()) {
    for (unsigned i = 0; i < NumElts; ++i) {
                            DAG.getVectorIdxConstant(i, dl));
      Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
    ReplaceValueWith(SDValue(N, 1), NewChain);
    for (unsigned i = 0; i < NumElts; ++i)
      Ops[i] = DAG.getNode(Opcode, dl, EltVT,
                           InOp, DAG.getVectorIdxConstant(i, dl)));

  return DAG.getBuildVector(VT, dl, Ops);
  EVT DstVT = N->getValueType(0);
  SDValue Src = GetWidenedVector(N->getOperand(0));
  EVT SrcVT = Src.getValueType();
      DAG.getNode(N->getOpcode(), dl, WideDstVT, Src, N->getOperand(1));
      DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));

  return DAG.UnrollVectorOp(N);

  EVT VT = N->getValueType(0);
  SDValue InOp = GetWidenedVector(N->getOperand(0));
  if (!VT.isVector() && VT != MVT::x86mmx &&
                       DAG.getVectorIdxConstant(0, dl));
              .divideCoefficientBy(EltSize);
                       DAG.getVectorIdxConstant(0, dl));

  return CreateStackStoreLoad(InOp, VT);

  EVT VT = N->getValueType(0);
  EVT InVT = N->getOperand(0).getValueType();
  unsigned NumOperands = N->getNumOperands();
  for (i = 1; i < NumOperands; ++i)
    if (!N->getOperand(i).isUndef())
  if (i == NumOperands)
    return GetWidenedVector(N->getOperand(0));

  for (unsigned i = 0; i < NumOperands; ++i) {
           "Unexpected type action");
    InOp = GetWidenedVector(InOp);
    for (unsigned j = 0; j < NumInElts; ++j)
                           DAG.getVectorIdxConstant(j, dl));
  return DAG.getBuildVector(VT, dl, Ops);

SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
    SubVec = GetWidenedVector(SubVec);

  bool IndicesValid = false;
    IndicesValid = true;
  Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
      Attribute::VScaleRange);
      IndicesValid = true;

  if (IndicesValid && InVec.isUndef() && N->getConstantOperandVal(2) == 0)
                   "INSERT_SUBVECTOR");

SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
                     N->getValueType(0), InOp, N->getOperand(1));

SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
                     N->getValueType(0), InOp, N->getOperand(1));
SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(SDNode *N) {
  SDValue InOp = GetWidenedVector(N->getOperand(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), InOp);
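// Stores of widened values must not write the padding lanes.  Non-byte-sized
// or truncating stores are scalarized; otherwise the widened value is either
// stored with a predicated VP store whose explicit vector length covers only
// the original elements, or broken into legal pieces by GenWidenVectorStores.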
  if (!ST->getMemoryVT().getScalarType().isByteSized())
  if (ST->isTruncatingStore())

    StVal = GetWidenedVector(StVal);
    return DAG.getStoreVP(ST->getChain(), DL, StVal, ST->getBasePtr(),
                          DAG.getUNDEF(ST->getBasePtr().getValueType()), Mask,
                          EVL, StVT, ST->getMemOperand(),
                          ST->getAddressingMode());

  if (GenWidenVectorStores(StChain, ST)) {
    if (StChain.size() == 1)

SDValue DAGTypeLegalizer::WidenVecOp_VP_SPLAT(SDNode *N, unsigned OpNo) {
  assert(OpNo == 1 && "Can widen only mask operand of vp_splat");
  return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
                     N->getOperand(0), GetWidenedVector(N->getOperand(1)),

SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(SDNode *N, unsigned OpNo) {
  assert((OpNo == 1 || OpNo == 3) &&
         "Can widen only data or mask operand of vp_store");

    StVal = GetWidenedVector(StVal);
           "Unable to widen VP store");
    Mask = GetWidenedVector(Mask);
    Mask = GetWidenedVector(Mask);
           "Unable to widen VP store");
    StVal = GetWidenedVector(StVal);

  assert(Mask.getValueType().getVectorElementCount() ==
         "Mask and data vectors should have the same number of elements");

  return DAG.getStoreVP(ST->getChain(), dl, StVal, ST->getBasePtr(),
                        ST->getOffset(), Mask, ST->getVectorLength(),
                        ST->getMemoryVT(), ST->getMemOperand(),
                        ST->getAddressingMode(), ST->isTruncatingStore(),
                        ST->isCompressingStore());

  assert((OpNo == 1 || OpNo == 4) &&
         "Can widen only data or mask operand of vp_strided_store");
           "Unable to widen VP strided store");
           "Unable to widen VP strided store");

  StVal = GetWidenedVector(StVal);
  Mask = GetWidenedVector(Mask);
             Mask.getValueType().getVectorElementCount() &&
         "Data and mask vectors should have the same number of elements");

  return DAG.getStridedStoreVP(

SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) {
  assert((OpNo == 1 || OpNo == 4) &&
         "Can widen only data or mask operand of mstore");
  EVT MaskVT = Mask.getValueType();

    StVal = GetWidenedVector(StVal);
    Mask = ModifyToType(Mask, WideMaskVT, true);
    Mask = ModifyToType(Mask, WideMaskVT, true);
    StVal = ModifyToType(StVal, WideVT);

  assert(Mask.getValueType().getVectorNumElements() ==
         "Mask and data vectors should have the same number of elements");
SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(SDNode *N, unsigned OpNo) {
  assert(OpNo == 4 && "Can widen only the index of mgather");
  auto *MG = cast<MaskedGatherSDNode>(N);
  SDValue DataOp = MG->getPassThru();
  SDValue Scale = MG->getScale();

  SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
                                    MG->getMemOperand(), MG->getIndexType(),
                                    MG->getExtensionType());

SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
    DataOp = GetWidenedVector(DataOp);
    EVT IndexVT = Index.getValueType();
    EVT MaskVT = Mask.getValueType();
    Mask = ModifyToType(Mask, WideMaskVT, true);
  } else if (OpNo == 4) {
  return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N),

SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(SDNode *N, unsigned OpNo) {
    DataOp = GetWidenedVector(DataOp);
    Mask = GetWidenedMask(Mask, WideEC);
  } else if (OpNo == 3) {
  return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N), Ops,

  SDValue InOp0 = GetWidenedVector(N->getOperand(0));
  SDValue InOp1 = GetWidenedVector(N->getOperand(1));
  EVT VT = N->getValueType(0);
                             SVT, InOp0, InOp1, N->getOperand(2));
                   DAG.getVectorIdxConstant(0, dl));

  EVT OpVT = N->getOperand(0).getValueType();
  return DAG.getNode(ExtendCode, dl, VT, CC);

  EVT VT = N->getValueType(0);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();

  for (unsigned i = 0; i != NumElts; ++i) {
                          DAG.getVectorIdxConstant(i, dl));
                          DAG.getVectorIdxConstant(i, dl));
    Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other},
                             {Chain, LHSElem, RHSElem, CC});
    Chains[i] = Scalars[i].getValue(1);
    Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
                               DAG.getBoolConstant(true, dl, EltVT, VT),
                               DAG.getBoolConstant(false, dl, EltVT, VT));

  ReplaceValueWith(SDValue(N, 1), NewChain);

  return DAG.getBuildVector(VT, dl, Scalars);
  SDValue Op = GetWidenedVector(N->getOperand(0));
  EVT OrigVT = N->getOperand(0).getValueType();
  EVT WideVT = Op.getValueType();

  unsigned Opc = N->getOpcode();
  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
  assert(NeutralElem && "Neutral element must exist");

    unsigned GCD = std::gcd(OrigElts, WideElts);
    SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
    for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD)
                       DAG.getVectorIdxConstant(Idx, dl));
    return DAG.getNode(Opc, dl, N->getValueType(0), Op, Flags);

  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
                     DAG.getVectorIdxConstant(Idx, dl));

  return DAG.getNode(Opc, dl, N->getValueType(0), Op, Flags);

  EVT WideVT = Op.getValueType();

  unsigned Opc = N->getOpcode();
  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);

    unsigned GCD = std::gcd(OrigElts, WideElts);
    SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
    for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD)
                       DAG.getVectorIdxConstant(Idx, dl));
    return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);

  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
                     DAG.getVectorIdxConstant(Idx, dl));

  return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);

  assert(N->isVPOpcode() && "Expected VP opcode");

  SDValue Op = GetWidenedVector(N->getOperand(1));
                              Op.getValueType().getVectorElementCount());

  return DAG.getNode(N->getOpcode(), dl, N->getValueType(0),
                     {N->getOperand(0), Op, Mask, N->getOperand(3)},
  EVT VT = N->getValueType(0);
                       DAG.getVectorIdxConstant(0, DL));

  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0),
                     {Source, Mask, N->getOperand(2)}, N->getFlags());
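// findMemType chooses the widest legal memory type, vector or scalar, that
// evenly divides the widened width and respects the access alignment.  It
// returns std::nullopt when no suitable type exists, and the load/store
// wideners below then fall back to element-by-element code.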
                                  unsigned WidenEx = 0) {
  unsigned AlignInBits = Align*8;
  EVT RetVT = WidenEltVT;
  if (Width == WidenEltWidth)

      (WidenWidth % MemVTWidth) == 0 &&
      (MemVTWidth <= Width ||
       (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
    if (MemVTWidth == WidenWidth)

        (WidenWidth % MemVTWidth) == 0 &&
        (MemVTWidth <= Width ||
         (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {

  return std::nullopt;
                                       unsigned Start, unsigned End) {
  SDLoc dl(LdOps[Start]);
  EVT LdTy = LdOps[Start].getValueType();

  for (unsigned i = Start + 1; i != End; ++i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
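// GenWidenVectorLoads loads the original memory width as a sequence of
// progressively smaller legal memory types, records each partial load's
// chain in LdChain, and then reassembles the pieces (via
// BuildVectorFromScalar above or CONCAT_VECTORS with UNDEF padding) into the
// widened vector type.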
  EVT LdVT = LD->getMemoryVT();
  TypeSize WidthDiff = WidenWidth - LdWidth;

  std::optional<EVT> FirstVT =
      findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,

  TypeSize FirstVTWidth = FirstVT->getSizeInBits();
    std::optional<EVT> NewVT = FirstVT;
    TypeSize NewVTWidth = FirstVTWidth;
      RemainingWidth -= NewVTWidth;
        NewVTWidth = NewVT->getSizeInBits();

  SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr, LD->getPointerInfo(),
                             LD->getOriginalAlign(), MMOFlags, AAInfo);

  if (MemVTs.empty()) {
    if (!FirstVT->isVector()) {
    if (FirstVT == WidenVT)
    unsigned NumConcat =
    SDValue UndefVal = DAG.getUNDEF(*FirstVT);
    ConcatOps[0] = LdOp;
    for (unsigned i = 1; i != NumConcat; ++i)
      ConcatOps[i] = UndefVal;

  IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,

  for (EVT MemVT : MemVTs) {
    Align NewAlign = ScaledOffset == 0
                         ? LD->getOriginalAlign()
        DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
    IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);

  if (!LdOps[0].getValueType().isVector())

  EVT LdTy = LdOps[i].getValueType();
  for (--i; i >= 0; --i) {
    LdTy = LdOps[i].getValueType();

  ConcatOps[--Idx] = LdOps[i];
  for (--i; i >= 0; --i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
        WidenOps[j] = ConcatOps[Idx+j];
      for (; j != NumOps; ++j)
        WidenOps[j] = DAG.getUNDEF(LdTy);
    ConcatOps[--Idx] = LdOps[i];

  SDValue UndefVal = DAG.getUNDEF(LdTy);
  for (; i != End-Idx; ++i)
    WidenOps[i] = ConcatOps[Idx+i];
  for (; i != NumOps; ++i)
    WidenOps[i] = UndefVal;
  EVT LdVT = LD->getMemoryVT();
                     "not yet supported");

      DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr, LD->getPointerInfo(),
                     LdEltVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
    Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
                            LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
                            LD->getOriginalAlign(), MMOFlags, AAInfo);

  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; i != WidenNumElts; ++i)

  return DAG.getBuildVector(WidenVT, dl, Ops);
  SDValue ValOp = GetWidenedVector(ST->getValue());
  EVT StVT = ST->getMemoryVT();
         "Mismatch between store and value types");

    std::optional<EVT> NewVT =
    TypeSize NewVTWidth = NewVT->getSizeInBits();
      StWidth -= NewVTWidth;
      MemVTs.back().second++;

  for (const auto &Pair : MemVTs) {
    EVT NewVT = Pair.first;
    unsigned Count = Pair.second;
        Align NewAlign = ScaledOffset == 0
                             ? ST->getOriginalAlign()
                                 DAG.getVectorIdxConstant(Idx, dl));
        SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
                                  DAG.getVectorIdxConstant(Idx++, dl));
            DAG.getStore(Chain, dl, EOp, BasePtr, MPI, ST->getOriginalAlign(),
        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
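// ModifyToType pads or truncates InOp to the requested vector type NVT.
// When FillWithZeroes is set, the padding lanes are zeroed by AND'ing the
// widened value with a constant mask rather than being left undefined, which
// matters when the result is later used as a predicate mask.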
                                       bool FillWithZeroes) {
         "input and widen element type must match");
         "cannot modify scalable vectors in this way");

  SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
    for (unsigned i = 1; i != NumConcat; ++i)
                       DAG.getVectorIdxConstant(0, dl));
         "Scalable vectors should have been handled already.");

  unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
                         DAG.getVectorIdxConstant(Idx, dl));

  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;

  SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
  if (!FillWithZeroes)
         "We expect to never want to FillWithZeroes for non-integral types.");

  MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
  MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));

  return DAG.getNode(ISD::AND, dl, NVT, Widened,
                     DAG.getBuildVector(NVT, dl, MaskOps));
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
amdgpu AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
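The expansion helpers above are the generic fall-backs for vector memory operations. A rough sketch of their use, assuming LD is a LoadSDNode*, DAG a SelectionDAG&, and TLI a TargetLowering& (BasePtr, VecVT, and Index are illustrative locals):
  // scalarizeVectorLoad returns the rebuilt vector value and the updated chain.
  std::pair<SDValue, SDValue> Scalarized = TLI.scalarizeVectorLoad(LD, DAG);
  SDValue Value = Scalarized.first;
  SDValue Chain = Scalarized.second;
  // getVectorElementPointer does the address arithmetic such expansions need:
  // the vector's base pointer advanced by Index * element store size.
  SDValue EltPtr = TLI.getVectorElementPointer(DAG, BasePtr, VecVT, Index);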
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent a VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent a VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
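The accessors above are how operands are pulled off the VP and strided memory nodes before they are rebuilt element by element. A hedged sketch, assuming G is a VPGatherSDNode* (getChain() is assumed from the MemSDNode base class; the local names are illustrative):
  SDValue Chain = G->getChain();
  SDValue Base  = G->getBasePtr();
  SDValue Index = G->getIndex();
  SDValue Scale = G->getScale();
  SDValue Mask  = G->getMask();
  SDValue EVL   = G->getVectorLength();
  ISD::MemIndexType IdxTy = G->getIndexType(); // how Index is combined with Base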
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
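These FixedOrScalableQuantity queries (via ElementCount and TypeSize) let the same legalization code reason about fixed and scalable vectors alike. A small sketch, assuming VT is an EVT already in scope:
  ElementCount EC = VT.getVectorElementCount();
  unsigned MinElts = EC.getKnownMinValue(); // vscale * MinElts lanes if scalable
  if (EC.isScalable()) {
    // Lane count is only known up to a runtime multiple of vscale.
  }
  if (EC.isKnownMultipleOf(2)) {
    ElementCount HalfEC = EC.divideCoefficientBy(2); // safe to split in half
  }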
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns the platform-specific canonical encoding of a floating-point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively places vector elements based on the mask, e...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
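Several opcodes in the list above (the [SU]ADDO/[SU]SUBO/[SU]MULO family, FFREXP) produce two results, which is why their scalarization paths also have to forward the second value. A minimal sketch of building such a node, assuming DAG, DL, LHS, and RHS are in scope; MVT::i32/MVT::i1 are placeholder types, since the real flag type comes from the target's boolean-contents settings:
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1); // sum type + overflow flag type
  SDValue AddO = DAG.getNode(ISD::SADDO, DL, VTs, LHS, RHS);
  SDValue Sum  = AddO.getValue(0); // result 0: the addition
  SDValue Ovf  = AddO.getValue(1); // result 1: the overflow bit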
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
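The ISD predicate and translation helpers above are the query side of legalization: they recognize node shapes before any rewriting happens. A sketch under the assumption that N is an SDNode*:
  if (ISD::isBuildVectorOfConstantSDNodes(N)) {
    // Every operand of the BUILD_VECTOR is a ConstantSDNode or undef.
  }
  if (std::optional<unsigned> VPOpc = ISD::getVPForBaseOpcode(N->getOpcode())) {
    // *VPOpc is the VP_* twin of this opcode, when one exists.
  }
  unsigned ScalarOpc = ISD::getVecReduceBaseOpcode(ISD::VECREDUCE_FADD); // ISD::FADD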
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
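The range and math utilities above are generic llvm:: helpers used for bookkeeping throughout this file. A brief sketch (Mask and BaseAlign are illustrative locals, not names from this code):
  SmallVector<int, 8> Mask(4, PoisonMaskElem);               // a 4-lane shuffle mask, all poison
  bool HasPoison = llvm::is_contained(Mask, PoisonMaskElem); // true
  bool Pow2Lanes = isPowerOf2_32(Mask.size());               // true for 4
  Align BaseAlign(16);
  Align EltAlign = commonAlignment(BaseAlign, /*Offset=*/8); // alignment valid at Base + 8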
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
EVT changeElementType(EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector type has a power-of-2 number of elements.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight),...
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
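The EVT helpers above form the type algebra the scalarizer and splitter are written in. A small sketch, assuming Ctx is an LLVMContext&:
  EVT VT = MVT::v4f32;
  EVT EltVT  = VT.getVectorElementType();          // f32
  EVT IntVT  = VT.changeTypeToInteger();           // v4i32
  EVT HalfVT = VT.getHalfNumVectorElementsVT(Ctx); // v2f32
  bool FixedLen = VT.isFixedLengthVector();        // true: not scaled by vscale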
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
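MachinePointerInfo is how per-element loads and stores produced during scalarization keep their alias information. A sketch, assuming LD is a LoadSDNode* and EltOffset a byte offset computed elsewhere (getPointerInfo() is assumed from the MemSDNode base class):
  MachinePointerInfo PtrInfo = LD->getPointerInfo();             // info for the original vector access
  MachinePointerInfo EltInfo = PtrInfo.getWithOffset(EltOffset); // same object, shifted by EltOffset bytes
  unsigned AS = PtrInfo.getAddrSpace();                          // IR address space of the pointer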
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.