35#define DEBUG_TYPE "legalize-types"
41void DAGTypeLegalizer::ScalarizeVectorResult(
SDNode *
N,
unsigned ResNo) {
46 switch (
N->getOpcode()) {
49 dbgs() <<
"ScalarizeVectorResult #" << ResNo <<
": ";
61 case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(
N);
break;
63 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(
N));
break;
69 case ISD::SETCC: R = ScalarizeVecRes_SETCC(
N);
break;
70 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(
N);
break;
76 R = ScalarizeVecRes_VecInregOp(
N);
125 R = ScalarizeVecRes_UnaryOp(
N);
128 R = ScalarizeVecRes_ADDRSPACECAST(
N);
131 R = ScalarizeVecRes_FFREXP(
N, ResNo);
180 R = ScalarizeVecRes_BinOp(
N);
185 R = ScalarizeVecRes_CMP(
N);
191 R = ScalarizeVecRes_TernaryOp(
N);
194#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
195 case ISD::STRICT_##DAGN:
196#include "llvm/IR/ConstrainedOps.def"
197 R = ScalarizeVecRes_StrictFPOp(
N);
202 R = ScalarizeVecRes_FP_TO_XINT_SAT(
N);
211 R = ScalarizeVecRes_OverflowOp(
N, ResNo);
221 R = ScalarizeVecRes_FIX(
N);
227 SetScalarizedVector(
SDValue(
N, ResNo), R);
231 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
232 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
234 LHS.getValueType(), LHS, RHS,
N->getFlags());
242 if (getTypeAction(
LHS.getValueType()) ==
244 LHS = GetScalarizedVector(LHS);
245 RHS = GetScalarizedVector(RHS);
247 EVT VT =
LHS.getValueType().getVectorElementType();
255 N->getValueType(0).getVectorElementType(), LHS, RHS);
259 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
260 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
261 SDValue Op2 = GetScalarizedVector(
N->getOperand(2));
267 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
268 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
274SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(
SDNode *
N,
unsigned ResNo) {
275 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
276 "Unexpected vector type!");
277 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
279 EVT VT0 =
N->getValueType(0);
280 EVT VT1 =
N->getValueType(1);
285 {VT0.getScalarType(), VT1.getScalarType()}, Elt)
289 unsigned OtherNo = 1 - ResNo;
290 EVT OtherVT =
N->getValueType(OtherNo);
292 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
296 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
299 return SDValue(ScalarNode, ResNo);
303 EVT VT =
N->getValueType(0).getVectorElementType();
304 unsigned NumOpers =
N->getNumOperands();
306 EVT ValueVTs[] = {VT, MVT::Other};
315 for (
unsigned i = 1; i < NumOpers; ++i) {
321 Oper = GetScalarizedVector(Oper);
332 Opers,
N->getFlags());
343 EVT ResVT =
N->getValueType(0);
344 EVT OvVT =
N->getValueType(1);
348 ScalarLHS = GetScalarizedVector(
N->getOperand(0));
349 ScalarRHS = GetScalarizedVector(
N->getOperand(1));
354 ScalarLHS = ElemsLHS[0];
355 ScalarRHS = ElemsRHS[0];
361 N->getOpcode(),
DL, ScalarVTs, ScalarLHS, ScalarRHS).
getNode();
365 unsigned OtherNo = 1 - ResNo;
366 EVT OtherVT =
N->getValueType(OtherNo);
368 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
372 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
375 return SDValue(ScalarNode, ResNo);
380 SDValue Op = DisintegrateMERGE_VALUES(
N, ResNo);
381 return GetScalarizedVector(
Op);
386 if (
Op.getValueType().isVector()
387 &&
Op.getValueType().getVectorNumElements() == 1
388 && !isSimpleLegalType(
Op.getValueType()))
389 Op = GetScalarizedVector(
Op);
390 EVT NewVT =
N->getValueType(0).getVectorElementType();
395SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(
SDNode *
N) {
396 EVT EltVT =
N->getValueType(0).getVectorElementType();
405SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
407 N->getValueType(0).getVectorElementType(),
408 N->getOperand(0),
N->getOperand(1));
414 EVT OpVT =
Op.getValueType();
418 Op = GetScalarizedVector(
Op);
425 N->getValueType(0).getVectorElementType(),
Op,
430 SDValue Op = GetScalarizedVector(
N->getOperand(0));
435SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
439 EVT EltVT =
N->getValueType(0).getVectorElementType();
440 if (
Op.getValueType() != EltVT)
447 assert(
N->isUnindexed() &&
"Indexed vector load?");
451 N->getValueType(0).getVectorElementType(),
SDLoc(
N),
N->getChain(),
452 N->getBasePtr(), DAG.
getUNDEF(
N->getBasePtr().getValueType()),
453 N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
454 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
N->getAAInfo());
464 EVT DestVT =
N->getValueType(0).getVectorElementType();
466 EVT OpVT =
Op.getValueType();
476 Op = GetScalarizedVector(
Op);
486 EVT EltVT =
N->getValueType(0).getVectorElementType();
488 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
497 EVT OpVT =
Op.getValueType();
499 EVT EltVT =
N->getValueType(0).getVectorElementType();
502 Op = GetScalarizedVector(
Op);
508 switch (
N->getOpcode()) {
520SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(
SDNode *
N) {
521 EVT DestVT =
N->getValueType(0).getVectorElementType();
523 EVT OpVT =
Op.getValueType();
533 Op = GetScalarizedVector(
Op);
539 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
540 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
541 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
545SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(
SDNode *
N) {
548 EVT EltVT =
N->getValueType(0).getVectorElementType();
557 EVT OpVT =
Cond.getValueType();
570 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
584 EVT OpVT =
Cond->getOperand(0).getValueType();
591 EVT CondVT =
Cond.getValueType();
592 if (ScalarBool != VecBool) {
593 switch (ScalarBool) {
614 auto BoolVT = getSetCCResultType(CondVT);
615 if (BoolVT.bitsLT(CondVT))
620 GetScalarizedVector(
N->getOperand(2)));
624 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
626 LHS.getValueType(),
N->getOperand(0), LHS,
627 GetScalarizedVector(
N->getOperand(2)));
631 SDValue LHS = GetScalarizedVector(
N->getOperand(2));
633 N->getOperand(0),
N->getOperand(1),
634 LHS, GetScalarizedVector(
N->getOperand(3)),
639 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
642SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(
SDNode *
N) {
644 SDValue Arg =
N->getOperand(2).getOperand(0);
646 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
647 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
648 return GetScalarizedVector(
N->getOperand(
Op));
651SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(
SDNode *
N) {
653 EVT SrcVT = Src.getValueType();
658 Src = GetScalarizedVector(Src);
664 EVT DstVT =
N->getValueType(0).getVectorElementType();
665 return DAG.
getNode(
N->getOpcode(), dl, DstVT, Src,
N->getOperand(1));
669 assert(
N->getValueType(0).isVector() &&
670 N->getOperand(0).getValueType().isVector() &&
671 "Operand types must be vectors");
674 EVT OpVT =
LHS.getValueType();
675 EVT NVT =
N->getValueType(0).getVectorElementType();
680 LHS = GetScalarizedVector(LHS);
681 RHS = GetScalarizedVector(RHS);
697 return DAG.
getNode(ExtendCode,
DL, NVT, Res);
705 EVT ResultVT =
N->getValueType(0).getVectorElementType();
708 Arg = GetScalarizedVector(Arg);
721 return DAG.
getNode(ExtendCode,
DL, ResultVT, Res);
728bool DAGTypeLegalizer::ScalarizeVectorOperand(
SDNode *
N,
unsigned OpNo) {
733 switch (
N->getOpcode()) {
736 dbgs() <<
"ScalarizeVectorOperand Op #" << OpNo <<
": ";
743 Res = ScalarizeVecOp_BITCAST(
N);
755 Res = ScalarizeVecOp_UnaryOp(
N);
761 Res = ScalarizeVecOp_UnaryOp_StrictFP(
N);
764 Res = ScalarizeVecOp_CONCAT_VECTORS(
N);
767 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(
N);
770 Res = ScalarizeVecOp_VSELECT(
N);
773 Res = ScalarizeVecOp_VSETCC(
N);
776 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
779 Res = ScalarizeVecOp_STRICT_FP_ROUND(
N, OpNo);
782 Res = ScalarizeVecOp_FP_ROUND(
N, OpNo);
785 Res = ScalarizeVecOp_STRICT_FP_EXTEND(
N);
788 Res = ScalarizeVecOp_FP_EXTEND(
N);
805 Res = ScalarizeVecOp_VECREDUCE(
N);
809 Res = ScalarizeVecOp_VECREDUCE_SEQ(
N);
813 Res = ScalarizeVecOp_CMP(
N);
818 if (!Res.
getNode())
return false;
826 "Invalid operand expansion");
828 ReplaceValueWith(
SDValue(
N, 0), Res);
835 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
837 N->getValueType(0), Elt);
843 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
844 "Unexpected vector type!");
845 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
847 N->getValueType(0).getScalarType(), Elt);
855SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(
SDNode *
N) {
856 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
857 "Unexpected vector type!");
858 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
860 {
N->getValueType(0).getScalarType(), MVT::Other },
861 {
N->getOperand(0), Elt });
871 ReplaceValueWith(
SDValue(
N, 0), Res);
876SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(
SDNode *
N) {
878 for (
unsigned i = 0, e =
N->getNumOperands(); i < e; ++i)
879 Ops[i] = GetScalarizedVector(
N->getOperand(i));
885SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
886 EVT VT =
N->getValueType(0);
887 SDValue Res = GetScalarizedVector(
N->getOperand(0));
899 SDValue ScalarCond = GetScalarizedVector(
N->getOperand(0));
900 EVT VT =
N->getValueType(0);
910 assert(
N->getValueType(0).isVector() &&
911 N->getOperand(0).getValueType().isVector() &&
912 "Operand types must be vectors");
913 assert(
N->getValueType(0) == MVT::v1i1 &&
"Expected v1i1 type");
915 EVT VT =
N->getValueType(0);
916 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
917 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
919 EVT OpVT =
N->getOperand(0).getValueType();
931 Res = DAG.
getNode(ExtendCode,
DL, NVT, Res);
939 assert(
N->isUnindexed() &&
"Indexed store of one-element vector?");
940 assert(OpNo == 1 &&
"Do not know how to scalarize this operand!");
943 if (
N->isTruncatingStore())
945 N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
946 N->getBasePtr(),
N->getPointerInfo(),
947 N->getMemoryVT().getVectorElementType(),
N->getOriginalAlign(),
948 N->getMemOperand()->getFlags(),
N->getAAInfo());
950 return DAG.
getStore(
N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
951 N->getBasePtr(),
N->getPointerInfo(),
952 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
958SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(
SDNode *
N,
unsigned OpNo) {
959 assert(OpNo == 0 &&
"Wrong operand for scalarization!");
960 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
962 N->getValueType(0).getVectorElementType(), Elt,
967SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(
SDNode *
N,
969 assert(OpNo == 1 &&
"Wrong operand for scalarization!");
970 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
974 {
N->getOperand(0), Elt,
N->getOperand(2) });
983 ReplaceValueWith(
SDValue(
N, 0), Res);
990 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
992 N->getValueType(0).getVectorElementType(), Elt);
998SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(
SDNode *
N) {
999 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
1003 {
N->getOperand(0), Elt});
1012 ReplaceValueWith(
SDValue(
N, 0), Res);
1017 SDValue Res = GetScalarizedVector(
N->getOperand(0));
1024SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(
SDNode *
N) {
1030 SDValue Op = GetScalarizedVector(VecOp);
1032 AccOp,
Op,
N->getFlags());
1036 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
1037 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
1039 EVT ResVT =
N->getValueType(0).getVectorElementType();
1052void DAGTypeLegalizer::SplitVectorResult(
SDNode *
N,
unsigned ResNo) {
1057 if (CustomLowerNode(
N,
N->getValueType(ResNo),
true))
1060 switch (
N->getOpcode()) {
1063 dbgs() <<
"SplitVectorResult #" << ResNo <<
": ";
1075 case ISD::VP_SELECT: SplitRes_Select(
N,
Lo,
Hi);
break;
1090 SplitVecRes_ScalarOp(
N,
Lo,
Hi);
1093 SplitVecRes_STEP_VECTOR(
N,
Lo,
Hi);
1097 SplitVecRes_LOAD(cast<LoadSDNode>(
N),
Lo,
Hi);
1100 SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(
N),
Lo,
Hi);
1102 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1103 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N),
Lo,
Hi);
1106 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(
N),
Lo,
Hi);
1109 case ISD::VP_GATHER:
1110 SplitVecRes_Gather(cast<MemSDNode>(
N),
Lo,
Hi,
true);
1114 SplitVecRes_SETCC(
N,
Lo,
Hi);
1117 SplitVecRes_VECTOR_REVERSE(
N,
Lo,
Hi);
1120 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N),
Lo,
Hi);
1123 SplitVecRes_VECTOR_SPLICE(
N,
Lo,
Hi);
1126 SplitVecRes_VECTOR_DEINTERLEAVE(
N);
1129 SplitVecRes_VECTOR_INTERLEAVE(
N);
1132 SplitVecRes_VAARG(
N,
Lo,
Hi);
1138 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
1144 case ISD::VP_BITREVERSE:
1152 case ISD::VP_CTLZ_ZERO_UNDEF:
1154 case ISD::VP_CTTZ_ZERO_UNDEF:
1169 case ISD::VP_FFLOOR:
1174 case ISD::VP_FNEARBYINT:
1179 case ISD::VP_FP_EXTEND:
1181 case ISD::VP_FP_ROUND:
1183 case ISD::VP_FP_TO_SINT:
1185 case ISD::VP_FP_TO_UINT:
1191 case ISD::VP_LLRINT:
1193 case ISD::VP_FROUND:
1195 case ISD::VP_FROUNDEVEN:
1202 case ISD::VP_FROUNDTOZERO:
1204 case ISD::VP_SINT_TO_FP:
1206 case ISD::VP_TRUNCATE:
1208 case ISD::VP_UINT_TO_FP:
1210 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
1213 SplitVecRes_ADDRSPACECAST(
N,
Lo,
Hi);
1216 SplitVecRes_FFREXP(
N, ResNo,
Lo,
Hi);
1222 case ISD::VP_SIGN_EXTEND:
1223 case ISD::VP_ZERO_EXTEND:
1224 SplitVecRes_ExtendOp(
N,
Lo,
Hi);
1241 case ISD::VP_FMINNUM:
1244 case ISD::VP_FMAXNUM:
1246 case ISD::VP_FMINIMUM:
1248 case ISD::VP_FMAXIMUM:
1254 case ISD::OR:
case ISD::VP_OR:
1274 case ISD::VP_FCOPYSIGN:
1275 SplitVecRes_BinOp(
N,
Lo,
Hi);
1282 SplitVecRes_TernaryOp(
N,
Lo,
Hi);
1286 SplitVecRes_CMP(
N,
Lo,
Hi);
1289#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1290 case ISD::STRICT_##DAGN:
1291#include "llvm/IR/ConstrainedOps.def"
1292 SplitVecRes_StrictFPOp(
N,
Lo,
Hi);
1297 SplitVecRes_FP_TO_XINT_SAT(
N,
Lo,
Hi);
1306 SplitVecRes_OverflowOp(
N, ResNo,
Lo,
Hi);
1316 SplitVecRes_FIX(
N,
Lo,
Hi);
1318 case ISD::EXPERIMENTAL_VP_REVERSE:
1319 SplitVecRes_VP_REVERSE(
N,
Lo,
Hi);
1328void DAGTypeLegalizer::IncrementPointer(
MemSDNode *
N,
EVT MemVT,
1337 DL,
Ptr.getValueType(),
1338 APInt(
Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
1340 Flags.setNoUnsignedWrap(
true);
1342 *ScaledOffset += IncrementSize;
1346 MPI =
N->getPointerInfo().getWithOffset(IncrementSize);
1352std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask) {
1353 return SplitMask(Mask,
SDLoc(Mask));
1356std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask,
1359 EVT MaskVT =
Mask.getValueType();
1361 GetSplitVector(Mask, MaskLo, MaskHi);
1364 return std::make_pair(MaskLo, MaskHi);
1369 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1371 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1375 unsigned Opcode =
N->getOpcode();
1376 if (
N->getNumOperands() == 2) {
1382 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
1383 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1386 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
1389 std::tie(EVLLo, EVLHi) =
1390 DAG.
SplitEVL(
N->getOperand(3),
N->getValueType(0), dl);
1393 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1395 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1401 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
1403 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
1405 GetSplitVector(
N->getOperand(2), Op2Lo, Op2Hi);
1409 unsigned Opcode =
N->getOpcode();
1410 if (
N->getNumOperands() == 3) {
1416 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
1417 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1420 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
1423 std::tie(EVLLo, EVLHi) =
1424 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0), dl);
1427 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1429 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1439 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
1441 GetSplitVector(LHS, LHSLo, LHSHi);
1442 GetSplitVector(RHS, RHSLo, RHSHi);
1444 std::tie(LHSLo, LHSHi) = DAG.
SplitVector(LHS, dl);
1445 std::tie(RHSLo, RHSHi) = DAG.
SplitVector(RHS, dl);
1448 EVT SplitResVT =
N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
1449 Lo = DAG.
getNode(
N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
1450 Hi = DAG.
getNode(
N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);
1455 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1457 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1461 unsigned Opcode =
N->getOpcode();
1480 switch (getTypeAction(InVT)) {
1495 GetExpandedOp(InOp,
Lo,
Hi);
1506 GetSplitVector(InOp,
Lo,
Hi);
1527 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT,
Lo,
Hi);
1550 assert(!(
N->getNumOperands() & 1) &&
"Unsupported CONCAT_VECTORS");
1552 unsigned NumSubvectors =
N->getNumOperands() / 2;
1553 if (NumSubvectors == 1) {
1554 Lo =
N->getOperand(0);
1555 Hi =
N->getOperand(1);
1569void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(
SDNode *
N,
SDValue &
Lo,
1591 GetSplitVector(Vec,
Lo,
Hi);
1594 EVT LoVT =
Lo.getValueType();
1603 unsigned IdxVal =
Idx->getAsZExtVal();
1604 if (IdxVal + SubElems <= LoElems) {
1612 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
1638 Lo = DAG.
getLoad(
Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
1642 auto *
Load = cast<LoadSDNode>(
Lo);
1644 IncrementPointer(Load, LoVT, MPI, StackPtr);
1647 Hi = DAG.
getLoad(
Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
1656 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1661 EVT RHSVT =
RHS.getValueType();
1664 GetSplitVector(RHS, RHSLo, RHSHi);
1681 SDValue FpValue =
N->getOperand(0);
1683 GetSplitVector(FpValue, ArgLo, ArgHi);
1696 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1700 std::tie(LoVT, HiVT) =
1711 unsigned Opcode =
N->getOpcode();
1718 GetSplitVector(N0, InLo, InHi);
1725 EVT OutLoVT, OutHiVT;
1728 assert((2 * OutNumElements) <= InNumElements &&
1729 "Illegal extend vector in reg split");
1739 for (
unsigned i = 0; i != OutNumElements; ++i)
1740 SplitHi[i] = i + OutNumElements;
1743 Lo = DAG.
getNode(Opcode, dl, OutLoVT, InLo);
1744 Hi = DAG.
getNode(Opcode, dl, OutHiVT, InHi);
1749 unsigned NumOps =
N->getNumOperands();
1763 for (
unsigned i = 1; i < NumOps; ++i) {
1768 EVT InVT =
Op.getValueType();
1773 GetSplitVector(
Op, OpLo, OpHi);
1782 EVT LoValueVTs[] = {LoVT, MVT::Other};
1783 EVT HiValueVTs[] = {HiVT, MVT::Other};
1792 Lo.getValue(1),
Hi.getValue(1));
1796 ReplaceValueWith(
SDValue(
N, 1), Chain);
1799SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(
SDNode *
N,
unsigned ResNE) {
1801 EVT VT =
N->getValueType(0);
1812 else if (NE > ResNE)
1816 EVT ChainVTs[] = {EltVT, MVT::Other};
1820 for (i = 0; i !=
NE; ++i) {
1822 for (
unsigned j = 1, e =
N->getNumOperands(); j != e; ++j) {
1823 SDValue Operand =
N->getOperand(j);
1834 Scalar.getNode()->setFlags(
N->getFlags());
1842 for (; i < ResNE; ++i)
1847 ReplaceValueWith(
SDValue(
N, 1), Chain);
1854void DAGTypeLegalizer::SplitVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo,
1857 EVT ResVT =
N->getValueType(0);
1858 EVT OvVT =
N->getValueType(1);
1859 EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
1863 SDValue LoLHS, HiLHS, LoRHS, HiRHS;
1865 GetSplitVector(
N->getOperand(0), LoLHS, HiLHS);
1866 GetSplitVector(
N->getOperand(1), LoRHS, HiRHS);
1872 unsigned Opcode =
N->getOpcode();
1884 unsigned OtherNo = 1 - ResNo;
1885 EVT OtherVT =
N->getValueType(OtherNo);
1887 SetSplitVector(
SDValue(
N, OtherNo),
1893 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
1897void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(
SDNode *
N,
SDValue &
Lo,
1903 GetSplitVector(Vec,
Lo,
Hi);
1906 unsigned IdxVal = CIdx->getZExtValue();
1907 unsigned LoNumElts =
Lo.getValueType().getVectorMinNumElements();
1908 if (IdxVal < LoNumElts) {
1910 Lo.getValueType(),
Lo, Elt,
Idx);
1956 Lo = DAG.
getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
1959 auto Load = cast<LoadSDNode>(
Lo);
1961 IncrementPointer(Load, LoVT, MPI, StackPtr);
1963 Hi = DAG.
getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
1967 if (LoVT !=
Lo.getValueType())
1969 if (HiVT !=
Hi.getValueType())
1977 assert(
N->getValueType(0).isScalableVector() &&
1978 "Only scalable vectors are supported for STEP_VECTOR");
2001 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0));
2021 EVT MemoryVT =
LD->getMemoryVT();
2025 EVT LoMemVT, HiMemVT;
2032 ReplaceValueWith(
SDValue(LD, 1), NewChain);
2037 LD->getPointerInfo(), LoMemVT,
LD->getOriginalAlign(),
2041 IncrementPointer(LD, LoMemVT, MPI,
Ptr);
2044 HiMemVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
2053 ReplaceValueWith(
SDValue(LD, 1), Ch);
2058 assert(
LD->isUnindexed() &&
"Indexed VP load during type legalization!");
2067 assert(
Offset.isUndef() &&
"Unexpected indexed variable-length load offset");
2068 Align Alignment =
LD->getOriginalAlign();
2071 EVT MemoryVT =
LD->getMemoryVT();
2073 EVT LoMemVT, HiMemVT;
2074 bool HiIsEmpty =
false;
2075 std::tie(LoMemVT, HiMemVT) =
2081 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2084 GetSplitVector(Mask, MaskLo, MaskHi);
2086 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2091 std::tie(EVLLo, EVLHi) = DAG.
SplitEVL(EVL,
LD->getValueType(0), dl);
2100 MaskLo, EVLLo, LoMemVT, MMO,
LD->isExpandingLoad());
2109 LD->isExpandingLoad());
2115 MPI =
LD->getPointerInfo().getWithOffset(
2120 Alignment,
LD->getAAInfo(),
LD->getRanges());
2123 Offset, MaskHi, EVLHi, HiMemVT, MMO,
2124 LD->isExpandingLoad());
2134 ReplaceValueWith(
SDValue(LD, 1), Ch);
2140 "Indexed VP strided load during type legalization!");
2142 "Unexpected indexed variable-length load offset");
2149 EVT LoMemVT, HiMemVT;
2150 bool HiIsEmpty =
false;
2151 std::tie(LoMemVT, HiMemVT) =
2157 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2160 GetSplitVector(Mask, LoMask, HiMask);
2166 std::tie(LoEVL, HiEVL) =
2204 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2215 ReplaceValueWith(
SDValue(SLD, 1), Ch);
2228 assert(
Offset.isUndef() &&
"Unexpected indexed masked load offset");
2237 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2240 GetSplitVector(Mask, MaskLo, MaskHi);
2242 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2246 EVT LoMemVT, HiMemVT;
2247 bool HiIsEmpty =
false;
2248 std::tie(LoMemVT, HiMemVT) =
2251 SDValue PassThruLo, PassThruHi;
2253 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2255 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2298 ReplaceValueWith(
SDValue(MLD, 1), Ch);
2315 if (
auto *MSC = dyn_cast<MaskedGatherSDNode>(
N)) {
2316 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
2318 auto *VPSC = cast<VPGatherSDNode>(
N);
2319 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
2322 EVT MemoryVT =
N->getMemoryVT();
2323 Align Alignment =
N->getOriginalAlign();
2327 if (SplitSETCC && Ops.Mask.getOpcode() ==
ISD::SETCC) {
2328 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
2330 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
2333 EVT LoMemVT, HiMemVT;
2338 if (getTypeAction(Ops.Index.getValueType()) ==
2340 GetSplitVector(Ops.Index, IndexLo, IndexHi);
2342 std::tie(IndexLo, IndexHi) = DAG.
SplitVector(Ops.Index, dl);
2349 if (
auto *MGT = dyn_cast<MaskedGatherSDNode>(
N)) {
2350 SDValue PassThru = MGT->getPassThru();
2351 SDValue PassThruLo, PassThruHi;
2354 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2356 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2361 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
2363 OpsLo, MMO, IndexTy, ExtType);
2365 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
2367 OpsHi, MMO, IndexTy, ExtType);
2369 auto *VPGT = cast<VPGatherSDNode>(
N);
2371 std::tie(EVLLo, EVLHi) =
2372 DAG.
SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
2374 SDValue OpsLo[] = {Ch,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
2376 MMO, VPGT->getIndexType());
2378 SDValue OpsHi[] = {Ch,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
2380 MMO, VPGT->getIndexType());
2390 ReplaceValueWith(
SDValue(
N, 1), Ch);
2394 assert(
N->getValueType(0).isVector() &&
2395 N->getOperand(0).getValueType().isVector() &&
2396 "Operand types must be vectors");
2404 if (getTypeAction(
N->getOperand(0).getValueType()) ==
2406 GetSplitVector(
N->getOperand(0), LL, LH);
2410 if (getTypeAction(
N->getOperand(1).getValueType()) ==
2412 GetSplitVector(
N->getOperand(1), RL, RH);
2417 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2));
2418 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2));
2420 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
2421 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2422 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
2423 std::tie(EVLLo, EVLHi) =
2424 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
2425 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2), MaskLo,
2427 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2), MaskHi,
2441 EVT InVT =
N->getOperand(0).getValueType();
2443 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2448 unsigned Opcode =
N->getOpcode();
2449 if (
N->getNumOperands() <= 2) {
2451 Lo = DAG.
getNode(Opcode, dl, LoVT,
Lo,
N->getOperand(1), Flags);
2452 Hi = DAG.
getNode(Opcode, dl, HiVT,
Hi,
N->getOperand(1), Flags);
2460 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
2461 assert(
N->isVPOpcode() &&
"Expected VP opcode");
2464 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2467 std::tie(EVLLo, EVLHi) =
2468 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2481 EVT InVT =
N->getOperand(0).getValueType();
2483 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2487 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
2488 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
2489 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
2494void DAGTypeLegalizer::SplitVecRes_FFREXP(
SDNode *
N,
unsigned ResNo,
2502 EVT InVT =
N->getOperand(0).getValueType();
2504 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2508 Lo = DAG.
getNode(
N->getOpcode(), dl, {LoVT, LoVT1},
Lo);
2509 Hi = DAG.
getNode(
N->getOpcode(), dl, {HiVT, HiVT1},
Hi);
2510 Lo->setFlags(
N->getFlags());
2511 Hi->setFlags(
N->getFlags());
2517 unsigned OtherNo = 1 - ResNo;
2518 EVT OtherVT =
N->getValueType(OtherNo);
2526 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
2533 EVT SrcVT =
N->getOperand(0).getValueType();
2534 EVT DestVT =
N->getValueType(0);
2557 EVT SplitLoVT, SplitHiVT;
2561 LLVM_DEBUG(
dbgs() <<
"Split vector extend via incremental extend:";
2562 N->dump(&DAG);
dbgs() <<
"\n");
2563 if (!
N->isVPOpcode()) {
2566 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0));
2577 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0),
2578 N->getOperand(1),
N->getOperand(2));
2583 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2586 std::tie(EVLLo, EVLHi) =
2587 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2589 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2590 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2595 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
2603 GetSplitVector(
N->getOperand(0), Inputs[0], Inputs[1]);
2604 GetSplitVector(
N->getOperand(1), Inputs[2], Inputs[3]);
2610 return N.getResNo() == 0 &&
2614 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &
DL](
SDValue &Input1,
2619 "Expected build vector node.");
2622 for (
unsigned I = 0;
I < NewElts; ++
I) {
2627 Ops[
I] = Input2.getOperand(
Idx - NewElts);
2629 Ops[
I] = Input1.getOperand(
Idx);
2631 if (Ops[
I].getValueType().bitsGT(EltVT))
2634 return DAG.getBuildVector(NewVT,
DL, Ops);
2642 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT,
this, NewElts,
2646 for (
unsigned Idx = 0;
Idx < std::size(Inputs); ++
Idx) {
2648 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.
getNode());
2657 for (
auto &
P : ShufflesIdxs) {
2658 if (
P.second.size() < 2)
2662 for (
int &
Idx : Mask) {
2665 unsigned SrcRegIdx =
Idx / NewElts;
2666 if (Inputs[SrcRegIdx].
isUndef()) {
2671 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].
getNode());
2674 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2679 Idx = MaskElt % NewElts +
2680 P.second[Shuffle->getOperand(MaskElt / NewElts) ==
P.first.first
2686 Inputs[
P.second[0]] =
P.first.first;
2687 Inputs[
P.second[1]] =
P.first.second;
2690 ShufflesIdxs[std::make_pair(
P.first.second,
P.first.first)].clear();
2694 for (
int &
Idx : Mask) {
2697 unsigned SrcRegIdx =
Idx / NewElts;
2698 if (Inputs[SrcRegIdx].
isUndef()) {
2703 getTypeAction(Inputs[SrcRegIdx].getValueType());
2705 Inputs[SrcRegIdx].getNumOperands() == 2 &&
2706 !Inputs[SrcRegIdx].getOperand(1).
isUndef() &&
2709 UsedSubVector.set(2 * SrcRegIdx + (
Idx % NewElts) / (NewElts / 2));
2711 if (UsedSubVector.count() > 1) {
2713 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2714 if (UsedSubVector.test(2 *
I) == UsedSubVector.test(2 *
I + 1))
2716 if (Pairs.
empty() || Pairs.
back().size() == 2)
2718 if (UsedSubVector.test(2 *
I)) {
2719 Pairs.
back().emplace_back(
I, 0);
2721 assert(UsedSubVector.test(2 *
I + 1) &&
2722 "Expected to be used one of the subvectors.");
2723 Pairs.
back().emplace_back(
I, 1);
2726 if (!Pairs.
empty() && Pairs.
front().size() > 1) {
2728 for (
int &
Idx : Mask) {
2731 unsigned SrcRegIdx =
Idx / NewElts;
2733 Pairs, [SrcRegIdx](
ArrayRef<std::pair<unsigned, int>> Idxs) {
2734 return Idxs.front().first == SrcRegIdx ||
2735 Idxs.back().first == SrcRegIdx;
2737 if (It == Pairs.
end())
2739 Idx = It->front().first * NewElts + (
Idx % NewElts) % (NewElts / 2) +
2740 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
2743 for (
ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
2744 Inputs[Idxs.front().first] = DAG.
getNode(
2746 Inputs[Idxs.front().first].getValueType(),
2747 Inputs[Idxs.front().first].
getOperand(Idxs.front().second),
2748 Inputs[Idxs.back().first].
getOperand(Idxs.back().second));
2757 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2758 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[
I].
getNode());
2761 if (Shuffle->getOperand(0).getValueType() != NewVT)
2764 if (!Inputs[
I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
2765 !Shuffle->isSplat()) {
2767 }
else if (!Inputs[
I].hasOneUse() &&
2768 !Shuffle->getOperand(1).isUndef()) {
2770 for (
int &
Idx : Mask) {
2773 unsigned SrcRegIdx =
Idx / NewElts;
2776 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2781 int OpIdx = MaskElt / NewElts;
2794 for (
int OpIdx = 0; OpIdx < 2; ++OpIdx) {
2795 if (Shuffle->getOperand(OpIdx).isUndef())
2797 auto *It =
find(Inputs, Shuffle->getOperand(OpIdx));
2798 if (It == std::end(Inputs))
2800 int FoundOp = std::distance(std::begin(Inputs), It);
2803 for (
int &
Idx : Mask) {
2806 unsigned SrcRegIdx =
Idx / NewElts;
2809 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2814 int MaskIdx = MaskElt / NewElts;
2815 if (OpIdx == MaskIdx)
2816 Idx = MaskElt % NewElts + FoundOp * NewElts;
2819 Op = (OpIdx + 1) % 2;
2827 for (
int &
Idx : Mask) {
2830 unsigned SrcRegIdx =
Idx / NewElts;
2833 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2834 int OpIdx = MaskElt / NewElts;
2837 Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
2843 TryPeekThroughShufflesInputs(OrigMask);
2845 auto &&MakeUniqueInputs = [&Inputs, &
IsConstant,
2849 for (
const auto &
I : Inputs) {
2851 UniqueConstantInputs.
insert(
I);
2852 else if (!
I.isUndef())
2857 if (UniqueInputs.
size() != std::size(Inputs)) {
2858 auto &&UniqueVec = UniqueInputs.
takeVector();
2859 auto &&UniqueConstantVec = UniqueConstantInputs.
takeVector();
2860 unsigned ConstNum = UniqueConstantVec.size();
2861 for (
int &
Idx : Mask) {
2864 unsigned SrcRegIdx =
Idx / NewElts;
2865 if (Inputs[SrcRegIdx].
isUndef()) {
2869 const auto It =
find(UniqueConstantVec, Inputs[SrcRegIdx]);
2870 if (It != UniqueConstantVec.end()) {
2872 NewElts * std::distance(UniqueConstantVec.begin(), It);
2873 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2876 const auto RegIt =
find(UniqueVec, Inputs[SrcRegIdx]);
2877 assert(RegIt != UniqueVec.end() &&
"Cannot find non-const value.");
2879 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
2880 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2882 copy(UniqueConstantVec, std::begin(Inputs));
2883 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
2886 MakeUniqueInputs(OrigMask);
2888 copy(Inputs, std::begin(OrigInputs));
2894 unsigned FirstMaskIdx =
High * NewElts;
2897 assert(!Output &&
"Expected default initialized initial value.");
2898 TryPeekThroughShufflesInputs(Mask);
2899 MakeUniqueInputs(Mask);
2901 copy(Inputs, std::begin(TmpInputs));
2904 bool SecondIteration =
false;
2905 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](
unsigned Idx) {
2910 if (UsedIdx >= 0 &&
static_cast<unsigned>(UsedIdx) ==
Idx)
2911 SecondIteration =
true;
2912 return SecondIteration;
2915 Mask, std::size(Inputs), std::size(Inputs),
2917 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
2918 [&Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2921 Output = BuildVector(Inputs[
Idx], Inputs[
Idx], Mask);
2923 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[
Idx],
2924 DAG.getUNDEF(NewVT), Mask);
2925 Inputs[
Idx] = Output;
2927 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2930 if (AccumulateResults(Idx1)) {
2933 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
2935 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[Idx1],
2936 Inputs[Idx2], Mask);
2940 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
2942 Output = DAG.getVectorShuffle(NewVT,
DL, TmpInputs[Idx1],
2943 TmpInputs[Idx2], Mask);
2945 Inputs[Idx1] = Output;
2947 copy(OrigInputs, std::begin(Inputs));
2952 EVT OVT =
N->getValueType(0);
2959 const Align Alignment =
2960 DAG.getDataLayout().getABITypeAlign(NVT.
getTypeForEVT(*DAG.getContext()));
2962 Lo = DAG.getVAArg(NVT, dl, Chain,
Ptr, SV, Alignment.
value());
2963 Hi = DAG.getVAArg(NVT, dl,
Lo.getValue(1),
Ptr, SV, Alignment.
value());
2964 Chain =
Hi.getValue(1);
2968 ReplaceValueWith(
SDValue(
N, 1), Chain);
2973 EVT DstVTLo, DstVTHi;
2974 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(
N->getValueType(0));
2978 EVT SrcVT =
N->getOperand(0).getValueType();
2980 GetSplitVector(
N->getOperand(0), SrcLo, SrcHi);
2982 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(
N, 0);
2984 Lo = DAG.getNode(
N->getOpcode(), dl, DstVTLo, SrcLo,
N->getOperand(1));
2985 Hi = DAG.getNode(
N->getOpcode(), dl, DstVTHi, SrcHi,
N->getOperand(1));
2991 GetSplitVector(
N->getOperand(0), InLo, InHi);
3003 std::tie(
Lo,
Hi) = DAG.SplitVector(Expanded,
DL);
3008 EVT VT =
N->getValueType(0);
3015 Align Alignment = DAG.getReducedAlign(VT,
false);
3021 auto &MF = DAG.getMachineFunction();
3035 DAG.getConstant(1,
DL, PtrVT));
3037 DAG.getConstant(EltWidth,
DL, PtrVT));
3039 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth,
DL, PtrVT);
3041 SDValue TrueMask = DAG.getBoolConstant(
true,
DL,
Mask.getValueType(), VT);
3042 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(),
DL, Val, StorePtr,
3043 DAG.getUNDEF(PtrVT), Stride, TrueMask,
3046 SDValue Load = DAG.getLoadVP(VT,
DL, Store, StackPtr, Mask, EVL, LoadMMO);
3048 std::tie(
Lo,
Hi) = DAG.SplitVector(Load,
DL);
// Split the two results of a VECTOR_DEINTERLEAVE node whose result type was
// marked for splitting: both operands are already split, and each pair of
// halves is deinterleaved independently.
// NOTE(review): this span is a lossy extraction — interior lines (the node
// creation opcode and the SetSplitVector calls) are missing from this copy.
3051void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(
SDNode *
N) {
// Lo/Hi halves of the two input operands, filled in by GetSplitVector.
3053 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
3054 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
3055 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
// Each deinterleave of a half-pair yields a two-result (VT, VT) node.
3059 DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
3061 DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
// Split the two results of a VECTOR_INTERLEAVE node: the low halves of both
// operands are interleaved to form result 0 and the high halves to form
// result 1; each half-result is registered via SetSplitVector.
// NOTE(review): lossy extraction — the opcode of the created nodes and some
// surrounding lines are missing from this copy.
3067void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(
SDNode *
N) {
3068 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
3069 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
3070 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
// Res[0] = interleave(Op0Lo, Op1Lo), Res[1] = interleave(Op0Hi, Op1Hi).
3074 DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
3076 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
// Each interleave node has two values; record both split halves per result.
3078 SetSplitVector(
SDValue(
N, 0), Res[0].getValue(0), Res[0].getValue(1));
3079 SetSplitVector(
SDValue(
N, 1), Res[1].getValue(0), Res[1].getValue(1));
// Dispatcher: operand OpNo of node N has a vector type that must be split.
// Tries target custom lowering first, then routes to the per-opcode
// SplitVecOp_* helper. Returns false when the node was updated in place
// (Res is N itself); otherwise replaces N's value 0 with Res.
// NOTE(review): lossy extraction — most case labels and several statements
// between the visible lines are missing from this copy.
3090bool DAGTypeLegalizer::SplitVectorOperand(
SDNode *
N,
unsigned OpNo) {
// Give the target a chance to custom-lower before generic splitting.
3095 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
3098 switch (
N->getOpcode()) {
3101 dbgs() <<
"SplitVectorOperand Op #" << OpNo <<
": ";
3110 case ISD::SETCC: Res = SplitVecOp_VSETCC(
N);
break;
3116 case ISD::VP_TRUNCATE:
3118 Res = SplitVecOp_TruncateHelper(
N);
3121 case ISD::VP_FP_ROUND:
3125 Res = SplitVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
3128 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(
N), OpNo);
3130 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
3131 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(
N), OpNo);
3134 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(
N), OpNo);
3137 case ISD::VP_SCATTER:
3138 Res = SplitVecOp_Scatter(cast<MemSDNode>(
N), OpNo);
3141 case ISD::VP_GATHER:
3142 Res = SplitVecOp_Gather(cast<MemSDNode>(
N), OpNo);
3145 Res = SplitVecOp_VSELECT(
N, OpNo);
3151 case ISD::VP_SINT_TO_FP:
3152 case ISD::VP_UINT_TO_FP:
// Narrowing int-to-fp conversions are handled like truncates; for strict
// ops the data operand is operand 1 (operand 0 is the chain).
3153 if (
N->getValueType(0).bitsLT(
3154 N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).getValueType()))
3155 Res = SplitVecOp_TruncateHelper(
N);
3157 Res = SplitVecOp_UnaryOp(
N);
3161 Res = SplitVecOp_FP_TO_XINT_SAT(
N);
3165 case ISD::VP_FP_TO_SINT:
3166 case ISD::VP_FP_TO_UINT:
3177 Res = SplitVecOp_UnaryOp(
N);
3180 Res = SplitVecOp_FPOpDifferentTypes(
N);
3185 Res = SplitVecOp_CMP(
N);
3191 Res = SplitVecOp_ExtVecInRegOp(
N);
3209 Res = SplitVecOp_VECREDUCE(
N, OpNo);
3213 Res = SplitVecOp_VECREDUCE_SEQ(
N);
// All vector-predicated reductions share one helper.
3215 case ISD::VP_REDUCE_FADD:
3216 case ISD::VP_REDUCE_SEQ_FADD:
3217 case ISD::VP_REDUCE_FMUL:
3218 case ISD::VP_REDUCE_SEQ_FMUL:
3219 case ISD::VP_REDUCE_ADD:
3220 case ISD::VP_REDUCE_MUL:
3221 case ISD::VP_REDUCE_AND:
3222 case ISD::VP_REDUCE_OR:
3223 case ISD::VP_REDUCE_XOR:
3224 case ISD::VP_REDUCE_SMAX:
3225 case ISD::VP_REDUCE_SMIN:
3226 case ISD::VP_REDUCE_UMAX:
3227 case ISD::VP_REDUCE_UMIN:
3228 case ISD::VP_REDUCE_FMAX:
3229 case ISD::VP_REDUCE_FMIN:
3230 case ISD::VP_REDUCE_FMAXIMUM:
3231 case ISD::VP_REDUCE_FMINIMUM:
3232 Res = SplitVecOp_VP_REDUCE(
N, OpNo);
3234 case ISD::VP_CTTZ_ELTS:
3235 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
3236 Res = SplitVecOp_VP_CttzElements(
N);
// A null result means the helper handled replacement itself.
3241 if (!Res.
getNode())
return false;
3248 if (
N->isStrictFPOpcode())
3250 "Invalid operand expansion");
3253 "Invalid operand expansion");
3255 ReplaceValueWith(
SDValue(
N, 0), Res);
// Split a VSELECT whose mask operand (operand 0) requires splitting: the
// mask and both data operands are split into Lo/Hi halves so two narrower
// VSELECTs can be formed.
// NOTE(review): lossy extraction — the final node construction lines are
// missing from this copy.
3259SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(
SDNode *
N,
unsigned OpNo) {
// Only the mask operand is expected to be the illegal one here.
3262 assert(OpNo == 0 &&
"Illegal operand must be mask");
3269 assert(
Mask.getValueType().isVector() &&
"VSELECT without a vector mask?");
3272 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3273 assert(
Lo.getValueType() ==
Hi.getValueType() &&
3274 "Lo and Hi have differing types");
3277 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3278 assert(LoOpVT == HiOpVT &&
"Asymmetric vector split?");
// Split data operands and mask into matching halves.
3280 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3281 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0,
DL);
3282 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1,
DL);
3283 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
// Split the vector operand of a VECREDUCE_* node: the operand is split in
// half, the halves are combined (combining node not visible in this copy),
// and the reduction is re-issued on the partial result.
// NOTE(review): lossy extraction — the lines building 'Partial' are missing.
3293SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(
SDNode *
N,
unsigned OpNo) {
3294 EVT ResVT =
N->getValueType(0);
3298 SDValue VecOp =
N->getOperand(OpNo);
3300 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3301 GetSplitVector(VecOp,
Lo,
Hi);
3303 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
// Reduce the combined partial result, preserving the original node's flags.
3309 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
N->getFlags());
3313 EVT ResVT =
N->getValueType(0);
3322 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3323 GetSplitVector(VecOp,
Lo,
Hi);
3325 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3331 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
Hi, Flags);
// Split the vector operand (operand 1) of a VP_REDUCE_* node. The mask
// (operand 2) and EVL (operand 3) are split alongside the data; the low
// half is reduced into the start value and the result is fed as the start
// value of the high-half reduction, chaining the two reductions.
3334SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(
SDNode *
N,
unsigned OpNo) {
3335 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3336 assert(OpNo == 1 &&
"Can only split reduce vector operand");
3338 unsigned Opc =
N->getOpcode();
3339 EVT ResVT =
N->getValueType(0);
3343 SDValue VecOp =
N->getOperand(OpNo);
3345 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3346 GetSplitVector(VecOp,
Lo,
Hi);
// Split the predicate mask and the explicit vector length to match.
3349 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
3352 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(
N->getOperand(3), VecVT, dl);
// Low-half reduce uses the original start value (operand 0); its result
// becomes the start value for the high-half reduce.
3357 DAG.
getNode(Opc, dl, ResVT, {
N->getOperand(0),
Lo, MaskLo, EVLLo},
Flags);
3358 return DAG.getNode(Opc, dl, ResVT, {ResLo,
Hi, MaskHi, EVLHi},
Flags);
3363 EVT ResVT =
N->getValueType(0);
3366 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3367 EVT InVT =
Lo.getValueType();
3372 if (
N->isStrictFPOpcode()) {
3373 Lo = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3374 { N->getOperand(0), Lo });
3375 Hi = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3376 { N->getOperand(0), Hi });
3385 ReplaceValueWith(
SDValue(
N, 1), Ch);
3386 }
else if (
N->getNumOperands() == 3) {
3387 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3388 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3389 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
3390 std::tie(EVLLo, EVLHi) =
3391 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
3392 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo, MaskLo, EVLLo);
3393 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi, MaskHi, EVLHi);
3395 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo);
3396 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi);
3406 EVT ResVT =
N->getValueType(0);
3408 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3412 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3418 Lo = BitConvertToInteger(
Lo);
3419 Hi = BitConvertToInteger(
Hi);
3421 if (DAG.getDataLayout().isBigEndian())
3429 assert(OpNo == 1 &&
"Invalid OpNo; can only split SubVec.");
3431 EVT ResVT =
N->getValueType(0);
3439 GetSplitVector(SubVec,
Lo,
Hi);
3442 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3448 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3450 return SecondInsertion;
// Split the source vector of an EXTRACT_SUBVECTOR. If the extracted range
// lies wholly in the low half, extract from Lo (re-indexing for the
// scalable case); otherwise fall back to spilling the whole source to a
// stack temporary and loading the subvector back.
// NOTE(review): lossy extraction — the high-half path, several asserts'
// conditions, and the final load call are incomplete in this copy.
3453SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
3455 EVT SubVT =
N->getValueType(0);
3460 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
// Minimum (known) element count of the low half; for scalable vectors this
// is the per-vscale count.
3462 uint64_t LoEltsMin =
Lo.getValueType().getVectorMinNumElements();
3465 if (IdxVal < LoEltsMin) {
3467 "Extracted subvector crosses vector split!");
3470 N->getOperand(0).getValueType().isScalableVector())
3472 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
3477 "Extracting scalable subvector from fixed-width unsupported");
3485 "subvector from a scalable predicate vector");
// Slow path: spill the full vector to the stack and reload the subvector.
3491 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3493 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3494 auto &MF = DAG.getMachineFunction();
3498 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3505 SubVT, dl, Store, StackPtr,
// Split the source vector of an EXTRACT_VECTOR_ELT. A constant index in the
// low half extracts from Lo; one in the high half is rewritten against Hi.
// Otherwise try custom lowering, then fall back to a stack spill and an
// extending scalar load of the selected element.
// NOTE(review): lossy extraction — the variable-index and store-offset
// computations are incomplete in this copy.
3509SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
3518 GetSplitVector(Vec,
Lo,
Hi);
3520 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3522 if (IdxVal < LoElts)
// High-half case: re-index relative to Hi.
3526 DAG.getConstant(IdxVal - LoElts,
SDLoc(
N),
3527 Idx.getValueType())), 0);
3531 if (CustomLowerNode(
N,
N->getValueType(0),
true))
3543 return DAG.getAnyExtOrTrunc(NewExtract, dl,
N->getValueType(0));
// Slow path: spill the vector and load the element from the stack slot.
3549 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3551 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3552 auto &MF = DAG.getMachineFunction();
3555 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
// The result type must be at least as wide as the element for the ext load.
3563 assert(
N->getValueType(0).bitsGE(EltVT) &&
"Illegal EXTRACT_VECTOR_ELT.");
3565 return DAG.getExtLoad(
3576 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
3584 SplitVecRes_Gather(
N,
Lo,
Hi);
3587 ReplaceValueWith(
SDValue(
N, 0), Res);
3592 assert(
N->isUnindexed() &&
"Indexed vp_store of vector?");
3596 assert(
Offset.isUndef() &&
"Unexpected VP store offset");
3598 SDValue EVL =
N->getVectorLength();
3600 Align Alignment =
N->getOriginalAlign();
3606 GetSplitVector(
Data, DataLo, DataHi);
3608 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3613 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3616 GetSplitVector(Mask, MaskLo, MaskHi);
3618 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3621 EVT MemoryVT =
N->getMemoryVT();
3622 EVT LoMemVT, HiMemVT;
3623 bool HiIsEmpty =
false;
3624 std::tie(LoMemVT, HiMemVT) =
3625 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3629 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL,
Data.getValueType(),
DL);
3637 Lo = DAG.getStoreVP(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, EVLLo, LoMemVT, MMO,
3638 N->getAddressingMode(),
N->isTruncatingStore(),
3639 N->isCompressingStore());
3646 N->isCompressingStore());
3654 MPI =
N->getPointerInfo().getWithOffset(
3657 MMO = DAG.getMachineFunction().getMachineMemOperand(
3659 Alignment,
N->getAAInfo(),
N->getRanges());
3661 Hi = DAG.getStoreVP(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
3662 N->getAddressingMode(),
N->isTruncatingStore(),
3663 N->isCompressingStore());
3672 assert(
N->isUnindexed() &&
"Indexed vp_strided_store of a vector?");
3673 assert(
N->getOffset().isUndef() &&
"Unexpected VP strided store offset");
3680 GetSplitVector(
Data, LoData, HiData);
3682 std::tie(LoData, HiData) = DAG.SplitVector(
Data,
DL);
3684 EVT LoMemVT, HiMemVT;
3685 bool HiIsEmpty =
false;
3686 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
3692 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
3693 else if (getTypeAction(
Mask.getValueType()) ==
3695 GetSplitVector(Mask, LoMask, HiMask);
3697 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3700 std::tie(LoEVL, HiEVL) =
3701 DAG.SplitEVL(
N->getVectorLength(),
Data.getValueType(),
DL);
3705 N->getChain(),
DL, LoData,
N->getBasePtr(),
N->getOffset(),
3706 N->getStride(), LoMask, LoEVL, LoMemVT,
N->getMemOperand(),
3707 N->getAddressingMode(),
N->isTruncatingStore(),
N->isCompressingStore());
3718 EVT PtrVT =
N->getBasePtr().getValueType();
3721 DAG.getSExtOrTrunc(
N->getStride(),
DL, PtrVT));
3724 Align Alignment =
N->getOriginalAlign();
3732 Alignment,
N->getAAInfo(),
N->getRanges());
3735 N->getChain(),
DL, HiData,
Ptr,
N->getOffset(),
N->getStride(), HiMask,
3736 HiEVL, HiMemVT, MMO,
N->getAddressingMode(),
N->isTruncatingStore(),
3737 N->isCompressingStore());
3746 assert(
N->isUnindexed() &&
"Indexed masked store of vector?");
3750 assert(
Offset.isUndef() &&
"Unexpected indexed masked store offset");
3753 Align Alignment =
N->getOriginalAlign();
3759 GetSplitVector(
Data, DataLo, DataHi);
3761 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3766 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3769 GetSplitVector(Mask, MaskLo, MaskHi);
3771 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3774 EVT MemoryVT =
N->getMemoryVT();
3775 EVT LoMemVT, HiMemVT;
3776 bool HiIsEmpty =
false;
3777 std::tie(LoMemVT, HiMemVT) =
3778 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3786 Lo = DAG.getMaskedStore(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, LoMemVT, MMO,
3787 N->getAddressingMode(),
N->isTruncatingStore(),
3788 N->isCompressingStore());
3797 N->isCompressingStore());
3805 MPI =
N->getPointerInfo().getWithOffset(
3808 MMO = DAG.getMachineFunction().getMachineMemOperand(
3810 Alignment,
N->getAAInfo(),
N->getRanges());
3812 Hi = DAG.getMaskedStore(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, HiMemVT, MMO,
3813 N->getAddressingMode(),
N->isTruncatingStore(),
3814 N->isCompressingStore());
3827 EVT MemoryVT =
N->getMemoryVT();
3828 Align Alignment =
N->getOriginalAlign();
3836 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3837 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
3840 auto *VPSC = cast<VPScatterSDNode>(
N);
3841 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
3846 EVT LoMemVT, HiMemVT;
3847 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3852 GetSplitVector(Ops.Data, DataLo, DataHi);
3854 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data,
DL);
3858 if (OpNo == 1 && Ops.Mask.getOpcode() ==
ISD::SETCC) {
3859 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
3861 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask,
DL);
3865 if (getTypeAction(Ops.Index.getValueType()) ==
3867 GetSplitVector(Ops.Index, IndexLo, IndexHi);
3869 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index,
DL);
3877 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3878 SDValue OpsLo[] = {Ch, DataLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
3880 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3881 MSC->getIndexType(), MSC->isTruncatingStore());
3886 SDValue OpsHi[] = {
Lo, DataHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
3887 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi,
3888 MMO, MSC->getIndexType(),
3889 MSC->isTruncatingStore());
3891 auto *VPSC = cast<VPScatterSDNode>(
N);
3893 std::tie(EVLLo, EVLHi) =
3894 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(),
DL);
3896 SDValue OpsLo[] = {Ch, DataLo,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
3897 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3898 VPSC->getIndexType());
3903 SDValue OpsHi[] = {
Lo, DataHi,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
3904 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi, MMO,
3905 VPSC->getIndexType());
3909 assert(
N->isUnindexed() &&
"Indexed store of vector?");
3910 assert(OpNo == 1 &&
"Can only split the stored value");
3913 bool isTruncating =
N->isTruncatingStore();
3916 EVT MemoryVT =
N->getMemoryVT();
3917 Align Alignment =
N->getOriginalAlign();
3921 GetSplitVector(
N->getOperand(1),
Lo,
Hi);
3923 EVT LoMemVT, HiMemVT;
3924 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3931 Lo = DAG.getTruncStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), LoMemVT,
3932 Alignment, MMOFlags, AAInfo);
3934 Lo = DAG.getStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), Alignment, MMOFlags,
3938 IncrementPointer(
N, LoMemVT, MPI,
Ptr);
3941 Hi = DAG.getTruncStore(Ch,
DL,
Hi,
Ptr, MPI,
3942 HiMemVT, Alignment, MMOFlags, AAInfo);
3944 Hi = DAG.getStore(Ch,
DL,
Hi,
Ptr, MPI, Alignment, MMOFlags, AAInfo);
3958 EVT EltVT =
N->getValueType(0).getVectorElementType();
3960 for (
unsigned i = 0, e =
Op.getValueType().getVectorNumElements();
3963 DAG.getVectorIdxConstant(i,
DL)));
3967 return DAG.getBuildVector(
N->getValueType(0),
DL, Elts);
3988 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
3989 SDValue InVec =
N->getOperand(OpNo);
3991 EVT OutVT =
N->getValueType(0);
3999 EVT LoOutVT, HiOutVT;
4000 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
4001 assert(LoOutVT == HiOutVT &&
"Unequal split?");
4006 if (isTypeLegal(LoOutVT) ||
4007 InElementSize <= OutElementSize * 2)
4008 return SplitVecOp_UnaryOp(
N);
4017 return SplitVecOp_UnaryOp(
N);
4021 GetSplitVector(InVec, InLoVec, InHiVec);
4027 EVT HalfElementVT = IsFloat ?
4029 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
4036 if (
N->isStrictFPOpcode()) {
4037 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
4038 {N->getOperand(0), InLoVec});
4039 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
4040 {N->getOperand(0), InHiVec});
4046 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InLoVec);
4047 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InHiVec);
4059 if (
N->isStrictFPOpcode()) {
4063 DAG.getTargetConstant(0,
DL, TLI.
getPointerTy(DAG.getDataLayout()))});
4071 DAG.getTargetConstant(
4078 assert(
N->getValueType(0).isVector() &&
4079 N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
4080 "Operand types must be vectors");
4082 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
4084 GetSplitVector(
N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
4085 GetSplitVector(
N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);
4098 DAG.getVTList(PartResVT,
N->getValueType(1)),
4099 N->getOperand(0), Lo0, Lo1,
N->getOperand(3));
4101 DAG.getVTList(PartResVT,
N->getValueType(1)),
4102 N->getOperand(0), Hi0, Hi1,
N->getOperand(3));
4105 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4107 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
4108 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4109 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
4110 std::tie(EVLLo, EVLHi) =
4111 DAG.SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
4112 LoRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Lo0, Lo1,
4113 N->getOperand(2), MaskLo, EVLLo);
4114 HiRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Hi0, Hi1,
4115 N->getOperand(2), MaskHi, EVLHi);
4119 EVT OpVT =
N->getOperand(0).getValueType();
4122 return DAG.getNode(ExtendCode,
DL,
N->getValueType(0), Con);
4128 EVT ResVT =
N->getValueType(0);
4131 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
4132 EVT InVT =
Lo.getValueType();
4137 if (
N->isStrictFPOpcode()) {
4138 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4139 { N->getOperand(0), Lo, N->getOperand(2) });
4140 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4141 { N->getOperand(0), Hi, N->getOperand(2) });
4145 Lo.getValue(1),
Hi.getValue(1));
4146 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4147 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4148 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4149 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4150 std::tie(EVLLo, EVLHi) =
4151 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4152 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4153 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
// Split a binary FP op whose result type differs from its operand types
// (so the LHS/result halves get their own VTs from GetSplitDestVTs). If the
// half result types are not legal, unroll to scalar ops instead.
4167SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4170 EVT LHSLoVT, LHSHiVT;
4171 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
// Bail to full scalarization when the split result types are illegal.
4173 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4174 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
// Split operand 0 with the explicit half VTs; operand 1 splits naturally.
4177 std::tie(LHSLo, LHSHi) =
4178 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4181 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4183 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4184 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4193 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
4194 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
4195 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
4197 EVT ResVT =
N->getValueType(0);
4202 SDValue Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
4203 SDValue Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);
4209 EVT ResVT =
N->getValueType(0);
4212 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4213 EVT InVT =
Lo.getValueType();
4219 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4220 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
4227 EVT ResVT =
N->getValueType(0);
4231 GetSplitVector(VecOp,
Lo,
Hi);
4233 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
4234 auto [EVLLo, EVLHi] =
4236 SDValue VLo = DAG.getZExtOrTrunc(EVLLo,
DL, ResVT);
4242 DAG.getSetCC(
DL, getSetCCResultType(ResVT), ResLo, VLo,
ISD::SETNE);
4244 return DAG.getSelect(
DL, ResVT, ResLoNotEVL, ResLo,
4245 DAG.getNode(
ISD::ADD,
DL, ResVT, VLo, ResHi));
// Dispatcher: result ResNo of node N has a vector type that must be widened
// to the next legal (wider) vector type. Tries target custom widening
// first, then routes to the per-opcode WidenVecRes_* helper and records the
// widened value with SetWidenedVector.
// NOTE(review): lossy extraction — most case labels and several statements
// between the visible lines are missing from this copy.
4252void DAGTypeLegalizer::WidenVectorResult(
SDNode *
N,
unsigned ResNo) {
4253 LLVM_DEBUG(
dbgs() <<
"Widen node result " << ResNo <<
": ";
N->dump(&DAG));
// Targets may widen this node themselves.
4256 if (CustomWidenLowerNode(
N,
N->getValueType(ResNo)))
// Helper (body elided in this copy): scalarize ops the target expands.
4261 auto unrollExpandedOp = [&]() {
4266 EVT VT =
N->getValueType(0);
4276 switch (
N->getOpcode()) {
4279 dbgs() <<
"WidenVectorResult #" << ResNo <<
": ";
4287 Res = WidenVecRes_ADDRSPACECAST(
N);
4294 Res = WidenVecRes_INSERT_SUBVECTOR(
N);
4298 case ISD::LOAD: Res = WidenVecRes_LOAD(
N);
break;
4302 Res = WidenVecRes_ScalarOp(
N);
4307 case ISD::VP_SELECT:
4309 Res = WidenVecRes_Select(
N);
4313 case ISD::SETCC: Res = WidenVecRes_SETCC(
N);
break;
4314 case ISD::UNDEF: Res = WidenVecRes_UNDEF(
N);
break;
4316 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N));
4319 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(
N));
4321 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
4322 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N));
4325 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(
N));
4328 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(
N));
4330 case ISD::VP_GATHER:
4331 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(
N));
4334 Res = WidenVecRes_VECTOR_REVERSE(
N);
// Plain and VP binary ops share WidenVecRes_Binary.
4342 case ISD::OR:
case ISD::VP_OR:
4350 case ISD::VP_FMINNUM:
4353 case ISD::VP_FMAXNUM:
4355 case ISD::VP_FMINIMUM:
4357 case ISD::VP_FMAXIMUM:
4388 case ISD::VP_FCOPYSIGN:
4389 Res = WidenVecRes_Binary(
N);
4394 Res = WidenVecRes_CMP(
N);
4399 if (unrollExpandedOp())
4414 Res = WidenVecRes_BinaryCanTrap(
N);
4423 Res = WidenVecRes_BinaryWithExtraScalarOp(
N);
// Every strict (constrained) FP opcode routes to WidenVecRes_StrictFP.
4426#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4427 case ISD::STRICT_##DAGN:
4428#include "llvm/IR/ConstrainedOps.def"
4429 Res = WidenVecRes_StrictFP(
N);
4438 Res = WidenVecRes_OverflowOp(
N, ResNo);
4442 Res = WidenVecRes_FCOPYSIGN(
N);
4447 Res = WidenVecRes_UnarySameEltsWithScalarArg(
N);
4452 if (!unrollExpandedOp())
4453 Res = WidenVecRes_ExpOp(
N);
4459 Res = WidenVecRes_EXTEND_VECTOR_INREG(
N);
// Conversions (plain and VP) share WidenVecRes_Convert.
4464 case ISD::VP_FP_EXTEND:
4466 case ISD::VP_FP_ROUND:
4468 case ISD::VP_FP_TO_SINT:
4470 case ISD::VP_FP_TO_UINT:
4472 case ISD::VP_SIGN_EXTEND:
4474 case ISD::VP_SINT_TO_FP:
4475 case ISD::VP_TRUNCATE:
4478 case ISD::VP_UINT_TO_FP:
4480 case ISD::VP_ZERO_EXTEND:
4481 Res = WidenVecRes_Convert(
N);
4486 Res = WidenVecRes_FP_TO_XINT_SAT(
N);
4492 case ISD::VP_LLRINT:
4493 Res = WidenVecRes_XRINT(
N);
4520 if (unrollExpandedOp())
4530 case ISD::VP_BITREVERSE:
4536 case ISD::VP_CTLZ_ZERO_UNDEF:
4542 case ISD::VP_CTTZ_ZERO_UNDEF:
4547 case ISD::VP_FFLOOR:
4549 case ISD::VP_FNEARBYINT:
4550 case ISD::VP_FROUND:
4551 case ISD::VP_FROUNDEVEN:
4552 case ISD::VP_FROUNDTOZERO:
4556 Res = WidenVecRes_Unary(
N);
4563 Res = WidenVecRes_Ternary(
N);
// Record the widened value so later uses of (N, ResNo) find it.
4569 SetWidenedVector(
SDValue(
N, ResNo), Res);
4576 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4577 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4578 SDValue InOp3 = GetWidenedVector(
N->getOperand(2));
4579 if (
N->getNumOperands() == 3)
4580 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4582 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
4583 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4587 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4588 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
4595 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4596 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4597 if (
N->getNumOperands() == 2)
4598 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2,
4601 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
4602 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4606 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4607 {InOp1, InOp2, Mask, N->getOperand(3)},
N->getFlags());
4616 EVT OpVT =
LHS.getValueType();
4618 LHS = GetWidenedVector(LHS);
4619 RHS = GetWidenedVector(RHS);
4620 OpVT =
LHS.getValueType();
4626 return DAG.getNode(
N->getOpcode(), dl, WidenResVT, LHS, RHS);
// Widen a binary vector op that carries an extra scalar operand (the third
// operand, InOp3, is passed through unchanged; its retrieval is elided in
// this lossy copy): widen both vector inputs and rebuild the node.
4632SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(
SDNode *
N) {
4636 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4637 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4639 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
4648 unsigned ConcatEnd,
EVT VT,
EVT MaxVT,
4651 if (ConcatEnd == 1) {
4652 VT = ConcatOps[0].getValueType();
4654 return ConcatOps[0];
4657 SDLoc dl(ConcatOps[0]);
4664 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
4665 int Idx = ConcatEnd - 1;
4666 VT = ConcatOps[
Idx--].getValueType();
4667 while (
Idx >= 0 && ConcatOps[
Idx].getValueType() == VT)
4680 unsigned NumToInsert = ConcatEnd -
Idx - 1;
4681 for (
unsigned i = 0, OpIdx =
Idx+1; i < NumToInsert; i++, OpIdx++) {
4685 ConcatOps[
Idx+1] = VecOp;
4686 ConcatEnd =
Idx + 2;
4692 unsigned RealVals = ConcatEnd -
Idx - 1;
4693 unsigned SubConcatEnd = 0;
4694 unsigned SubConcatIdx =
Idx + 1;
4695 while (SubConcatEnd < RealVals)
4696 SubConcatOps[SubConcatEnd++] = ConcatOps[++
Idx];
4697 while (SubConcatEnd < OpsToConcat)
4698 SubConcatOps[SubConcatEnd++] = undefVec;
4700 NextVT, SubConcatOps);
4701 ConcatEnd = SubConcatIdx + 1;
4706 if (ConcatEnd == 1) {
4707 VT = ConcatOps[0].getValueType();
4709 return ConcatOps[0];
4714 if (NumOps != ConcatEnd ) {
4716 for (
unsigned j = ConcatEnd; j < NumOps; ++j)
4717 ConcatOps[j] = UndefVal;
4725 unsigned Opcode =
N->getOpcode();
4733 NumElts = NumElts / 2;
4737 if (NumElts != 1 && !TLI.
canOpTrap(
N->getOpcode(), VT)) {
4739 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4740 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4741 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
4753 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4754 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4755 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4758 unsigned ConcatEnd = 0;
4766 while (CurNumElts != 0) {
4767 while (CurNumElts >= NumElts) {
4769 DAG.getVectorIdxConstant(
Idx, dl));
4771 DAG.getVectorIdxConstant(
Idx, dl));
4772 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
4774 CurNumElts -= NumElts;
4777 NumElts = NumElts / 2;
4782 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4784 InOp1, DAG.getVectorIdxConstant(
Idx, dl));
4786 InOp2, DAG.getVectorIdxConstant(
Idx, dl));
4787 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
4798 switch (
N->getOpcode()) {
4801 return WidenVecRes_STRICT_FSETCC(
N);
4808 return WidenVecRes_Convert_StrictFP(
N);
4814 unsigned NumOpers =
N->getNumOperands();
4815 unsigned Opcode =
N->getOpcode();
4822 NumElts = NumElts / 2;
4833 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4837 unsigned ConcatEnd = 0;
4844 for (
unsigned i = 1; i < NumOpers; ++i) {
4850 Oper = GetWidenedVector(Oper);
4856 DAG.getUNDEF(WideOpVT), Oper,
4857 DAG.getVectorIdxConstant(0, dl));
4869 while (CurNumElts != 0) {
4870 while (CurNumElts >= NumElts) {
4873 for (
unsigned i = 0; i < NumOpers; ++i) {
4876 EVT OpVT =
Op.getValueType();
4882 DAG.getVectorIdxConstant(
Idx, dl));
4888 EVT OperVT[] = {VT, MVT::Other};
4890 ConcatOps[ConcatEnd++] = Oper;
4893 CurNumElts -= NumElts;
4896 NumElts = NumElts / 2;
4901 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4904 for (
unsigned i = 0; i < NumOpers; ++i) {
4907 EVT OpVT =
Op.getValueType();
4911 DAG.getVectorIdxConstant(
Idx, dl));
4916 EVT WidenVT[] = {WidenEltVT, MVT::Other};
4918 ConcatOps[ConcatEnd++] = Oper;
4927 if (Chains.
size() == 1)
4928 NewChain = Chains[0];
4931 ReplaceValueWith(
SDValue(
N, 1), NewChain);
// Widen one result of an overflow-producing op (two results: value and
// overflow flag). Both result types are widened consistently; operands are
// either fetched already-widened or extended (path selection elided in this
// lossy copy). The sibling result is extracted and replaced so both values
// of N stay consistent.
4936SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo) {
4938 EVT ResVT =
N->getValueType(0);
4939 EVT OvVT =
N->getValueType(1);
4940 EVT WideResVT, WideOvVT;
4950 WideLHS = GetWidenedVector(
N->getOperand(0));
4951 WideRHS = GetWidenedVector(
N->getOperand(1));
// Alternate path: operands inserted into wide undef at index Zero.
4961 N->getOperand(0), Zero);
4964 N->getOperand(1), Zero);
4967 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
4968 SDNode *WideNode = DAG.getNode(
4969 N->getOpcode(),
DL, WideVTs, WideLHS, WideRHS).getNode();
// Replace the result we were NOT asked to widen with a value extracted
// back to its original (narrow) type.
4972 unsigned OtherNo = 1 - ResNo;
4973 EVT OtherVT =
N->getValueType(OtherNo);
4980 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
4983 return SDValue(WideNode, ResNo);
4996 unsigned Opcode =
N->getOpcode();
5005 InOp = ZExtPromotedInteger(InOp);
5016 InOp = GetWidenedVector(
N->getOperand(0));
5019 if (InVTEC == WidenEC) {
5020 if (
N->getNumOperands() == 1)
5021 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5022 if (
N->getNumOperands() == 3) {
5023 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5026 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
5028 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
5051 unsigned NumConcat =
5056 if (
N->getNumOperands() == 1)
5057 return DAG.getNode(Opcode,
DL, WidenVT, InVec);
5058 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
5063 DAG.getVectorIdxConstant(0,
DL));
5065 if (
N->getNumOperands() == 1)
5066 return DAG.getNode(Opcode,
DL, WidenVT, InVal);
5067 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
5076 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5077 for (
unsigned i=0; i < MinElts; ++i) {
5079 DAG.getVectorIdxConstant(i,
DL));
5080 if (
N->getNumOperands() == 1)
5081 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val);
5083 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
5086 return DAG.getBuildVector(WidenVT,
DL, Ops);
5095 EVT SrcVT = Src.getValueType();
5099 Src = GetWidenedVector(Src);
5100 SrcVT = Src.getValueType();
5107 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
5116 EVT SrcVT = Src.getValueType();
5120 Src = GetWidenedVector(Src);
5121 SrcVT = Src.getValueType();
5128 if (
N->getNumOperands() == 1)
5129 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
5131 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5132 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5136 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
5139SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
5150 unsigned Opcode =
N->getOpcode();
5156 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
5161 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5162 for (
unsigned i=0; i < MinElts; ++i) {
5164 DAG.getVectorIdxConstant(i,
DL));
5165 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
5169 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5171 return DAG.getBuildVector(WidenVT,
DL, Ops);
5174SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
5175 unsigned Opcode =
N->getOpcode();
5188 InOp = GetWidenedVector(InOp);
5195 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5202 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
5204 DAG.getVectorIdxConstant(i,
DL));
5221 while (Ops.
size() != WidenNumElts)
5224 return DAG.getBuildVector(WidenVT,
DL, Ops);
5230 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5231 return WidenVecRes_BinaryCanTrap(
N);
5241SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5242 SDValue FpValue =
N->getOperand(0);
5246 SDValue Arg = GetWidenedVector(FpValue);
5247 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5253 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5255 EVT ExpVT =
RHS.getValueType();
5260 ExpOp = ModifyToType(RHS, WideExpVT);
5263 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5269 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5270 if (
N->getNumOperands() == 1)
5271 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5273 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5274 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5278 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5279 {InOp,
Mask,
N->getOperand(2)});
5285 cast<VTSDNode>(
N->getOperand(1))->getVT()
5286 .getVectorElementType(),
5288 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5289 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5290 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5293SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5294 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5295 return GetWidenedVector(WidenVec);
5300 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5301 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
5303 return DAG.getAddrSpaceCast(
SDLoc(
N), WidenVT, InOp,
5304 AddrSpaceCastN->getSrcAddressSpace(),
5305 AddrSpaceCastN->getDestAddressSpace());
5311 EVT VT =
N->getValueType(0);
5315 switch (getTypeAction(InVT)) {
5329 SDValue NInOp = GetPromotedInteger(InOp);
5331 if (WidenVT.
bitsEq(NInVT)) {
5334 if (DAG.getDataLayout().isBigEndian()) {
5339 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5358 InOp = GetWidenedVector(InOp);
5360 if (WidenVT.
bitsEq(InVT))
5370 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5375 unsigned NewNumParts = WidenSize / InSize;
5388 EVT OrigInVT =
N->getOperand(0).getValueType();
5401 if (WidenSize % InSize == 0) {
5408 DAG.ExtractVectorElements(InOp, Ops);
5409 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5421 return CreateStackStoreLoad(InOp, WidenVT);
5427 EVT VT =
N->getValueType(0);
5431 EVT EltVT =
N->getOperand(0).getValueType();
5438 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5439 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5441 return DAG.getBuildVector(WidenVT, dl, NewOps);
5445 EVT InVT =
N->getOperand(0).getValueType();
5448 unsigned NumOperands =
N->getNumOperands();
5450 bool InputWidened =
false;
5454 if (WidenNumElts % NumInElts == 0) {
5456 unsigned NumConcat = WidenNumElts / NumInElts;
5457 SDValue UndefVal = DAG.getUNDEF(InVT);
5459 for (
unsigned i=0; i < NumOperands; ++i)
5460 Ops[i] =
N->getOperand(i);
5461 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5466 InputWidened =
true;
5470 for (i=1; i < NumOperands; ++i)
5471 if (!
N->getOperand(i).isUndef())
5474 if (i == NumOperands)
5477 return GetWidenedVector(
N->getOperand(0));
5479 if (NumOperands == 2) {
5481 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5487 for (
unsigned i = 0; i < NumInElts; ++i) {
5489 MaskOps[i + NumInElts] = i + WidenNumElts;
5491 return DAG.getVectorShuffle(WidenVT, dl,
5492 GetWidenedVector(
N->getOperand(0)),
5493 GetWidenedVector(
N->getOperand(1)),
5500 "Cannot use build vectors to widen CONCAT_VECTOR result");
5508 for (
unsigned i=0; i < NumOperands; ++i) {
5511 InOp = GetWidenedVector(InOp);
5512 for (
unsigned j = 0;
j < NumInElts; ++
j)
5514 DAG.getVectorIdxConstant(j, dl));
5516 SDValue UndefVal = DAG.getUNDEF(EltVT);
5517 for (;
Idx < WidenNumElts; ++
Idx)
5518 Ops[
Idx] = UndefVal;
5519 return DAG.getBuildVector(WidenVT, dl, Ops);
5522SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5523 EVT VT =
N->getValueType(0);
5525 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5532SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5533 EVT VT =
N->getValueType(0);
5540 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5542 InOp = GetWidenedVector(InOp);
5548 if (IdxVal == 0 && InVT == WidenVT)
5555 assert(IdxVal % VTNumElts == 0 &&
5556 "Expected Idx to be a multiple of subvector minimum vector length");
5557 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5570 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5571 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5572 "down type's element count");
5579 for (;
I < VTNumElts / GCD; ++
I)
5582 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5583 for (;
I < WidenNumElts / GCD; ++
I)
5590 "EXTRACT_SUBVECTOR for scalable vectors");
5597 for (i = 0; i < VTNumElts; ++i)
5599 DAG.getVectorIdxConstant(IdxVal + i, dl));
5601 SDValue UndefVal = DAG.getUNDEF(EltVT);
5602 for (; i < WidenNumElts; ++i)
5604 return DAG.getBuildVector(WidenVT, dl, Ops);
5615SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5616 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5619 N->getOperand(1),
N->getOperand(2));
5632 if (!
LD->getMemoryVT().isByteSized()) {
5636 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5645 EVT LdVT =
LD->getMemoryVT();
5656 const auto *MMO =
LD->getMemOperand();
5658 DAG.getLoadVP(WideVT,
DL,
LD->getChain(),
LD->getBasePtr(), Mask, EVL,
5672 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5674 Result = GenWidenVectorLoads(LdChain, LD);
5681 if (LdChain.
size() == 1)
5682 NewChain = LdChain[0];
5688 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5699 SDValue EVL =
N->getVectorLength();
5706 "Unable to widen binary VP op");
5707 Mask = GetWidenedVector(Mask);
5708 assert(
Mask.getValueType().getVectorElementCount() ==
5711 "Unable to widen vector load");
5714 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5715 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5716 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
5730 "Unable to widen VP strided load");
5731 Mask = GetWidenedVector(Mask);
5734 assert(
Mask.getValueType().getVectorElementCount() ==
5736 "Data and mask vectors should have the same number of elements");
5738 SDValue Res = DAG.getStridedLoadVP(
5739 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
5740 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
5741 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
5742 N->isExpandingLoad());
5754 EVT MaskVT =
Mask.getValueType();
5755 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5763 Mask = ModifyToType(Mask, WideMaskVT,
true);
5765 SDValue Res = DAG.getMaskedLoad(
5766 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
5767 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
5768 ExtType,
N->isExpandingLoad());
5779 EVT MaskVT =
Mask.getValueType();
5780 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5789 Mask = ModifyToType(Mask, WideMaskVT,
true);
5794 Index.getValueType().getScalarType(),
5802 N->getMemoryVT().getScalarType(), NumElts);
5803 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
5804 WideMemVT, dl, Ops,
N->getMemOperand(),
5805 N->getIndexType(),
N->getExtensionType());
5822 N->getMemoryVT().getScalarType(), WideEC);
5823 Mask = GetWidenedMask(Mask, WideEC);
5826 Mask,
N->getVectorLength()};
5827 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
5828 dl, Ops,
N->getMemOperand(),
N->getIndexType());
5838 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
5866 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
5867 return N->getOperand(OpNo).getValueType();
5875 N =
N.getOperand(0);
5877 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
5878 if (!
N->getOperand(i)->isUndef())
5880 N =
N.getOperand(0);
5884 N =
N.getOperand(0);
5886 N =
N.getOperand(0);
5913 { MaskVT, MVT::Other }, Ops);
5914 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
5924 if (MaskScalarBits < ToMaskScalBits) {
5928 }
else if (MaskScalarBits > ToMaskScalBits) {
5934 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
5936 "Mask should have the right element size by now.");
5939 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
5941 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
5946 EVT SubVT =
Mask->getValueType(0);
5952 assert((
Mask->getValueType(0) == ToMaskVT) &&
5953 "A mask of ToMaskVT should have been produced by now.");
5974 EVT CondVT =
Cond->getValueType(0);
5978 EVT VSelVT =
N->getValueType(0);
5990 EVT FinalVT = VSelVT;
6002 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
6020 EVT ToMaskVT = VSelVT;
6027 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6043 if (ScalarBits0 != ScalarBits1) {
6044 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
6045 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
6057 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
6058 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
6062 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6075 unsigned Opcode =
N->getOpcode();
6077 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
6078 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6079 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6081 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
6087 Cond1 = GetWidenedVector(Cond1);
6095 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
6096 SDValue Res = ModifyToType(SplitSelect, WidenVT);
6101 Cond1 = ModifyToType(Cond1, CondWidenVT);
6104 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6105 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6107 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
6108 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
6110 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
6114 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
6115 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
6118 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
6123 return DAG.getUNDEF(WidenVT);
6127 EVT VT =
N->getValueType(0);
6134 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
6135 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
6139 for (
unsigned i = 0; i != NumElts; ++i) {
6140 int Idx =
N->getMaskElt(i);
6141 if (
Idx < (
int)NumElts)
6146 for (
unsigned i = NumElts; i != WidenNumElts; ++i)
6148 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
6152 EVT VT =
N->getValueType(0);
6157 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
6163 unsigned IdxVal = WidenNumElts - VTNumElts;
6176 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
6179 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
6180 "down type's element count");
6183 for (; i < VTNumElts / GCD; ++i)
6186 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
6187 for (; i < WidenNumElts / GCD; ++i)
6196 for (
unsigned i = 0; i != VTNumElts; ++i) {
6197 Mask.push_back(IdxVal + i);
6199 for (
unsigned i = VTNumElts; i != WidenNumElts; ++i)
6202 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
6207 assert(
N->getValueType(0).isVector() &&
6208 N->getOperand(0).getValueType().isVector() &&
6209 "Operands must be vectors");
6223 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
6224 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
6231 InOp1 = GetWidenedVector(InOp1);
6232 InOp2 = GetWidenedVector(InOp2);
6234 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
6235 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
6242 "Input not widened to expected type!");
6244 if (
N->getOpcode() == ISD::VP_SETCC) {
6247 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6248 N->getOperand(2), Mask,
N->getOperand(4));
6255 assert(
N->getValueType(0).isVector() &&
6256 N->getOperand(1).getValueType().isVector() &&
6257 "Operands must be vectors");
6258 EVT VT =
N->getValueType(0);
6269 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6274 for (
unsigned i = 0; i != NumElts; ++i) {
6276 DAG.getVectorIdxConstant(i, dl));
6278 DAG.getVectorIdxConstant(i, dl));
6280 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6281 {Chain, LHSElem, RHSElem, CC});
6282 Chains[i] = Scalars[i].getValue(1);
6283 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6284 DAG.getBoolConstant(
true, dl, EltVT, VT),
6285 DAG.getBoolConstant(
false, dl, EltVT, VT));
6289 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6291 return DAG.getBuildVector(WidenVT, dl, Scalars);
6297bool DAGTypeLegalizer::WidenVectorOperand(
SDNode *
N,
unsigned OpNo) {
6298 LLVM_DEBUG(
dbgs() <<
"Widen node operand " << OpNo <<
": ";
N->dump(&DAG));
6302 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
6305 switch (
N->getOpcode()) {
6308 dbgs() <<
"WidenVectorOperand op #" << OpNo <<
": ";
6319 case ISD::STORE: Res = WidenVecOp_STORE(
N);
break;
6320 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(
N, OpNo);
break;
6321 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6322 Res = WidenVecOp_VP_STRIDED_STORE(
N, OpNo);
6327 Res = WidenVecOp_EXTEND_VECTOR_INREG(
N);
6329 case ISD::MSTORE: Res = WidenVecOp_MSTORE(
N, OpNo);
break;
6330 case ISD::MGATHER: Res = WidenVecOp_MGATHER(
N, OpNo);
break;
6332 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(
N, OpNo);
break;
6333 case ISD::SETCC: Res = WidenVecOp_SETCC(
N);
break;
6341 Res = WidenVecOp_UnrollVectorOp(
N);
6348 Res = WidenVecOp_EXTEND(
N);
6353 Res = WidenVecOp_CMP(
N);
6369 Res = WidenVecOp_Convert(
N);
6374 Res = WidenVecOp_FP_TO_XINT_SAT(
N);
6392 Res = WidenVecOp_VECREDUCE(
N);
6396 Res = WidenVecOp_VECREDUCE_SEQ(
N);
6398 case ISD::VP_REDUCE_FADD:
6399 case ISD::VP_REDUCE_SEQ_FADD:
6400 case ISD::VP_REDUCE_FMUL:
6401 case ISD::VP_REDUCE_SEQ_FMUL:
6402 case ISD::VP_REDUCE_ADD:
6403 case ISD::VP_REDUCE_MUL:
6404 case ISD::VP_REDUCE_AND:
6405 case ISD::VP_REDUCE_OR:
6406 case ISD::VP_REDUCE_XOR:
6407 case ISD::VP_REDUCE_SMAX:
6408 case ISD::VP_REDUCE_SMIN:
6409 case ISD::VP_REDUCE_UMAX:
6410 case ISD::VP_REDUCE_UMIN:
6411 case ISD::VP_REDUCE_FMAX:
6412 case ISD::VP_REDUCE_FMIN:
6413 case ISD::VP_REDUCE_FMAXIMUM:
6414 case ISD::VP_REDUCE_FMINIMUM:
6415 Res = WidenVecOp_VP_REDUCE(
N);
6417 case ISD::VP_CTTZ_ELTS:
6418 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
6419 Res = WidenVecOp_VP_CttzElements(
N);
6424 if (!Res.
getNode())
return false;
6432 if (
N->isStrictFPOpcode())
6434 "Invalid operand expansion");
6437 "Invalid operand expansion");
6439 ReplaceValueWith(
SDValue(
N, 0), Res);
6445 EVT VT =
N->getValueType(0);
6450 "Unexpected type action");
6451 InOp = GetWidenedVector(InOp);
6454 "Input wasn't widened!");
6465 FixedEltVT == InEltVT) {
6467 "Not enough elements in the fixed type for the operand!");
6469 "We can't have the same type as we started with!");
6472 DAG.getUNDEF(FixedVT), InOp,
6473 DAG.getVectorIdxConstant(0,
DL));
6476 DAG.getVectorIdxConstant(0,
DL));
6485 return WidenVecOp_Convert(
N);
6490 switch (
N->getOpcode()) {
6505 EVT OpVT =
N->getOperand(0).getValueType();
6506 EVT ResVT =
N->getValueType(0);
6514 DAG.getVectorIdxConstant(0, dl));
6516 DAG.getVectorIdxConstant(0, dl));
6522 LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
6523 RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);
6525 return DAG.getNode(
N->getOpcode(), dl, ResVT, LHS, RHS);
6532 return DAG.UnrollVectorOp(
N);
6537 EVT ResultVT =
N->getValueType(0);
6539 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6548 {WideArg,
Test},
N->getFlags());
6555 DAG.getVectorIdxConstant(0,
DL));
6557 EVT OpVT =
N->getOperand(0).getValueType();
6560 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6565 EVT VT =
N->getValueType(0);
6568 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6571 "Unexpected type action");
6572 InOp = GetWidenedVector(InOp);
6574 unsigned Opcode =
N->getOpcode();
6580 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6582 if (
N->isStrictFPOpcode()) {
6584 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6585 {
N->getOperand(0), InOp,
N->getOperand(2) });
6587 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6588 {
N->getOperand(0), InOp });
6594 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6596 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6599 DAG.getVectorIdxConstant(0, dl));
6607 if (
N->isStrictFPOpcode()) {
6610 for (
unsigned i=0; i < NumElts; ++i) {
6612 DAG.getVectorIdxConstant(i, dl));
6613 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6617 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6619 for (
unsigned i = 0; i < NumElts; ++i)
6620 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6622 InOp, DAG.getVectorIdxConstant(i, dl)));
6625 return DAG.getBuildVector(VT, dl, Ops);
6629 EVT DstVT =
N->getValueType(0);
6630 SDValue Src = GetWidenedVector(
N->getOperand(0));
6631 EVT SrcVT = Src.getValueType();
6640 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6643 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6647 return DAG.UnrollVectorOp(
N);
6651 EVT VT =
N->getValueType(0);
6652 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6660 if (!VT.
isVector() && VT != MVT::x86mmx &&
6667 DAG.getVectorIdxConstant(0, dl));
6681 .divideCoefficientBy(EltSize);
6686 DAG.getVectorIdxConstant(0, dl));
6691 return CreateStackStoreLoad(InOp, VT);
6695 EVT VT =
N->getValueType(0);
6697 EVT InVT =
N->getOperand(0).getValueType();
6702 unsigned NumOperands =
N->getNumOperands();
6705 for (i = 1; i < NumOperands; ++i)
6706 if (!
N->getOperand(i).isUndef())
6709 if (i == NumOperands)
6710 return GetWidenedVector(
N->getOperand(0));
6720 for (
unsigned i=0; i < NumOperands; ++i) {
6724 "Unexpected type action");
6725 InOp = GetWidenedVector(InOp);
6726 for (
unsigned j = 0;
j < NumInElts; ++
j)
6728 DAG.getVectorIdxConstant(j, dl));
6730 return DAG.getBuildVector(VT, dl, Ops);
6733SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
6734 EVT VT =
N->getValueType(0);
6739 SubVec = GetWidenedVector(SubVec);
6745 bool IndicesValid =
false;
6748 IndicesValid =
true;
6752 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
6753 Attribute::VScaleRange);
6758 IndicesValid =
true;
6764 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
6769 "INSERT_SUBVECTOR");
6772SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
6773 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6775 N->getValueType(0), InOp,
N->getOperand(1));
6778SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
6779 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6781 N->getValueType(0), InOp,
N->getOperand(1));
6784SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
6785 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6786 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
6794 if (!
ST->getMemoryVT().getScalarType().isByteSized())
6797 if (
ST->isTruncatingStore())
6816 StVal = GetWidenedVector(StVal);
6820 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
6821 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
6822 EVL, StVT,
ST->getMemOperand(),
6823 ST->getAddressingMode());
6827 if (GenWidenVectorStores(StChain, ST)) {
6828 if (StChain.
size() == 1)
6837SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
6838 assert((OpNo == 1 || OpNo == 3) &&
6839 "Can widen only data or mask operand of vp_store");
6847 StVal = GetWidenedVector(StVal);
6853 "Unable to widen VP store");
6854 Mask = GetWidenedVector(Mask);
6856 Mask = GetWidenedVector(Mask);
6862 "Unable to widen VP store");
6863 StVal = GetWidenedVector(StVal);
6866 assert(
Mask.getValueType().getVectorElementCount() ==
6868 "Mask and data vectors should have the same number of elements");
6869 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
6870 ST->getOffset(), Mask,
ST->getVectorLength(),
6871 ST->getMemoryVT(),
ST->getMemOperand(),
6872 ST->getAddressingMode(),
ST->isTruncatingStore(),
6873 ST->isCompressingStore());
6878 assert((OpNo == 1 || OpNo == 4) &&
6879 "Can widen only data or mask operand of vp_strided_store");
6888 "Unable to widen VP strided store");
6892 "Unable to widen VP strided store");
6894 StVal = GetWidenedVector(StVal);
6895 Mask = GetWidenedVector(Mask);
6898 Mask.getValueType().getVectorElementCount() &&
6899 "Data and mask vectors should have the same number of elements");
6901 return DAG.getStridedStoreVP(
6908SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
6909 assert((OpNo == 1 || OpNo == 4) &&
6910 "Can widen only data or mask operand of mstore");
6913 EVT MaskVT =
Mask.getValueType();
6919 StVal = GetWidenedVector(StVal);
6926 Mask = ModifyToType(Mask, WideMaskVT,
true);
6930 Mask = ModifyToType(Mask, WideMaskVT,
true);
6936 StVal = ModifyToType(StVal, WideVT);
6939 assert(
Mask.getValueType().getVectorNumElements() ==
6941 "Mask and data vectors should have the same number of elements");
6948SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
6949 assert(OpNo == 4 &&
"Can widen only the index of mgather");
6950 auto *MG = cast<MaskedGatherSDNode>(
N);
6951 SDValue DataOp = MG->getPassThru();
6953 SDValue Scale = MG->getScale();
6961 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
6962 MG->getMemOperand(), MG->getIndexType(),
6963 MG->getExtensionType());
6969SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
6978 DataOp = GetWidenedVector(DataOp);
6982 EVT IndexVT =
Index.getValueType();
6988 EVT MaskVT =
Mask.getValueType();
6991 Mask = ModifyToType(Mask, WideMaskVT,
true);
6996 }
else if (OpNo == 4) {
7004 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
7009SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
7018 DataOp = GetWidenedVector(DataOp);
7021 Mask = GetWidenedMask(Mask, WideEC);
7024 }
else if (OpNo == 3) {
7033 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
7038 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
7039 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
7041 EVT VT =
N->getValueType(0);
7056 SVT, InOp0, InOp1,
N->getOperand(2));
7063 DAG.getVectorIdxConstant(0, dl));
7065 EVT OpVT =
N->getOperand(0).getValueType();
7068 return DAG.getNode(ExtendCode, dl, VT,
CC);
7078 EVT VT =
N->getValueType(0);
7080 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
7087 for (
unsigned i = 0; i != NumElts; ++i) {
7089 DAG.getVectorIdxConstant(i, dl));
7091 DAG.getVectorIdxConstant(i, dl));
7093 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
7094 {Chain, LHSElem, RHSElem, CC});
7095 Chains[i] = Scalars[i].getValue(1);
7096 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
7097 DAG.getBoolConstant(
true, dl, EltVT, VT),
7098 DAG.getBoolConstant(
false, dl, EltVT, VT));
7102 ReplaceValueWith(
SDValue(
N, 1), NewChain);
7104 return DAG.getBuildVector(VT, dl, Scalars);
7109 SDValue Op = GetWidenedVector(
N->getOperand(0));
7110 EVT OrigVT =
N->getOperand(0).getValueType();
7111 EVT WideVT =
Op.getValueType();
7115 unsigned Opc =
N->getOpcode();
7117 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7118 assert(NeutralElem &&
"Neutral element must exist");
7125 unsigned GCD = std::gcd(OrigElts, WideElts);
7128 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7129 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7131 DAG.getVectorIdxConstant(
Idx, dl));
7132 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
7135 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7137 DAG.getVectorIdxConstant(
Idx, dl));
7139 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
7149 EVT WideVT =
Op.getValueType();
7153 unsigned Opc =
N->getOpcode();
7155 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7162 unsigned GCD = std::gcd(OrigElts, WideElts);
7165 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7166 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7168 DAG.getVectorIdxConstant(
Idx, dl));
7169 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
7172 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7174 DAG.getVectorIdxConstant(
Idx, dl));
7176 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
7180 assert(
N->isVPOpcode() &&
"Expected VP opcode");
7183 SDValue Op = GetWidenedVector(
N->getOperand(1));
7185 Op.getValueType().getVectorElementCount());
7187 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
7188 {N->getOperand(0), Op, Mask, N->getOperand(3)},
7196 EVT VT =
N->getValueType(0);
7207 DAG.getVectorIdxConstant(0,
DL));
7217 return DAG.getNode(
N->getOpcode(),
DL,
N->getValueType(0),
7218 {Source, Mask, N->getOperand(2)},
N->getFlags());
7235 unsigned WidenEx = 0) {
7240 unsigned AlignInBits =
Align*8;
7243 EVT RetVT = WidenEltVT;
7244 if (!Scalable && Width == WidenEltWidth)
7258 (WidenWidth % MemVTWidth) == 0 &&
7260 (MemVTWidth <= Width ||
7261 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7262 if (MemVTWidth == WidenWidth)
7281 (WidenWidth % MemVTWidth) == 0 &&
7283 (MemVTWidth <= Width ||
7284 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7293 return std::nullopt;
7304 unsigned Start,
unsigned End) {
7305 SDLoc dl(LdOps[Start]);
7306 EVT LdTy = LdOps[Start].getValueType();
7314 for (
unsigned i = Start + 1; i !=
End; ++i) {
7315 EVT NewLdTy = LdOps[i].getValueType();
7316 if (NewLdTy != LdTy) {
7337 EVT LdVT =
LD->getMemoryVT();
7351 TypeSize WidthDiff = WidenWidth - LdWidth;
7358 std::optional<EVT> FirstVT =
7359 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7366 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7371 std::optional<EVT> NewVT = FirstVT;
7373 TypeSize NewVTWidth = FirstVTWidth;
7375 RemainingWidth -= NewVTWidth;
7382 NewVTWidth = NewVT->getSizeInBits();
7388 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7389 LD->getOriginalAlign(), MMOFlags, AAInfo);
7393 if (MemVTs.
empty()) {
7395 if (!FirstVT->isVector()) {
7402 if (FirstVT == WidenVT)
7407 unsigned NumConcat =
7410 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7411 ConcatOps[0] = LdOp;
7412 for (
unsigned i = 1; i != NumConcat; ++i)
7413 ConcatOps[i] = UndefVal;
7425 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7428 for (
EVT MemVT : MemVTs) {
7429 Align NewAlign = ScaledOffset == 0
7430 ?
LD->getOriginalAlign()
7433 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7437 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7442 if (!LdOps[0].getValueType().
isVector())
7452 EVT LdTy = LdOps[i].getValueType();
7455 for (--i; i >= 0; --i) {
7456 LdTy = LdOps[i].getValueType();
7463 ConcatOps[--
Idx] = LdOps[i];
7464 for (--i; i >= 0; --i) {
7465 EVT NewLdTy = LdOps[i].getValueType();
7466 if (NewLdTy != LdTy) {
7477 WidenOps[j] = ConcatOps[
Idx+j];
7478 for (;
j != NumOps; ++
j)
7479 WidenOps[j] = DAG.getUNDEF(LdTy);
7486 ConcatOps[--
Idx] = LdOps[i];
7497 SDValue UndefVal = DAG.getUNDEF(LdTy);
7500 for (; i !=
End-
Idx; ++i)
7501 WidenOps[i] = ConcatOps[
Idx+i];
7502 for (; i != NumOps; ++i)
7503 WidenOps[i] = UndefVal;
7515 EVT LdVT =
LD->getMemoryVT();
7528 "not yet supported");
7539 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7540 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7546 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7547 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7548 LD->getOriginalAlign(), MMOFlags, AAInfo);
7553 SDValue UndefVal = DAG.getUNDEF(EltVT);
7554 for (; i != WidenNumElts; ++i)
7557 return DAG.getBuildVector(WidenVT, dl, Ops);
7569 SDValue ValOp = GetWidenedVector(
ST->getValue());
7572 EVT StVT =
ST->getMemoryVT();
7580 "Mismatch between store and value types");
7594 std::optional<EVT> NewVT =
7599 TypeSize NewVTWidth = NewVT->getSizeInBits();
7602 StWidth -= NewVTWidth;
7603 MemVTs.
back().second++;
7607 for (
const auto &Pair : MemVTs) {
7608 EVT NewVT = Pair.first;
7609 unsigned Count = Pair.second;
7615 Align NewAlign = ScaledOffset == 0
7616 ?
ST->getOriginalAlign()
7619 DAG.getVectorIdxConstant(
Idx, dl));
7620 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7625 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
7637 DAG.getVectorIdxConstant(
Idx++, dl));
7639 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
7643 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
7657 bool FillWithZeroes) {
7662 "input and widen element type must match");
7664 "cannot modify scalable vectors in this way");
7676 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
7679 for (
unsigned i = 1; i != NumConcat; ++i)
7687 DAG.getVectorIdxConstant(0, dl));
7690 "Scalable vectors should have been handled already.");
7698 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
7702 DAG.getVectorIdxConstant(
Idx, dl));
7704 SDValue UndefVal = DAG.getUNDEF(EltVT);
7705 for (;
Idx < WidenNumElts; ++
Idx)
7706 Ops[
Idx] = UndefVal;
7708 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
7709 if (!FillWithZeroes)
7713 "We expect to never want to FillWithZeroes for non-integral types.");
7716 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
7717 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
7719 return DAG.getNode(
ISD::AND, dl, NVT, Widened,
7720 DAG.getBuildVector(NVT, dl,
MaskOps));
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
amdgpu AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent an VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
EVT changeElementType(EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight), and returns the integer EVT with that number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type that is chosen by the caller.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.