35#define DEBUG_TYPE "legalize-types"
41void DAGTypeLegalizer::ScalarizeVectorResult(
SDNode *
N,
unsigned ResNo) {
46 switch (
N->getOpcode()) {
49 dbgs() <<
"ScalarizeVectorResult #" << ResNo <<
": ";
61 case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(
N);
break;
63 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(
N));
break;
69 case ISD::SETCC: R = ScalarizeVecRes_SETCC(
N);
break;
70 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(
N);
break;
76 R = ScalarizeVecRes_VecInregOp(
N);
119 R = ScalarizeVecRes_UnaryOp(
N);
122 R = ScalarizeVecRes_ADDRSPACECAST(
N);
125 R = ScalarizeVecRes_FFREXP(
N, ResNo);
174 R = ScalarizeVecRes_BinOp(
N);
179 R = ScalarizeVecRes_CMP(
N);
185 R = ScalarizeVecRes_TernaryOp(
N);
188#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
189 case ISD::STRICT_##DAGN:
190#include "llvm/IR/ConstrainedOps.def"
191 R = ScalarizeVecRes_StrictFPOp(
N);
196 R = ScalarizeVecRes_FP_TO_XINT_SAT(
N);
205 R = ScalarizeVecRes_OverflowOp(
N, ResNo);
215 R = ScalarizeVecRes_FIX(
N);
221 SetScalarizedVector(
SDValue(
N, ResNo), R);
225 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
226 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
228 LHS.getValueType(), LHS, RHS,
N->getFlags());
236 if (getTypeAction(
LHS.getValueType()) ==
238 LHS = GetScalarizedVector(LHS);
239 RHS = GetScalarizedVector(RHS);
241 EVT VT =
LHS.getValueType().getVectorElementType();
249 N->getValueType(0).getVectorElementType(), LHS, RHS);
253 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
254 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
255 SDValue Op2 = GetScalarizedVector(
N->getOperand(2));
261 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
262 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
268SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(
SDNode *
N,
unsigned ResNo) {
269 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
270 "Unexpected vector type!");
271 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
273 EVT VT0 =
N->getValueType(0);
274 EVT VT1 =
N->getValueType(1);
279 {VT0.getScalarType(), VT1.getScalarType()}, Elt)
283 unsigned OtherNo = 1 - ResNo;
284 EVT OtherVT =
N->getValueType(OtherNo);
286 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
290 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
293 return SDValue(ScalarNode, ResNo);
297 EVT VT =
N->getValueType(0).getVectorElementType();
298 unsigned NumOpers =
N->getNumOperands();
300 EVT ValueVTs[] = {VT, MVT::Other};
309 for (
unsigned i = 1; i < NumOpers; ++i) {
315 Oper = GetScalarizedVector(Oper);
326 Opers,
N->getFlags());
337 EVT ResVT =
N->getValueType(0);
338 EVT OvVT =
N->getValueType(1);
342 ScalarLHS = GetScalarizedVector(
N->getOperand(0));
343 ScalarRHS = GetScalarizedVector(
N->getOperand(1));
348 ScalarLHS = ElemsLHS[0];
349 ScalarRHS = ElemsRHS[0];
355 N->getOpcode(),
DL, ScalarVTs, ScalarLHS, ScalarRHS).
getNode();
359 unsigned OtherNo = 1 - ResNo;
360 EVT OtherVT =
N->getValueType(OtherNo);
362 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
366 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
369 return SDValue(ScalarNode, ResNo);
374 SDValue Op = DisintegrateMERGE_VALUES(
N, ResNo);
375 return GetScalarizedVector(
Op);
380 if (
Op.getValueType().isVector()
381 &&
Op.getValueType().getVectorNumElements() == 1
382 && !isSimpleLegalType(
Op.getValueType()))
383 Op = GetScalarizedVector(
Op);
384 EVT NewVT =
N->getValueType(0).getVectorElementType();
389SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(
SDNode *
N) {
390 EVT EltVT =
N->getValueType(0).getVectorElementType();
399SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
401 N->getValueType(0).getVectorElementType(),
402 N->getOperand(0),
N->getOperand(1));
408 EVT OpVT =
Op.getValueType();
412 Op = GetScalarizedVector(
Op);
419 N->getValueType(0).getVectorElementType(),
Op,
424 SDValue Op = GetScalarizedVector(
N->getOperand(0));
429SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
433 EVT EltVT =
N->getValueType(0).getVectorElementType();
434 if (
Op.getValueType() != EltVT)
441 assert(
N->isUnindexed() &&
"Indexed vector load?");
445 N->getValueType(0).getVectorElementType(),
SDLoc(
N),
N->getChain(),
446 N->getBasePtr(), DAG.
getUNDEF(
N->getBasePtr().getValueType()),
447 N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
448 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
N->getAAInfo());
458 EVT DestVT =
N->getValueType(0).getVectorElementType();
460 EVT OpVT =
Op.getValueType();
470 Op = GetScalarizedVector(
Op);
480 EVT EltVT =
N->getValueType(0).getVectorElementType();
482 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
491 EVT OpVT =
Op.getValueType();
493 EVT EltVT =
N->getValueType(0).getVectorElementType();
496 Op = GetScalarizedVector(
Op);
502 switch (
N->getOpcode()) {
514SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(
SDNode *
N) {
515 EVT DestVT =
N->getValueType(0).getVectorElementType();
517 EVT OpVT =
Op.getValueType();
527 Op = GetScalarizedVector(
Op);
533 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
534 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
535 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
539SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(
SDNode *
N) {
542 EVT EltVT =
N->getValueType(0).getVectorElementType();
551 EVT OpVT =
Cond.getValueType();
564 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
578 EVT OpVT =
Cond->getOperand(0).getValueType();
585 EVT CondVT =
Cond.getValueType();
586 if (ScalarBool != VecBool) {
587 switch (ScalarBool) {
608 auto BoolVT = getSetCCResultType(CondVT);
609 if (BoolVT.bitsLT(CondVT))
614 GetScalarizedVector(
N->getOperand(2)));
618 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
620 LHS.getValueType(),
N->getOperand(0), LHS,
621 GetScalarizedVector(
N->getOperand(2)));
625 SDValue LHS = GetScalarizedVector(
N->getOperand(2));
627 N->getOperand(0),
N->getOperand(1),
628 LHS, GetScalarizedVector(
N->getOperand(3)),
633 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
636SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(
SDNode *
N) {
638 SDValue Arg =
N->getOperand(2).getOperand(0);
640 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
641 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
642 return GetScalarizedVector(
N->getOperand(
Op));
645SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(
SDNode *
N) {
647 EVT SrcVT = Src.getValueType();
652 Src = GetScalarizedVector(Src);
658 EVT DstVT =
N->getValueType(0).getVectorElementType();
659 return DAG.
getNode(
N->getOpcode(), dl, DstVT, Src,
N->getOperand(1));
663 assert(
N->getValueType(0).isVector() &&
664 N->getOperand(0).getValueType().isVector() &&
665 "Operand types must be vectors");
668 EVT OpVT =
LHS.getValueType();
669 EVT NVT =
N->getValueType(0).getVectorElementType();
674 LHS = GetScalarizedVector(LHS);
675 RHS = GetScalarizedVector(RHS);
691 return DAG.
getNode(ExtendCode,
DL, NVT, Res);
699 EVT ResultVT =
N->getValueType(0).getVectorElementType();
702 Arg = GetScalarizedVector(Arg);
715 return DAG.
getNode(ExtendCode,
DL, ResultVT, Res);
722bool DAGTypeLegalizer::ScalarizeVectorOperand(
SDNode *
N,
unsigned OpNo) {
727 switch (
N->getOpcode()) {
730 dbgs() <<
"ScalarizeVectorOperand Op #" << OpNo <<
": ";
737 Res = ScalarizeVecOp_BITCAST(
N);
749 Res = ScalarizeVecOp_UnaryOp(
N);
755 Res = ScalarizeVecOp_UnaryOp_StrictFP(
N);
758 Res = ScalarizeVecOp_CONCAT_VECTORS(
N);
761 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(
N);
764 Res = ScalarizeVecOp_VSELECT(
N);
767 Res = ScalarizeVecOp_VSETCC(
N);
770 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
773 Res = ScalarizeVecOp_STRICT_FP_ROUND(
N, OpNo);
776 Res = ScalarizeVecOp_FP_ROUND(
N, OpNo);
779 Res = ScalarizeVecOp_STRICT_FP_EXTEND(
N);
782 Res = ScalarizeVecOp_FP_EXTEND(
N);
799 Res = ScalarizeVecOp_VECREDUCE(
N);
803 Res = ScalarizeVecOp_VECREDUCE_SEQ(
N);
807 Res = ScalarizeVecOp_CMP(
N);
812 if (!Res.
getNode())
return false;
820 "Invalid operand expansion");
822 ReplaceValueWith(
SDValue(
N, 0), Res);
829 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
831 N->getValueType(0), Elt);
837 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
838 "Unexpected vector type!");
839 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
841 N->getValueType(0).getScalarType(), Elt);
849SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(
SDNode *
N) {
850 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
851 "Unexpected vector type!");
852 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
854 {
N->getValueType(0).getScalarType(), MVT::Other },
855 {
N->getOperand(0), Elt });
865 ReplaceValueWith(
SDValue(
N, 0), Res);
870SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(
SDNode *
N) {
872 for (
unsigned i = 0, e =
N->getNumOperands(); i < e; ++i)
873 Ops[i] = GetScalarizedVector(
N->getOperand(i));
879SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
880 EVT VT =
N->getValueType(0);
881 SDValue Res = GetScalarizedVector(
N->getOperand(0));
893 SDValue ScalarCond = GetScalarizedVector(
N->getOperand(0));
894 EVT VT =
N->getValueType(0);
904 assert(
N->getValueType(0).isVector() &&
905 N->getOperand(0).getValueType().isVector() &&
906 "Operand types must be vectors");
907 assert(
N->getValueType(0) == MVT::v1i1 &&
"Expected v1i1 type");
909 EVT VT =
N->getValueType(0);
910 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
911 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
913 EVT OpVT =
N->getOperand(0).getValueType();
925 Res = DAG.
getNode(ExtendCode,
DL, NVT, Res);
933 assert(
N->isUnindexed() &&
"Indexed store of one-element vector?");
934 assert(OpNo == 1 &&
"Do not know how to scalarize this operand!");
937 if (
N->isTruncatingStore())
939 N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
940 N->getBasePtr(),
N->getPointerInfo(),
941 N->getMemoryVT().getVectorElementType(),
N->getOriginalAlign(),
942 N->getMemOperand()->getFlags(),
N->getAAInfo());
944 return DAG.
getStore(
N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
945 N->getBasePtr(),
N->getPointerInfo(),
946 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
952SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(
SDNode *
N,
unsigned OpNo) {
953 assert(OpNo == 0 &&
"Wrong operand for scalarization!");
954 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
956 N->getValueType(0).getVectorElementType(), Elt,
961SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(
SDNode *
N,
963 assert(OpNo == 1 &&
"Wrong operand for scalarization!");
964 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
968 {
N->getOperand(0), Elt,
N->getOperand(2) });
977 ReplaceValueWith(
SDValue(
N, 0), Res);
984 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
986 N->getValueType(0).getVectorElementType(), Elt);
992SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(
SDNode *
N) {
993 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
997 {
N->getOperand(0), Elt});
1006 ReplaceValueWith(
SDValue(
N, 0), Res);
1011 SDValue Res = GetScalarizedVector(
N->getOperand(0));
1018SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(
SDNode *
N) {
1024 SDValue Op = GetScalarizedVector(VecOp);
1026 AccOp,
Op,
N->getFlags());
1030 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
1031 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
1032 return DAG.
getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), LHS, RHS);
1043void DAGTypeLegalizer::SplitVectorResult(
SDNode *
N,
unsigned ResNo) {
1048 if (CustomLowerNode(
N,
N->getValueType(ResNo),
true))
1051 switch (
N->getOpcode()) {
1054 dbgs() <<
"SplitVectorResult #" << ResNo <<
": ";
1066 case ISD::VP_SELECT: SplitRes_Select(
N,
Lo,
Hi);
break;
1081 SplitVecRes_ScalarOp(
N,
Lo,
Hi);
1084 SplitVecRes_STEP_VECTOR(
N,
Lo,
Hi);
1088 SplitVecRes_LOAD(cast<LoadSDNode>(
N),
Lo,
Hi);
1091 SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(
N),
Lo,
Hi);
1093 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1094 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N),
Lo,
Hi);
1097 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(
N),
Lo,
Hi);
1100 case ISD::VP_GATHER:
1101 SplitVecRes_Gather(cast<MemSDNode>(
N),
Lo,
Hi,
true);
1105 SplitVecRes_SETCC(
N,
Lo,
Hi);
1108 SplitVecRes_VECTOR_REVERSE(
N,
Lo,
Hi);
1111 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N),
Lo,
Hi);
1114 SplitVecRes_VECTOR_SPLICE(
N,
Lo,
Hi);
1117 SplitVecRes_VECTOR_DEINTERLEAVE(
N);
1120 SplitVecRes_VECTOR_INTERLEAVE(
N);
1123 SplitVecRes_VAARG(
N,
Lo,
Hi);
1129 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
1135 case ISD::VP_BITREVERSE:
1143 case ISD::VP_CTLZ_ZERO_UNDEF:
1145 case ISD::VP_CTTZ_ZERO_UNDEF:
1156 case ISD::VP_FFLOOR:
1161 case ISD::VP_FNEARBYINT:
1166 case ISD::VP_FP_EXTEND:
1168 case ISD::VP_FP_ROUND:
1170 case ISD::VP_FP_TO_SINT:
1172 case ISD::VP_FP_TO_UINT:
1178 case ISD::VP_LLRINT:
1180 case ISD::VP_FROUND:
1182 case ISD::VP_FROUNDEVEN:
1187 case ISD::VP_FROUNDTOZERO:
1189 case ISD::VP_SINT_TO_FP:
1191 case ISD::VP_TRUNCATE:
1193 case ISD::VP_UINT_TO_FP:
1195 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
1198 SplitVecRes_ADDRSPACECAST(
N,
Lo,
Hi);
1201 SplitVecRes_FFREXP(
N, ResNo,
Lo,
Hi);
1207 case ISD::VP_SIGN_EXTEND:
1208 case ISD::VP_ZERO_EXTEND:
1209 SplitVecRes_ExtendOp(
N,
Lo,
Hi);
1226 case ISD::VP_FMINNUM:
1229 case ISD::VP_FMAXNUM:
1231 case ISD::VP_FMINIMUM:
1233 case ISD::VP_FMAXIMUM:
1239 case ISD::OR:
case ISD::VP_OR:
1259 case ISD::VP_FCOPYSIGN:
1260 SplitVecRes_BinOp(
N,
Lo,
Hi);
1267 SplitVecRes_TernaryOp(
N,
Lo,
Hi);
1271 SplitVecRes_CMP(
N,
Lo,
Hi);
1274#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1275 case ISD::STRICT_##DAGN:
1276#include "llvm/IR/ConstrainedOps.def"
1277 SplitVecRes_StrictFPOp(
N,
Lo,
Hi);
1282 SplitVecRes_FP_TO_XINT_SAT(
N,
Lo,
Hi);
1291 SplitVecRes_OverflowOp(
N, ResNo,
Lo,
Hi);
1301 SplitVecRes_FIX(
N,
Lo,
Hi);
1303 case ISD::EXPERIMENTAL_VP_REVERSE:
1304 SplitVecRes_VP_REVERSE(
N,
Lo,
Hi);
1313void DAGTypeLegalizer::IncrementPointer(
MemSDNode *
N,
EVT MemVT,
1322 DL,
Ptr.getValueType(),
1323 APInt(
Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
1325 Flags.setNoUnsignedWrap(
true);
1327 *ScaledOffset += IncrementSize;
1331 MPI =
N->getPointerInfo().getWithOffset(IncrementSize);
1337std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask) {
1338 return SplitMask(Mask,
SDLoc(Mask));
1341std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask,
1344 EVT MaskVT =
Mask.getValueType();
1346 GetSplitVector(Mask, MaskLo, MaskHi);
1349 return std::make_pair(MaskLo, MaskHi);
1354 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1356 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1360 unsigned Opcode =
N->getOpcode();
1361 if (
N->getNumOperands() == 2) {
1367 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
1368 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1371 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
1374 std::tie(EVLLo, EVLHi) =
1375 DAG.
SplitEVL(
N->getOperand(3),
N->getValueType(0), dl);
1378 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1380 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1386 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
1388 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
1390 GetSplitVector(
N->getOperand(2), Op2Lo, Op2Hi);
1394 unsigned Opcode =
N->getOpcode();
1395 if (
N->getNumOperands() == 3) {
1401 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
1402 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1405 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
1408 std::tie(EVLLo, EVLHi) =
1409 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0), dl);
1412 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1414 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1424 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
1426 GetSplitVector(LHS, LHSLo, LHSHi);
1427 GetSplitVector(RHS, RHSLo, RHSHi);
1429 std::tie(LHSLo, LHSHi) = DAG.
SplitVector(LHS, dl);
1430 std::tie(RHSLo, RHSHi) = DAG.
SplitVector(RHS, dl);
1433 EVT SplitResVT =
N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
1434 Lo = DAG.
getNode(
N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
1435 Hi = DAG.
getNode(
N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);
1440 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1442 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1446 unsigned Opcode =
N->getOpcode();
1465 switch (getTypeAction(InVT)) {
1480 GetExpandedOp(InOp,
Lo,
Hi);
1491 GetSplitVector(InOp,
Lo,
Hi);
1512 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT,
Lo,
Hi);
1535 assert(!(
N->getNumOperands() & 1) &&
"Unsupported CONCAT_VECTORS");
1537 unsigned NumSubvectors =
N->getNumOperands() / 2;
1538 if (NumSubvectors == 1) {
1539 Lo =
N->getOperand(0);
1540 Hi =
N->getOperand(1);
1554void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(
SDNode *
N,
SDValue &
Lo,
1576 GetSplitVector(Vec,
Lo,
Hi);
1579 EVT LoVT =
Lo.getValueType();
1588 unsigned IdxVal =
Idx->getAsZExtVal();
1589 if (IdxVal + SubElems <= LoElems) {
1597 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
1623 Lo = DAG.
getLoad(
Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
1627 auto *
Load = cast<LoadSDNode>(
Lo);
1629 IncrementPointer(Load, LoVT, MPI, StackPtr);
1632 Hi = DAG.
getLoad(
Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
1641 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1646 EVT RHSVT =
RHS.getValueType();
1649 GetSplitVector(RHS, RHSLo, RHSHi);
1666 SDValue FpValue =
N->getOperand(0);
1668 GetSplitVector(FpValue, ArgLo, ArgHi);
1681 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1685 std::tie(LoVT, HiVT) =
1696 unsigned Opcode =
N->getOpcode();
1703 GetSplitVector(N0, InLo, InHi);
1710 EVT OutLoVT, OutHiVT;
1713 assert((2 * OutNumElements) <= InNumElements &&
1714 "Illegal extend vector in reg split");
1724 for (
unsigned i = 0; i != OutNumElements; ++i)
1725 SplitHi[i] = i + OutNumElements;
1728 Lo = DAG.
getNode(Opcode, dl, OutLoVT, InLo);
1729 Hi = DAG.
getNode(Opcode, dl, OutHiVT, InHi);
1734 unsigned NumOps =
N->getNumOperands();
1748 for (
unsigned i = 1; i < NumOps; ++i) {
1753 EVT InVT =
Op.getValueType();
1758 GetSplitVector(
Op, OpLo, OpHi);
1767 EVT LoValueVTs[] = {LoVT, MVT::Other};
1768 EVT HiValueVTs[] = {HiVT, MVT::Other};
1777 Lo.getValue(1),
Hi.getValue(1));
1781 ReplaceValueWith(
SDValue(
N, 1), Chain);
1784SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(
SDNode *
N,
unsigned ResNE) {
1786 EVT VT =
N->getValueType(0);
1797 else if (NE > ResNE)
1801 EVT ChainVTs[] = {EltVT, MVT::Other};
1805 for (i = 0; i !=
NE; ++i) {
1807 for (
unsigned j = 1, e =
N->getNumOperands(); j != e; ++j) {
1808 SDValue Operand =
N->getOperand(j);
1819 Scalar.getNode()->setFlags(
N->getFlags());
1827 for (; i < ResNE; ++i)
1832 ReplaceValueWith(
SDValue(
N, 1), Chain);
1839void DAGTypeLegalizer::SplitVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo,
1842 EVT ResVT =
N->getValueType(0);
1843 EVT OvVT =
N->getValueType(1);
1844 EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
1848 SDValue LoLHS, HiLHS, LoRHS, HiRHS;
1850 GetSplitVector(
N->getOperand(0), LoLHS, HiLHS);
1851 GetSplitVector(
N->getOperand(1), LoRHS, HiRHS);
1857 unsigned Opcode =
N->getOpcode();
1869 unsigned OtherNo = 1 - ResNo;
1870 EVT OtherVT =
N->getValueType(OtherNo);
1872 SetSplitVector(
SDValue(
N, OtherNo),
1878 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
1882void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(
SDNode *
N,
SDValue &
Lo,
1888 GetSplitVector(Vec,
Lo,
Hi);
1891 unsigned IdxVal = CIdx->getZExtValue();
1892 unsigned LoNumElts =
Lo.getValueType().getVectorMinNumElements();
1893 if (IdxVal < LoNumElts) {
1895 Lo.getValueType(),
Lo, Elt,
Idx);
1941 Lo = DAG.
getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
1944 auto Load = cast<LoadSDNode>(
Lo);
1946 IncrementPointer(Load, LoVT, MPI, StackPtr);
1948 Hi = DAG.
getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
1952 if (LoVT !=
Lo.getValueType())
1954 if (HiVT !=
Hi.getValueType())
1962 assert(
N->getValueType(0).isScalableVector() &&
1963 "Only scalable vectors are supported for STEP_VECTOR");
1986 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0));
2006 EVT MemoryVT =
LD->getMemoryVT();
2010 EVT LoMemVT, HiMemVT;
2017 ReplaceValueWith(
SDValue(LD, 1), NewChain);
2022 LD->getPointerInfo(), LoMemVT,
LD->getOriginalAlign(),
2026 IncrementPointer(LD, LoMemVT, MPI,
Ptr);
2029 HiMemVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
2038 ReplaceValueWith(
SDValue(LD, 1), Ch);
2043 assert(
LD->isUnindexed() &&
"Indexed VP load during type legalization!");
2052 assert(
Offset.isUndef() &&
"Unexpected indexed variable-length load offset");
2053 Align Alignment =
LD->getOriginalAlign();
2056 EVT MemoryVT =
LD->getMemoryVT();
2058 EVT LoMemVT, HiMemVT;
2059 bool HiIsEmpty =
false;
2060 std::tie(LoMemVT, HiMemVT) =
2066 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2069 GetSplitVector(Mask, MaskLo, MaskHi);
2071 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2076 std::tie(EVLLo, EVLHi) = DAG.
SplitEVL(EVL,
LD->getValueType(0), dl);
2085 MaskLo, EVLLo, LoMemVT, MMO,
LD->isExpandingLoad());
2094 LD->isExpandingLoad());
2100 MPI =
LD->getPointerInfo().getWithOffset(
2105 Alignment,
LD->getAAInfo(),
LD->getRanges());
2108 Offset, MaskHi, EVLHi, HiMemVT, MMO,
2109 LD->isExpandingLoad());
2119 ReplaceValueWith(
SDValue(LD, 1), Ch);
2125 "Indexed VP strided load during type legalization!");
2127 "Unexpected indexed variable-length load offset");
2134 EVT LoMemVT, HiMemVT;
2135 bool HiIsEmpty =
false;
2136 std::tie(LoMemVT, HiMemVT) =
2142 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2145 GetSplitVector(Mask, LoMask, HiMask);
2151 std::tie(LoEVL, HiEVL) =
2189 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2200 ReplaceValueWith(
SDValue(SLD, 1), Ch);
2213 assert(
Offset.isUndef() &&
"Unexpected indexed masked load offset");
2222 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2225 GetSplitVector(Mask, MaskLo, MaskHi);
2227 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2231 EVT LoMemVT, HiMemVT;
2232 bool HiIsEmpty =
false;
2233 std::tie(LoMemVT, HiMemVT) =
2236 SDValue PassThruLo, PassThruHi;
2238 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2240 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2283 ReplaceValueWith(
SDValue(MLD, 1), Ch);
2300 if (
auto *MSC = dyn_cast<MaskedGatherSDNode>(
N)) {
2301 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
2303 auto *VPSC = cast<VPGatherSDNode>(
N);
2304 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
2307 EVT MemoryVT =
N->getMemoryVT();
2308 Align Alignment =
N->getOriginalAlign();
2312 if (SplitSETCC && Ops.Mask.getOpcode() ==
ISD::SETCC) {
2313 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
2315 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
2318 EVT LoMemVT, HiMemVT;
2323 if (getTypeAction(Ops.Index.getValueType()) ==
2325 GetSplitVector(Ops.Index, IndexLo, IndexHi);
2327 std::tie(IndexLo, IndexHi) = DAG.
SplitVector(Ops.Index, dl);
2334 if (
auto *MGT = dyn_cast<MaskedGatherSDNode>(
N)) {
2335 SDValue PassThru = MGT->getPassThru();
2336 SDValue PassThruLo, PassThruHi;
2339 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2341 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2346 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
2348 OpsLo, MMO, IndexTy, ExtType);
2350 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
2352 OpsHi, MMO, IndexTy, ExtType);
2354 auto *VPGT = cast<VPGatherSDNode>(
N);
2356 std::tie(EVLLo, EVLHi) =
2357 DAG.
SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
2359 SDValue OpsLo[] = {Ch,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
2361 MMO, VPGT->getIndexType());
2363 SDValue OpsHi[] = {Ch,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
2365 MMO, VPGT->getIndexType());
2375 ReplaceValueWith(
SDValue(
N, 1), Ch);
2379 assert(
N->getValueType(0).isVector() &&
2380 N->getOperand(0).getValueType().isVector() &&
2381 "Operand types must be vectors");
2389 if (getTypeAction(
N->getOperand(0).getValueType()) ==
2391 GetSplitVector(
N->getOperand(0), LL, LH);
2395 if (getTypeAction(
N->getOperand(1).getValueType()) ==
2397 GetSplitVector(
N->getOperand(1), RL, RH);
2402 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2));
2403 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2));
2405 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
2406 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2407 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
2408 std::tie(EVLLo, EVLHi) =
2409 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
2410 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2), MaskLo,
2412 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2), MaskHi,
2426 EVT InVT =
N->getOperand(0).getValueType();
2428 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2433 unsigned Opcode =
N->getOpcode();
2434 if (
N->getNumOperands() <= 2) {
2436 Lo = DAG.
getNode(Opcode, dl, LoVT,
Lo,
N->getOperand(1), Flags);
2437 Hi = DAG.
getNode(Opcode, dl, HiVT,
Hi,
N->getOperand(1), Flags);
2445 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
2446 assert(
N->isVPOpcode() &&
"Expected VP opcode");
2449 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2452 std::tie(EVLLo, EVLHi) =
2453 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2466 EVT InVT =
N->getOperand(0).getValueType();
2468 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2472 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
2473 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
2474 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
2479void DAGTypeLegalizer::SplitVecRes_FFREXP(
SDNode *
N,
unsigned ResNo,
2487 EVT InVT =
N->getOperand(0).getValueType();
2489 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2493 Lo = DAG.
getNode(
N->getOpcode(), dl, {LoVT, LoVT1},
Lo);
2494 Hi = DAG.
getNode(
N->getOpcode(), dl, {HiVT, HiVT1},
Hi);
2495 Lo->setFlags(
N->getFlags());
2496 Hi->setFlags(
N->getFlags());
2502 unsigned OtherNo = 1 - ResNo;
2503 EVT OtherVT =
N->getValueType(OtherNo);
2511 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
2518 EVT SrcVT =
N->getOperand(0).getValueType();
2519 EVT DestVT =
N->getValueType(0);
2542 EVT SplitLoVT, SplitHiVT;
2546 LLVM_DEBUG(
dbgs() <<
"Split vector extend via incremental extend:";
2547 N->dump(&DAG);
dbgs() <<
"\n");
2548 if (!
N->isVPOpcode()) {
2551 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0));
2562 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0),
2563 N->getOperand(1),
N->getOperand(2));
2568 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2571 std::tie(EVLLo, EVLHi) =
2572 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2574 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2575 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2580 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
2588 GetSplitVector(
N->getOperand(0), Inputs[0], Inputs[1]);
2589 GetSplitVector(
N->getOperand(1), Inputs[2], Inputs[3]);
2595 return N.getResNo() == 0 &&
2599 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &
DL](
SDValue &Input1,
2604 "Expected build vector node.");
2607 for (
unsigned I = 0;
I < NewElts; ++
I) {
2612 Ops[
I] = Input2.getOperand(
Idx - NewElts);
2614 Ops[
I] = Input1.getOperand(
Idx);
2616 if (Ops[
I].getValueType().bitsGT(EltVT))
2619 return DAG.getBuildVector(NewVT,
DL, Ops);
2627 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT,
this, NewElts,
2631 for (
unsigned Idx = 0;
Idx < std::size(Inputs); ++
Idx) {
2633 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.
getNode());
2642 for (
auto &
P : ShufflesIdxs) {
2643 if (
P.second.size() < 2)
2647 for (
int &
Idx : Mask) {
2650 unsigned SrcRegIdx =
Idx / NewElts;
2651 if (Inputs[SrcRegIdx].
isUndef()) {
2656 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].
getNode());
2659 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2664 Idx = MaskElt % NewElts +
2665 P.second[Shuffle->getOperand(MaskElt / NewElts) ==
P.first.first
2671 Inputs[
P.second[0]] =
P.first.first;
2672 Inputs[
P.second[1]] =
P.first.second;
2675 ShufflesIdxs[std::make_pair(
P.first.second,
P.first.first)].clear();
2679 for (
int &
Idx : Mask) {
2682 unsigned SrcRegIdx =
Idx / NewElts;
2683 if (Inputs[SrcRegIdx].
isUndef()) {
2688 getTypeAction(Inputs[SrcRegIdx].getValueType());
2690 Inputs[SrcRegIdx].getNumOperands() == 2 &&
2691 !Inputs[SrcRegIdx].getOperand(1).
isUndef() &&
2694 UsedSubVector.set(2 * SrcRegIdx + (
Idx % NewElts) / (NewElts / 2));
2696 if (UsedSubVector.count() > 1) {
2698 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2699 if (UsedSubVector.test(2 *
I) == UsedSubVector.test(2 *
I + 1))
2701 if (Pairs.
empty() || Pairs.
back().size() == 2)
2703 if (UsedSubVector.test(2 *
I)) {
2704 Pairs.
back().emplace_back(
I, 0);
2706 assert(UsedSubVector.test(2 *
I + 1) &&
2707 "Expected to be used one of the subvectors.");
2708 Pairs.
back().emplace_back(
I, 1);
2711 if (!Pairs.
empty() && Pairs.
front().size() > 1) {
2713 for (
int &
Idx : Mask) {
2716 unsigned SrcRegIdx =
Idx / NewElts;
2718 Pairs, [SrcRegIdx](
ArrayRef<std::pair<unsigned, int>> Idxs) {
2719 return Idxs.front().first == SrcRegIdx ||
2720 Idxs.back().first == SrcRegIdx;
2722 if (It == Pairs.
end())
2724 Idx = It->front().first * NewElts + (
Idx % NewElts) % (NewElts / 2) +
2725 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
2728 for (
ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
2729 Inputs[Idxs.front().first] = DAG.
getNode(
2731 Inputs[Idxs.front().first].getValueType(),
2732 Inputs[Idxs.front().first].
getOperand(Idxs.front().second),
2733 Inputs[Idxs.back().first].
getOperand(Idxs.back().second));
2742 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2743 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[
I].
getNode());
2746 if (Shuffle->getOperand(0).getValueType() != NewVT)
2749 if (!Inputs[
I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
2750 !Shuffle->isSplat()) {
2752 }
else if (!Inputs[
I].hasOneUse() &&
2753 !Shuffle->getOperand(1).isUndef()) {
2755 for (
int &
Idx : Mask) {
2758 unsigned SrcRegIdx =
Idx / NewElts;
2761 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2766 int OpIdx = MaskElt / NewElts;
2779 for (
int OpIdx = 0; OpIdx < 2; ++OpIdx) {
2780 if (Shuffle->getOperand(OpIdx).isUndef())
2782 auto *It =
find(Inputs, Shuffle->getOperand(OpIdx));
2783 if (It == std::end(Inputs))
2785 int FoundOp = std::distance(std::begin(Inputs), It);
2788 for (
int &
Idx : Mask) {
2791 unsigned SrcRegIdx =
Idx / NewElts;
2794 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2799 int MaskIdx = MaskElt / NewElts;
2800 if (OpIdx == MaskIdx)
2801 Idx = MaskElt % NewElts + FoundOp * NewElts;
2804 Op = (OpIdx + 1) % 2;
2812 for (
int &
Idx : Mask) {
2815 unsigned SrcRegIdx =
Idx / NewElts;
2818 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2819 int OpIdx = MaskElt / NewElts;
2822 Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
2828 TryPeekThroughShufflesInputs(OrigMask);
2830 auto &&MakeUniqueInputs = [&Inputs, &
IsConstant,
2834 for (
const auto &
I : Inputs) {
2836 UniqueConstantInputs.
insert(
I);
2837 else if (!
I.isUndef())
2842 if (UniqueInputs.
size() != std::size(Inputs)) {
2843 auto &&UniqueVec = UniqueInputs.
takeVector();
2844 auto &&UniqueConstantVec = UniqueConstantInputs.
takeVector();
2845 unsigned ConstNum = UniqueConstantVec.size();
2846 for (
int &
Idx : Mask) {
2849 unsigned SrcRegIdx =
Idx / NewElts;
2850 if (Inputs[SrcRegIdx].
isUndef()) {
2854 const auto It =
find(UniqueConstantVec, Inputs[SrcRegIdx]);
2855 if (It != UniqueConstantVec.end()) {
2857 NewElts * std::distance(UniqueConstantVec.begin(), It);
2858 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2861 const auto RegIt =
find(UniqueVec, Inputs[SrcRegIdx]);
2862 assert(RegIt != UniqueVec.end() &&
"Cannot find non-const value.");
2864 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
2865 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2867 copy(UniqueConstantVec, std::begin(Inputs));
2868 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
2871 MakeUniqueInputs(OrigMask);
2873 copy(Inputs, std::begin(OrigInputs));
2879 unsigned FirstMaskIdx =
High * NewElts;
2882 assert(!Output &&
"Expected default initialized initial value.");
2883 TryPeekThroughShufflesInputs(Mask);
2884 MakeUniqueInputs(Mask);
2886 copy(Inputs, std::begin(TmpInputs));
2889 bool SecondIteration =
false;
2890 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](
unsigned Idx) {
2895 if (UsedIdx >= 0 &&
static_cast<unsigned>(UsedIdx) ==
Idx)
2896 SecondIteration =
true;
2897 return SecondIteration;
2900 Mask, std::size(Inputs), std::size(Inputs),
2902 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
2903 [&Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2906 Output = BuildVector(Inputs[
Idx], Inputs[
Idx], Mask);
2908 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[
Idx],
2909 DAG.getUNDEF(NewVT), Mask);
2910 Inputs[
Idx] = Output;
2912 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2915 if (AccumulateResults(Idx1)) {
2918 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
2920 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[Idx1],
2921 Inputs[Idx2], Mask);
2925 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
2927 Output = DAG.getVectorShuffle(NewVT,
DL, TmpInputs[Idx1],
2928 TmpInputs[Idx2], Mask);
2930 Inputs[Idx1] = Output;
2932 copy(OrigInputs, std::begin(Inputs));
2937 EVT OVT =
N->getValueType(0);
2944 const Align Alignment =
2945 DAG.getDataLayout().getABITypeAlign(NVT.
getTypeForEVT(*DAG.getContext()));
2947 Lo = DAG.getVAArg(NVT, dl, Chain,
Ptr, SV, Alignment.
value());
2948 Hi = DAG.getVAArg(NVT, dl,
Lo.getValue(1),
Ptr, SV, Alignment.
value());
2949 Chain =
Hi.getValue(1);
2953 ReplaceValueWith(
SDValue(
N, 1), Chain);
2958 EVT DstVTLo, DstVTHi;
2959 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(
N->getValueType(0));
2963 EVT SrcVT =
N->getOperand(0).getValueType();
2965 GetSplitVector(
N->getOperand(0), SrcLo, SrcHi);
2967 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(
N, 0);
2969 Lo = DAG.getNode(
N->getOpcode(), dl, DstVTLo, SrcLo,
N->getOperand(1));
2970 Hi = DAG.getNode(
N->getOpcode(), dl, DstVTHi, SrcHi,
N->getOperand(1));
2976 GetSplitVector(
N->getOperand(0), InLo, InHi);
2988 std::tie(
Lo,
Hi) = DAG.SplitVector(Expanded,
DL);
2993 EVT VT =
N->getValueType(0);
3000 Align Alignment = DAG.getReducedAlign(VT,
false);
3006 auto &MF = DAG.getMachineFunction();
3020 DAG.getConstant(1,
DL, PtrVT));
3022 DAG.getConstant(EltWidth,
DL, PtrVT));
3024 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth,
DL, PtrVT);
3026 SDValue TrueMask = DAG.getBoolConstant(
true,
DL,
Mask.getValueType(), VT);
3027 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(),
DL, Val, StorePtr,
3028 DAG.getUNDEF(PtrVT), Stride, TrueMask,
3031 SDValue Load = DAG.getLoadVP(VT,
DL, Store, StackPtr, Mask, EVL, LoadMMO);
3033 std::tie(
Lo,
Hi) = DAG.SplitVector(Load,
DL);
3036void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(
SDNode *
N) {
3038 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
3039 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
3040 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
3044 DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
3046 DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
3052void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(
SDNode *
N) {
3053 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
3054 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
3055 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
3059 DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
3061 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
3063 SetSplitVector(
SDValue(
N, 0), Res[0].getValue(0), Res[0].getValue(1));
3064 SetSplitVector(
SDValue(
N, 1), Res[1].getValue(0), Res[1].getValue(1));
3075bool DAGTypeLegalizer::SplitVectorOperand(
SDNode *
N,
unsigned OpNo) {
3080 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
3083 switch (
N->getOpcode()) {
3086 dbgs() <<
"SplitVectorOperand Op #" << OpNo <<
": ";
3095 case ISD::SETCC: Res = SplitVecOp_VSETCC(
N);
break;
3101 case ISD::VP_TRUNCATE:
3103 Res = SplitVecOp_TruncateHelper(
N);
3106 case ISD::VP_FP_ROUND:
3110 Res = SplitVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
3113 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(
N), OpNo);
3115 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
3116 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(
N), OpNo);
3119 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(
N), OpNo);
3122 case ISD::VP_SCATTER:
3123 Res = SplitVecOp_Scatter(cast<MemSDNode>(
N), OpNo);
3126 case ISD::VP_GATHER:
3127 Res = SplitVecOp_Gather(cast<MemSDNode>(
N), OpNo);
3130 Res = SplitVecOp_VSELECT(
N, OpNo);
3136 case ISD::VP_SINT_TO_FP:
3137 case ISD::VP_UINT_TO_FP:
3138 if (
N->getValueType(0).bitsLT(
3139 N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).getValueType()))
3140 Res = SplitVecOp_TruncateHelper(
N);
3142 Res = SplitVecOp_UnaryOp(
N);
3146 Res = SplitVecOp_FP_TO_XINT_SAT(
N);
3150 case ISD::VP_FP_TO_SINT:
3151 case ISD::VP_FP_TO_UINT:
3162 Res = SplitVecOp_UnaryOp(
N);
3165 Res = SplitVecOp_FPOpDifferentTypes(
N);
3170 Res = SplitVecOp_CMP(
N);
3176 Res = SplitVecOp_ExtVecInRegOp(
N);
3194 Res = SplitVecOp_VECREDUCE(
N, OpNo);
3198 Res = SplitVecOp_VECREDUCE_SEQ(
N);
3200 case ISD::VP_REDUCE_FADD:
3201 case ISD::VP_REDUCE_SEQ_FADD:
3202 case ISD::VP_REDUCE_FMUL:
3203 case ISD::VP_REDUCE_SEQ_FMUL:
3204 case ISD::VP_REDUCE_ADD:
3205 case ISD::VP_REDUCE_MUL:
3206 case ISD::VP_REDUCE_AND:
3207 case ISD::VP_REDUCE_OR:
3208 case ISD::VP_REDUCE_XOR:
3209 case ISD::VP_REDUCE_SMAX:
3210 case ISD::VP_REDUCE_SMIN:
3211 case ISD::VP_REDUCE_UMAX:
3212 case ISD::VP_REDUCE_UMIN:
3213 case ISD::VP_REDUCE_FMAX:
3214 case ISD::VP_REDUCE_FMIN:
3215 case ISD::VP_REDUCE_FMAXIMUM:
3216 case ISD::VP_REDUCE_FMINIMUM:
3217 Res = SplitVecOp_VP_REDUCE(
N, OpNo);
3219 case ISD::VP_CTTZ_ELTS:
3220 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
3221 Res = SplitVecOp_VP_CttzElements(
N);
3226 if (!Res.
getNode())
return false;
3233 if (
N->isStrictFPOpcode())
3235 "Invalid operand expansion");
3238 "Invalid operand expansion");
3240 ReplaceValueWith(
SDValue(
N, 0), Res);
3244SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(
SDNode *
N,
unsigned OpNo) {
3247 assert(OpNo == 0 &&
"Illegal operand must be mask");
3254 assert(
Mask.getValueType().isVector() &&
"VSELECT without a vector mask?");
3257 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3258 assert(
Lo.getValueType() ==
Hi.getValueType() &&
3259 "Lo and Hi have differing types");
3262 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3263 assert(LoOpVT == HiOpVT &&
"Asymmetric vector split?");
3265 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3266 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0,
DL);
3267 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1,
DL);
3268 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3278SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(
SDNode *
N,
unsigned OpNo) {
3279 EVT ResVT =
N->getValueType(0);
3283 SDValue VecOp =
N->getOperand(OpNo);
3285 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3286 GetSplitVector(VecOp,
Lo,
Hi);
3288 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3294 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
N->getFlags());
3298 EVT ResVT =
N->getValueType(0);
3307 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3308 GetSplitVector(VecOp,
Lo,
Hi);
3310 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3316 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
Hi, Flags);
3319SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(
SDNode *
N,
unsigned OpNo) {
3320 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3321 assert(OpNo == 1 &&
"Can only split reduce vector operand");
3323 unsigned Opc =
N->getOpcode();
3324 EVT ResVT =
N->getValueType(0);
3328 SDValue VecOp =
N->getOperand(OpNo);
3330 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3331 GetSplitVector(VecOp,
Lo,
Hi);
3334 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
3337 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(
N->getOperand(3), VecVT, dl);
3342 DAG.
getNode(Opc, dl, ResVT, {
N->getOperand(0),
Lo, MaskLo, EVLLo},
Flags);
3343 return DAG.getNode(Opc, dl, ResVT, {ResLo,
Hi, MaskHi, EVLHi},
Flags);
3348 EVT ResVT =
N->getValueType(0);
3351 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3352 EVT InVT =
Lo.getValueType();
3357 if (
N->isStrictFPOpcode()) {
3358 Lo = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3359 { N->getOperand(0), Lo });
3360 Hi = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3361 { N->getOperand(0), Hi });
3370 ReplaceValueWith(
SDValue(
N, 1), Ch);
3371 }
else if (
N->getNumOperands() == 3) {
3372 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3373 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3374 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
3375 std::tie(EVLLo, EVLHi) =
3376 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
3377 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo, MaskLo, EVLLo);
3378 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi, MaskHi, EVLHi);
3380 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo);
3381 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi);
3391 EVT ResVT =
N->getValueType(0);
3393 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3397 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3403 Lo = BitConvertToInteger(
Lo);
3404 Hi = BitConvertToInteger(
Hi);
3406 if (DAG.getDataLayout().isBigEndian())
3414 assert(OpNo == 1 &&
"Invalid OpNo; can only split SubVec.");
3416 EVT ResVT =
N->getValueType(0);
3424 GetSplitVector(SubVec,
Lo,
Hi);
3427 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3433 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3435 return SecondInsertion;
3438SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
3440 EVT SubVT =
N->getValueType(0);
3445 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3447 uint64_t LoEltsMin =
Lo.getValueType().getVectorMinNumElements();
3450 if (IdxVal < LoEltsMin) {
3452 "Extracted subvector crosses vector split!");
3455 N->getOperand(0).getValueType().isScalableVector())
3457 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
3462 "Extracting scalable subvector from fixed-width unsupported");
3470 "subvector from a scalable predicate vector");
3476 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3478 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3479 auto &MF = DAG.getMachineFunction();
3483 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3490 SubVT, dl, Store, StackPtr,
3494SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
3503 GetSplitVector(Vec,
Lo,
Hi);
3505 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3507 if (IdxVal < LoElts)
3511 DAG.getConstant(IdxVal - LoElts,
SDLoc(
N),
3512 Idx.getValueType())), 0);
3516 if (CustomLowerNode(
N,
N->getValueType(0),
true))
3528 return DAG.getAnyExtOrTrunc(NewExtract, dl,
N->getValueType(0));
3534 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3536 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3537 auto &MF = DAG.getMachineFunction();
3540 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3548 assert(
N->getValueType(0).bitsGE(EltVT) &&
"Illegal EXTRACT_VECTOR_ELT.");
3550 return DAG.getExtLoad(
3561 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
3569 SplitVecRes_Gather(
N,
Lo,
Hi);
3572 ReplaceValueWith(
SDValue(
N, 0), Res);
3577 assert(
N->isUnindexed() &&
"Indexed vp_store of vector?");
3581 assert(
Offset.isUndef() &&
"Unexpected VP store offset");
3583 SDValue EVL =
N->getVectorLength();
3585 Align Alignment =
N->getOriginalAlign();
3591 GetSplitVector(
Data, DataLo, DataHi);
3593 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3598 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3601 GetSplitVector(Mask, MaskLo, MaskHi);
3603 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3606 EVT MemoryVT =
N->getMemoryVT();
3607 EVT LoMemVT, HiMemVT;
3608 bool HiIsEmpty =
false;
3609 std::tie(LoMemVT, HiMemVT) =
3610 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3614 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL,
Data.getValueType(),
DL);
3622 Lo = DAG.getStoreVP(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, EVLLo, LoMemVT, MMO,
3623 N->getAddressingMode(),
N->isTruncatingStore(),
3624 N->isCompressingStore());
3631 N->isCompressingStore());
3639 MPI =
N->getPointerInfo().getWithOffset(
3642 MMO = DAG.getMachineFunction().getMachineMemOperand(
3644 Alignment,
N->getAAInfo(),
N->getRanges());
3646 Hi = DAG.getStoreVP(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
3647 N->getAddressingMode(),
N->isTruncatingStore(),
3648 N->isCompressingStore());
3657 assert(
N->isUnindexed() &&
"Indexed vp_strided_store of a vector?");
3658 assert(
N->getOffset().isUndef() &&
"Unexpected VP strided store offset");
3665 GetSplitVector(
Data, LoData, HiData);
3667 std::tie(LoData, HiData) = DAG.SplitVector(
Data,
DL);
3669 EVT LoMemVT, HiMemVT;
3670 bool HiIsEmpty =
false;
3671 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
3677 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
3678 else if (getTypeAction(
Mask.getValueType()) ==
3680 GetSplitVector(Mask, LoMask, HiMask);
3682 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3685 std::tie(LoEVL, HiEVL) =
3686 DAG.SplitEVL(
N->getVectorLength(),
Data.getValueType(),
DL);
3690 N->getChain(),
DL, LoData,
N->getBasePtr(),
N->getOffset(),
3691 N->getStride(), LoMask, LoEVL, LoMemVT,
N->getMemOperand(),
3692 N->getAddressingMode(),
N->isTruncatingStore(),
N->isCompressingStore());
3703 EVT PtrVT =
N->getBasePtr().getValueType();
3706 DAG.getSExtOrTrunc(
N->getStride(),
DL, PtrVT));
3709 Align Alignment =
N->getOriginalAlign();
3717 Alignment,
N->getAAInfo(),
N->getRanges());
3720 N->getChain(),
DL, HiData,
Ptr,
N->getOffset(),
N->getStride(), HiMask,
3721 HiEVL, HiMemVT, MMO,
N->getAddressingMode(),
N->isTruncatingStore(),
3722 N->isCompressingStore());
3731 assert(
N->isUnindexed() &&
"Indexed masked store of vector?");
3735 assert(
Offset.isUndef() &&
"Unexpected indexed masked store offset");
3738 Align Alignment =
N->getOriginalAlign();
3744 GetSplitVector(
Data, DataLo, DataHi);
3746 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3751 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3754 GetSplitVector(Mask, MaskLo, MaskHi);
3756 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3759 EVT MemoryVT =
N->getMemoryVT();
3760 EVT LoMemVT, HiMemVT;
3761 bool HiIsEmpty =
false;
3762 std::tie(LoMemVT, HiMemVT) =
3763 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3771 Lo = DAG.getMaskedStore(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, LoMemVT, MMO,
3772 N->getAddressingMode(),
N->isTruncatingStore(),
3773 N->isCompressingStore());
3782 N->isCompressingStore());
3790 MPI =
N->getPointerInfo().getWithOffset(
3793 MMO = DAG.getMachineFunction().getMachineMemOperand(
3795 Alignment,
N->getAAInfo(),
N->getRanges());
3797 Hi = DAG.getMaskedStore(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, HiMemVT, MMO,
3798 N->getAddressingMode(),
N->isTruncatingStore(),
3799 N->isCompressingStore());
3812 EVT MemoryVT =
N->getMemoryVT();
3813 Align Alignment =
N->getOriginalAlign();
3821 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3822 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
3825 auto *VPSC = cast<VPScatterSDNode>(
N);
3826 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
3831 EVT LoMemVT, HiMemVT;
3832 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3837 GetSplitVector(Ops.Data, DataLo, DataHi);
3839 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data,
DL);
3843 if (OpNo == 1 && Ops.Mask.getOpcode() ==
ISD::SETCC) {
3844 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
3846 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask,
DL);
3850 if (getTypeAction(Ops.Index.getValueType()) ==
3852 GetSplitVector(Ops.Index, IndexLo, IndexHi);
3854 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index,
DL);
3862 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3863 SDValue OpsLo[] = {Ch, DataLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
3865 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3866 MSC->getIndexType(), MSC->isTruncatingStore());
3871 SDValue OpsHi[] = {
Lo, DataHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
3872 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi,
3873 MMO, MSC->getIndexType(),
3874 MSC->isTruncatingStore());
3876 auto *VPSC = cast<VPScatterSDNode>(
N);
3878 std::tie(EVLLo, EVLHi) =
3879 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(),
DL);
3881 SDValue OpsLo[] = {Ch, DataLo,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
3882 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3883 VPSC->getIndexType());
3888 SDValue OpsHi[] = {
Lo, DataHi,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
3889 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi, MMO,
3890 VPSC->getIndexType());
3894 assert(
N->isUnindexed() &&
"Indexed store of vector?");
3895 assert(OpNo == 1 &&
"Can only split the stored value");
3898 bool isTruncating =
N->isTruncatingStore();
3901 EVT MemoryVT =
N->getMemoryVT();
3902 Align Alignment =
N->getOriginalAlign();
3906 GetSplitVector(
N->getOperand(1),
Lo,
Hi);
3908 EVT LoMemVT, HiMemVT;
3909 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3916 Lo = DAG.getTruncStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), LoMemVT,
3917 Alignment, MMOFlags, AAInfo);
3919 Lo = DAG.getStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), Alignment, MMOFlags,
3923 IncrementPointer(
N, LoMemVT, MPI,
Ptr);
3926 Hi = DAG.getTruncStore(Ch,
DL,
Hi,
Ptr, MPI,
3927 HiMemVT, Alignment, MMOFlags, AAInfo);
3929 Hi = DAG.getStore(Ch,
DL,
Hi,
Ptr, MPI, Alignment, MMOFlags, AAInfo);
3943 EVT EltVT =
N->getValueType(0).getVectorElementType();
3945 for (
unsigned i = 0, e =
Op.getValueType().getVectorNumElements();
3948 DAG.getVectorIdxConstant(i,
DL)));
3952 return DAG.getBuildVector(
N->getValueType(0),
DL, Elts);
3973 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
3974 SDValue InVec =
N->getOperand(OpNo);
3976 EVT OutVT =
N->getValueType(0);
3984 EVT LoOutVT, HiOutVT;
3985 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
3986 assert(LoOutVT == HiOutVT &&
"Unequal split?");
3991 if (isTypeLegal(LoOutVT) ||
3992 InElementSize <= OutElementSize * 2)
3993 return SplitVecOp_UnaryOp(
N);
4002 return SplitVecOp_UnaryOp(
N);
4006 GetSplitVector(InVec, InLoVec, InHiVec);
4012 EVT HalfElementVT = IsFloat ?
4014 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
4021 if (
N->isStrictFPOpcode()) {
4022 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
4023 {N->getOperand(0), InLoVec});
4024 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
4025 {N->getOperand(0), InHiVec});
4031 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InLoVec);
4032 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InHiVec);
4044 if (
N->isStrictFPOpcode()) {
4048 DAG.getTargetConstant(0,
DL, TLI.
getPointerTy(DAG.getDataLayout()))});
4056 DAG.getTargetConstant(
4063 assert(
N->getValueType(0).isVector() &&
4064 N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
4065 "Operand types must be vectors");
4067 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
4069 GetSplitVector(
N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
4070 GetSplitVector(
N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);
4083 DAG.getVTList(PartResVT,
N->getValueType(1)),
4084 N->getOperand(0), Lo0, Lo1,
N->getOperand(3));
4086 DAG.getVTList(PartResVT,
N->getValueType(1)),
4087 N->getOperand(0), Hi0, Hi1,
N->getOperand(3));
4090 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4092 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
4093 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4094 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
4095 std::tie(EVLLo, EVLHi) =
4096 DAG.SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
4097 LoRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Lo0, Lo1,
4098 N->getOperand(2), MaskLo, EVLLo);
4099 HiRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Hi0, Hi1,
4100 N->getOperand(2), MaskHi, EVLHi);
4104 EVT OpVT =
N->getOperand(0).getValueType();
4107 return DAG.getNode(ExtendCode,
DL,
N->getValueType(0), Con);
4113 EVT ResVT =
N->getValueType(0);
4116 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
4117 EVT InVT =
Lo.getValueType();
4122 if (
N->isStrictFPOpcode()) {
4123 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4124 { N->getOperand(0), Lo, N->getOperand(2) });
4125 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4126 { N->getOperand(0), Hi, N->getOperand(2) });
4130 Lo.getValue(1),
Hi.getValue(1));
4131 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4132 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4133 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4134 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4135 std::tie(EVLLo, EVLHi) =
4136 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4137 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4138 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
4152SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4155 EVT LHSLoVT, LHSHiVT;
4156 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
4158 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4159 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
4162 std::tie(LHSLo, LHSHi) =
4163 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4166 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4168 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4169 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4178 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
4179 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
4180 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
4182 EVT ResVT =
N->getValueType(0);
4187 SDValue Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
4188 SDValue Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);
4194 EVT ResVT =
N->getValueType(0);
4197 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4198 EVT InVT =
Lo.getValueType();
4204 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4205 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
4212 EVT ResVT =
N->getValueType(0);
4216 GetSplitVector(VecOp,
Lo,
Hi);
4218 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
4219 auto [EVLLo, EVLHi] =
4221 SDValue VLo = DAG.getZExtOrTrunc(EVLLo,
DL, ResVT);
4227 DAG.getSetCC(
DL, getSetCCResultType(ResVT), ResLo, VLo,
ISD::SETNE);
4229 return DAG.getSelect(
DL, ResVT, ResLoNotEVL, ResLo,
4230 DAG.getNode(
ISD::ADD,
DL, ResVT, VLo, ResHi));
4237void DAGTypeLegalizer::WidenVectorResult(
SDNode *
N,
unsigned ResNo) {
4238 LLVM_DEBUG(
dbgs() <<
"Widen node result " << ResNo <<
": ";
N->dump(&DAG));
4241 if (CustomWidenLowerNode(
N,
N->getValueType(ResNo)))
4246 auto unrollExpandedOp = [&]() {
4251 EVT VT =
N->getValueType(0);
4261 switch (
N->getOpcode()) {
4264 dbgs() <<
"WidenVectorResult #" << ResNo <<
": ";
4272 Res = WidenVecRes_ADDRSPACECAST(
N);
4279 Res = WidenVecRes_INSERT_SUBVECTOR(
N);
4283 case ISD::LOAD: Res = WidenVecRes_LOAD(
N);
break;
4287 Res = WidenVecRes_ScalarOp(
N);
4292 case ISD::VP_SELECT:
4294 Res = WidenVecRes_Select(
N);
4298 case ISD::SETCC: Res = WidenVecRes_SETCC(
N);
break;
4299 case ISD::UNDEF: Res = WidenVecRes_UNDEF(
N);
break;
4301 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N));
4304 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(
N));
4306 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
4307 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N));
4310 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(
N));
4313 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(
N));
4315 case ISD::VP_GATHER:
4316 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(
N));
4319 Res = WidenVecRes_VECTOR_REVERSE(
N);
4327 case ISD::OR:
case ISD::VP_OR:
4335 case ISD::VP_FMINNUM:
4338 case ISD::VP_FMAXNUM:
4340 case ISD::VP_FMINIMUM:
4342 case ISD::VP_FMAXIMUM:
4373 case ISD::VP_FCOPYSIGN:
4374 Res = WidenVecRes_Binary(
N);
4379 Res = WidenVecRes_CMP(
N);
4384 if (unrollExpandedOp())
4399 Res = WidenVecRes_BinaryCanTrap(
N);
4408 Res = WidenVecRes_BinaryWithExtraScalarOp(
N);
4411#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4412 case ISD::STRICT_##DAGN:
4413#include "llvm/IR/ConstrainedOps.def"
4414 Res = WidenVecRes_StrictFP(
N);
4423 Res = WidenVecRes_OverflowOp(
N, ResNo);
4427 Res = WidenVecRes_FCOPYSIGN(
N);
4432 Res = WidenVecRes_UnarySameEltsWithScalarArg(
N);
4437 if (!unrollExpandedOp())
4438 Res = WidenVecRes_ExpOp(
N);
4444 Res = WidenVecRes_EXTEND_VECTOR_INREG(
N);
4449 case ISD::VP_FP_EXTEND:
4451 case ISD::VP_FP_ROUND:
4453 case ISD::VP_FP_TO_SINT:
4455 case ISD::VP_FP_TO_UINT:
4457 case ISD::VP_SIGN_EXTEND:
4459 case ISD::VP_SINT_TO_FP:
4460 case ISD::VP_TRUNCATE:
4463 case ISD::VP_UINT_TO_FP:
4465 case ISD::VP_ZERO_EXTEND:
4466 Res = WidenVecRes_Convert(
N);
4471 Res = WidenVecRes_FP_TO_XINT_SAT(
N);
4477 case ISD::VP_LLRINT:
4478 Res = WidenVecRes_XRINT(
N);
4499 if (unrollExpandedOp())
4509 case ISD::VP_BITREVERSE:
4515 case ISD::VP_CTLZ_ZERO_UNDEF:
4521 case ISD::VP_CTTZ_ZERO_UNDEF:
4526 case ISD::VP_FFLOOR:
4528 case ISD::VP_FNEARBYINT:
4529 case ISD::VP_FROUND:
4530 case ISD::VP_FROUNDEVEN:
4531 case ISD::VP_FROUNDTOZERO:
4535 Res = WidenVecRes_Unary(
N);
4542 Res = WidenVecRes_Ternary(
N);
4548 SetWidenedVector(
SDValue(
N, ResNo), Res);
4555 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4556 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4557 SDValue InOp3 = GetWidenedVector(
N->getOperand(2));
4558 if (
N->getNumOperands() == 3)
4559 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4561 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
4562 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4566 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4567 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
4574 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4575 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4576 if (
N->getNumOperands() == 2)
4577 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2,
4580 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
4581 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4585 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4586 {InOp1, InOp2, Mask, N->getOperand(3)},
N->getFlags());
4595 EVT OpVT =
LHS.getValueType();
4597 LHS = GetWidenedVector(LHS);
4598 RHS = GetWidenedVector(RHS);
4599 OpVT =
LHS.getValueType();
4605 return DAG.getNode(
N->getOpcode(), dl, WidenResVT, LHS, RHS);
4611SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(
SDNode *
N) {
4615 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4616 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4618 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
4627 unsigned ConcatEnd,
EVT VT,
EVT MaxVT,
4630 if (ConcatEnd == 1) {
4631 VT = ConcatOps[0].getValueType();
4633 return ConcatOps[0];
4636 SDLoc dl(ConcatOps[0]);
4643 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
4644 int Idx = ConcatEnd - 1;
4645 VT = ConcatOps[
Idx--].getValueType();
4646 while (
Idx >= 0 && ConcatOps[
Idx].getValueType() == VT)
4659 unsigned NumToInsert = ConcatEnd -
Idx - 1;
4660 for (
unsigned i = 0, OpIdx =
Idx+1; i < NumToInsert; i++, OpIdx++) {
4664 ConcatOps[
Idx+1] = VecOp;
4665 ConcatEnd =
Idx + 2;
4671 unsigned RealVals = ConcatEnd -
Idx - 1;
4672 unsigned SubConcatEnd = 0;
4673 unsigned SubConcatIdx =
Idx + 1;
4674 while (SubConcatEnd < RealVals)
4675 SubConcatOps[SubConcatEnd++] = ConcatOps[++
Idx];
4676 while (SubConcatEnd < OpsToConcat)
4677 SubConcatOps[SubConcatEnd++] = undefVec;
4679 NextVT, SubConcatOps);
4680 ConcatEnd = SubConcatIdx + 1;
4685 if (ConcatEnd == 1) {
4686 VT = ConcatOps[0].getValueType();
4688 return ConcatOps[0];
4693 if (NumOps != ConcatEnd ) {
4695 for (
unsigned j = ConcatEnd; j < NumOps; ++j)
4696 ConcatOps[j] = UndefVal;
4704 unsigned Opcode =
N->getOpcode();
4712 NumElts = NumElts / 2;
4716 if (NumElts != 1 && !TLI.
canOpTrap(
N->getOpcode(), VT)) {
4718 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4719 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4720 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
4732 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4733 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4734 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4737 unsigned ConcatEnd = 0;
4745 while (CurNumElts != 0) {
4746 while (CurNumElts >= NumElts) {
4748 DAG.getVectorIdxConstant(
Idx, dl));
4750 DAG.getVectorIdxConstant(
Idx, dl));
4751 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
4753 CurNumElts -= NumElts;
4756 NumElts = NumElts / 2;
4761 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4763 InOp1, DAG.getVectorIdxConstant(
Idx, dl));
4765 InOp2, DAG.getVectorIdxConstant(
Idx, dl));
4766 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
4777 switch (
N->getOpcode()) {
4780 return WidenVecRes_STRICT_FSETCC(
N);
4787 return WidenVecRes_Convert_StrictFP(
N);
4793 unsigned NumOpers =
N->getNumOperands();
4794 unsigned Opcode =
N->getOpcode();
4801 NumElts = NumElts / 2;
4812 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4816 unsigned ConcatEnd = 0;
4823 for (
unsigned i = 1; i < NumOpers; ++i) {
4829 Oper = GetWidenedVector(Oper);
4835 DAG.getUNDEF(WideOpVT), Oper,
4836 DAG.getVectorIdxConstant(0, dl));
4848 while (CurNumElts != 0) {
4849 while (CurNumElts >= NumElts) {
4852 for (
unsigned i = 0; i < NumOpers; ++i) {
4855 EVT OpVT =
Op.getValueType();
4861 DAG.getVectorIdxConstant(
Idx, dl));
4867 EVT OperVT[] = {VT, MVT::Other};
4869 ConcatOps[ConcatEnd++] = Oper;
4872 CurNumElts -= NumElts;
4875 NumElts = NumElts / 2;
4880 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4883 for (
unsigned i = 0; i < NumOpers; ++i) {
4886 EVT OpVT =
Op.getValueType();
4890 DAG.getVectorIdxConstant(
Idx, dl));
4895 EVT WidenVT[] = {WidenEltVT, MVT::Other};
4897 ConcatOps[ConcatEnd++] = Oper;
4906 if (Chains.
size() == 1)
4907 NewChain = Chains[0];
4910 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4915SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo) {
4917 EVT ResVT =
N->getValueType(0);
4918 EVT OvVT =
N->getValueType(1);
4919 EVT WideResVT, WideOvVT;
4929 WideLHS = GetWidenedVector(
N->getOperand(0));
4930 WideRHS = GetWidenedVector(
N->getOperand(1));
4940 N->getOperand(0), Zero);
4943 N->getOperand(1), Zero);
4946 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
4947 SDNode *WideNode = DAG.getNode(
4948 N->getOpcode(),
DL, WideVTs, WideLHS, WideRHS).getNode();
4951 unsigned OtherNo = 1 - ResNo;
4952 EVT OtherVT =
N->getValueType(OtherNo);
4959 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
4962 return SDValue(WideNode, ResNo);
4975 unsigned Opcode =
N->getOpcode();
4984 InOp = ZExtPromotedInteger(InOp);
4995 InOp = GetWidenedVector(
N->getOperand(0));
4998 if (InVTEC == WidenEC) {
4999 if (
N->getNumOperands() == 1)
5000 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5001 if (
N->getNumOperands() == 3) {
5002 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5005 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
5007 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
5030 unsigned NumConcat =
5035 if (
N->getNumOperands() == 1)
5036 return DAG.getNode(Opcode,
DL, WidenVT, InVec);
5037 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
5042 DAG.getVectorIdxConstant(0,
DL));
5044 if (
N->getNumOperands() == 1)
5045 return DAG.getNode(Opcode,
DL, WidenVT, InVal);
5046 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
5055 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5056 for (
unsigned i=0; i < MinElts; ++i) {
5058 DAG.getVectorIdxConstant(i,
DL));
5059 if (
N->getNumOperands() == 1)
5060 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val);
5062 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
5065 return DAG.getBuildVector(WidenVT,
DL, Ops);
5074 EVT SrcVT = Src.getValueType();
5078 Src = GetWidenedVector(Src);
5079 SrcVT = Src.getValueType();
5086 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
5095 EVT SrcVT = Src.getValueType();
5099 Src = GetWidenedVector(Src);
5100 SrcVT = Src.getValueType();
5107 if (
N->getNumOperands() == 1)
5108 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
5110 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5111 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5115 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
5118SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
5129 unsigned Opcode =
N->getOpcode();
5135 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
5140 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5141 for (
unsigned i=0; i < MinElts; ++i) {
5143 DAG.getVectorIdxConstant(i,
DL));
5144 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
5148 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5150 return DAG.getBuildVector(WidenVT,
DL, Ops);
5153SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
5154 unsigned Opcode =
N->getOpcode();
5167 InOp = GetWidenedVector(InOp);
5174 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5181 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
5183 DAG.getVectorIdxConstant(i,
DL));
5200 while (Ops.
size() != WidenNumElts)
5203 return DAG.getBuildVector(WidenVT,
DL, Ops);
5209 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5210 return WidenVecRes_BinaryCanTrap(
N);
5220SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5221 SDValue FpValue =
N->getOperand(0);
5225 SDValue Arg = GetWidenedVector(FpValue);
5226 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5232 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5234 EVT ExpVT =
RHS.getValueType();
5239 ExpOp = ModifyToType(RHS, WideExpVT);
5242 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5248 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5249 if (
N->getNumOperands() == 1)
5250 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5252 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5253 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5257 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5258 {InOp,
Mask,
N->getOperand(2)});
5264 cast<VTSDNode>(
N->getOperand(1))->getVT()
5265 .getVectorElementType(),
5267 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5268 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5269 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5272SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5273 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5274 return GetWidenedVector(WidenVec);
5279 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5280 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
5282 return DAG.getAddrSpaceCast(
SDLoc(
N), WidenVT, InOp,
5283 AddrSpaceCastN->getSrcAddressSpace(),
5284 AddrSpaceCastN->getDestAddressSpace());
5290 EVT VT =
N->getValueType(0);
5294 switch (getTypeAction(InVT)) {
5308 SDValue NInOp = GetPromotedInteger(InOp);
5310 if (WidenVT.
bitsEq(NInVT)) {
5313 if (DAG.getDataLayout().isBigEndian()) {
5318 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5337 InOp = GetWidenedVector(InOp);
5339 if (WidenVT.
bitsEq(InVT))
5349 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5354 unsigned NewNumParts = WidenSize / InSize;
5367 EVT OrigInVT =
N->getOperand(0).getValueType();
5380 if (WidenSize % InSize == 0) {
5387 DAG.ExtractVectorElements(InOp, Ops);
5388 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5400 return CreateStackStoreLoad(InOp, WidenVT);
5406 EVT VT =
N->getValueType(0);
5410 EVT EltVT =
N->getOperand(0).getValueType();
5417 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5418 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5420 return DAG.getBuildVector(WidenVT, dl, NewOps);
5424 EVT InVT =
N->getOperand(0).getValueType();
5427 unsigned NumOperands =
N->getNumOperands();
5429 bool InputWidened =
false;
5433 if (WidenNumElts % NumInElts == 0) {
5435 unsigned NumConcat = WidenNumElts / NumInElts;
5436 SDValue UndefVal = DAG.getUNDEF(InVT);
5438 for (
unsigned i=0; i < NumOperands; ++i)
5439 Ops[i] =
N->getOperand(i);
5440 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5445 InputWidened =
true;
5449 for (i=1; i < NumOperands; ++i)
5450 if (!
N->getOperand(i).isUndef())
5453 if (i == NumOperands)
5456 return GetWidenedVector(
N->getOperand(0));
5458 if (NumOperands == 2) {
5460 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5466 for (
unsigned i = 0; i < NumInElts; ++i) {
5468 MaskOps[i + NumInElts] = i + WidenNumElts;
5470 return DAG.getVectorShuffle(WidenVT, dl,
5471 GetWidenedVector(
N->getOperand(0)),
5472 GetWidenedVector(
N->getOperand(1)),
5479 "Cannot use build vectors to widen CONCAT_VECTOR result");
5487 for (
unsigned i=0; i < NumOperands; ++i) {
5490 InOp = GetWidenedVector(InOp);
5491 for (
unsigned j = 0;
j < NumInElts; ++
j)
5493 DAG.getVectorIdxConstant(j, dl));
5495 SDValue UndefVal = DAG.getUNDEF(EltVT);
5496 for (;
Idx < WidenNumElts; ++
Idx)
5497 Ops[
Idx] = UndefVal;
5498 return DAG.getBuildVector(WidenVT, dl, Ops);
5501SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5502 EVT VT =
N->getValueType(0);
5504 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5511SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5512 EVT VT =
N->getValueType(0);
5519 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5521 InOp = GetWidenedVector(InOp);
5527 if (IdxVal == 0 && InVT == WidenVT)
5534 assert(IdxVal % VTNumElts == 0 &&
5535 "Expected Idx to be a multiple of subvector minimum vector length");
5536 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5549 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5550 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5551 "down type's element count");
5558 for (;
I < VTNumElts / GCD; ++
I)
5561 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5562 for (;
I < WidenNumElts / GCD; ++
I)
5569 "EXTRACT_SUBVECTOR for scalable vectors");
5576 for (i = 0; i < VTNumElts; ++i)
5578 DAG.getVectorIdxConstant(IdxVal + i, dl));
5580 SDValue UndefVal = DAG.getUNDEF(EltVT);
5581 for (; i < WidenNumElts; ++i)
5583 return DAG.getBuildVector(WidenVT, dl, Ops);
5594SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5595 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5598 N->getOperand(1),
N->getOperand(2));
5611 if (!
LD->getMemoryVT().isByteSized()) {
5615 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5624 EVT LdVT =
LD->getMemoryVT();
5635 const auto *MMO =
LD->getMemOperand();
5637 DAG.getLoadVP(WideVT,
DL,
LD->getChain(),
LD->getBasePtr(), Mask, EVL,
5651 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5653 Result = GenWidenVectorLoads(LdChain, LD);
5660 if (LdChain.
size() == 1)
5661 NewChain = LdChain[0];
5667 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5678 SDValue EVL =
N->getVectorLength();
5685 "Unable to widen binary VP op");
5686 Mask = GetWidenedVector(Mask);
5687 assert(
Mask.getValueType().getVectorElementCount() ==
5690 "Unable to widen vector load");
5693 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5694 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5695 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
5709 "Unable to widen VP strided load");
5710 Mask = GetWidenedVector(Mask);
5713 assert(
Mask.getValueType().getVectorElementCount() ==
5715 "Data and mask vectors should have the same number of elements");
5717 SDValue Res = DAG.getStridedLoadVP(
5718 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
5719 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
5720 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
5721 N->isExpandingLoad());
5733 EVT MaskVT =
Mask.getValueType();
5734 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5742 Mask = ModifyToType(Mask, WideMaskVT,
true);
5744 SDValue Res = DAG.getMaskedLoad(
5745 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
5746 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
5747 ExtType,
N->isExpandingLoad());
5758 EVT MaskVT =
Mask.getValueType();
5759 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5768 Mask = ModifyToType(Mask, WideMaskVT,
true);
5773 Index.getValueType().getScalarType(),
5781 N->getMemoryVT().getScalarType(), NumElts);
5782 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
5783 WideMemVT, dl, Ops,
N->getMemOperand(),
5784 N->getIndexType(),
N->getExtensionType());
5801 N->getMemoryVT().getScalarType(), WideEC);
5802 Mask = GetWidenedMask(Mask, WideEC);
5805 Mask,
N->getVectorLength()};
5806 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
5807 dl, Ops,
N->getMemOperand(),
N->getIndexType());
5817 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
5845 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
5846 return N->getOperand(OpNo).getValueType();
5854 N =
N.getOperand(0);
5856 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
5857 if (!
N->getOperand(i)->isUndef())
5859 N =
N.getOperand(0);
5863 N =
N.getOperand(0);
5865 N =
N.getOperand(0);
5892 { MaskVT, MVT::Other }, Ops);
5893 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
5903 if (MaskScalarBits < ToMaskScalBits) {
5907 }
else if (MaskScalarBits > ToMaskScalBits) {
5913 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
5915 "Mask should have the right element size by now.");
5918 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
5920 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
5925 EVT SubVT =
Mask->getValueType(0);
5931 assert((
Mask->getValueType(0) == ToMaskVT) &&
5932 "A mask of ToMaskVT should have been produced by now.");
5953 EVT CondVT =
Cond->getValueType(0);
5957 EVT VSelVT =
N->getValueType(0);
5969 EVT FinalVT = VSelVT;
5981 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
5999 EVT ToMaskVT = VSelVT;
6006 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6022 if (ScalarBits0 != ScalarBits1) {
6023 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
6024 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
6036 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
6037 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
6041 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6054 unsigned Opcode =
N->getOpcode();
6056 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
6057 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6058 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6060 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
6066 Cond1 = GetWidenedVector(Cond1);
6074 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
6075 SDValue Res = ModifyToType(SplitSelect, WidenVT);
6080 Cond1 = ModifyToType(Cond1, CondWidenVT);
6083 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6084 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6086 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
6087 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
6089 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
6093 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
6094 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
6097 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
6102 return DAG.getUNDEF(WidenVT);
6106 EVT VT =
N->getValueType(0);
6113 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
6114 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
6118 for (
unsigned i = 0; i != NumElts; ++i) {
6119 int Idx =
N->getMaskElt(i);
6120 if (
Idx < (
int)NumElts)
6125 for (
unsigned i = NumElts; i != WidenNumElts; ++i)
6127 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
6131 EVT VT =
N->getValueType(0);
6136 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
6142 unsigned IdxVal = WidenNumElts - VTNumElts;
6155 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
6158 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
6159 "down type's element count");
6162 for (; i < VTNumElts / GCD; ++i)
6165 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
6166 for (; i < WidenNumElts / GCD; ++i)
6175 for (
unsigned i = 0; i != VTNumElts; ++i) {
6176 Mask.push_back(IdxVal + i);
6178 for (
unsigned i = VTNumElts; i != WidenNumElts; ++i)
6181 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
6186 assert(
N->getValueType(0).isVector() &&
6187 N->getOperand(0).getValueType().isVector() &&
6188 "Operands must be vectors");
6202 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
6203 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
6210 InOp1 = GetWidenedVector(InOp1);
6211 InOp2 = GetWidenedVector(InOp2);
6213 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
6214 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
6221 "Input not widened to expected type!");
6223 if (
N->getOpcode() == ISD::VP_SETCC) {
6226 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6227 N->getOperand(2), Mask,
N->getOperand(4));
6234 assert(
N->getValueType(0).isVector() &&
6235 N->getOperand(1).getValueType().isVector() &&
6236 "Operands must be vectors");
6237 EVT VT =
N->getValueType(0);
6248 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6253 for (
unsigned i = 0; i != NumElts; ++i) {
6255 DAG.getVectorIdxConstant(i, dl));
6257 DAG.getVectorIdxConstant(i, dl));
6259 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6260 {Chain, LHSElem, RHSElem, CC});
6261 Chains[i] = Scalars[i].getValue(1);
6262 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6263 DAG.getBoolConstant(
true, dl, EltVT, VT),
6264 DAG.getBoolConstant(
false, dl, EltVT, VT));
6268 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6270 return DAG.getBuildVector(WidenVT, dl, Scalars);
6276bool DAGTypeLegalizer::WidenVectorOperand(
SDNode *
N,
unsigned OpNo) {
6277 LLVM_DEBUG(
dbgs() <<
"Widen node operand " << OpNo <<
": ";
N->dump(&DAG));
6281 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
6284 switch (
N->getOpcode()) {
6287 dbgs() <<
"WidenVectorOperand op #" << OpNo <<
": ";
6298 case ISD::STORE: Res = WidenVecOp_STORE(
N);
break;
6299 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(
N, OpNo);
break;
6300 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6301 Res = WidenVecOp_VP_STRIDED_STORE(
N, OpNo);
6306 Res = WidenVecOp_EXTEND_VECTOR_INREG(
N);
6308 case ISD::MSTORE: Res = WidenVecOp_MSTORE(
N, OpNo);
break;
6309 case ISD::MGATHER: Res = WidenVecOp_MGATHER(
N, OpNo);
break;
6311 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(
N, OpNo);
break;
6312 case ISD::SETCC: Res = WidenVecOp_SETCC(
N);
break;
6320 Res = WidenVecOp_UnrollVectorOp(
N);
6327 Res = WidenVecOp_EXTEND(
N);
6332 Res = WidenVecOp_CMP(
N);
6348 Res = WidenVecOp_Convert(
N);
6353 Res = WidenVecOp_FP_TO_XINT_SAT(
N);
6371 Res = WidenVecOp_VECREDUCE(
N);
6375 Res = WidenVecOp_VECREDUCE_SEQ(
N);
6377 case ISD::VP_REDUCE_FADD:
6378 case ISD::VP_REDUCE_SEQ_FADD:
6379 case ISD::VP_REDUCE_FMUL:
6380 case ISD::VP_REDUCE_SEQ_FMUL:
6381 case ISD::VP_REDUCE_ADD:
6382 case ISD::VP_REDUCE_MUL:
6383 case ISD::VP_REDUCE_AND:
6384 case ISD::VP_REDUCE_OR:
6385 case ISD::VP_REDUCE_XOR:
6386 case ISD::VP_REDUCE_SMAX:
6387 case ISD::VP_REDUCE_SMIN:
6388 case ISD::VP_REDUCE_UMAX:
6389 case ISD::VP_REDUCE_UMIN:
6390 case ISD::VP_REDUCE_FMAX:
6391 case ISD::VP_REDUCE_FMIN:
6392 case ISD::VP_REDUCE_FMAXIMUM:
6393 case ISD::VP_REDUCE_FMINIMUM:
6394 Res = WidenVecOp_VP_REDUCE(
N);
6396 case ISD::VP_CTTZ_ELTS:
6397 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
6398 Res = WidenVecOp_VP_CttzElements(
N);
6403 if (!Res.
getNode())
return false;
6411 if (
N->isStrictFPOpcode())
6413 "Invalid operand expansion");
6416 "Invalid operand expansion");
6418 ReplaceValueWith(
SDValue(
N, 0), Res);
6424 EVT VT =
N->getValueType(0);
6429 "Unexpected type action");
6430 InOp = GetWidenedVector(InOp);
6433 "Input wasn't widened!");
6444 FixedEltVT == InEltVT) {
6446 "Not enough elements in the fixed type for the operand!");
6448 "We can't have the same type as we started with!");
6451 DAG.getUNDEF(FixedVT), InOp,
6452 DAG.getVectorIdxConstant(0,
DL));
6455 DAG.getVectorIdxConstant(0,
DL));
6464 return WidenVecOp_Convert(
N);
6469 switch (
N->getOpcode()) {
6484 EVT OpVT =
N->getOperand(0).getValueType();
6485 EVT ResVT =
N->getValueType(0);
6493 DAG.getVectorIdxConstant(0, dl));
6495 DAG.getVectorIdxConstant(0, dl));
6501 LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
6502 RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);
6504 return DAG.getNode(
N->getOpcode(), dl, ResVT, LHS, RHS);
6511 return DAG.UnrollVectorOp(
N);
6516 EVT ResultVT =
N->getValueType(0);
6518 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6527 {WideArg,
Test},
N->getFlags());
6534 DAG.getVectorIdxConstant(0,
DL));
6536 EVT OpVT =
N->getOperand(0).getValueType();
6539 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6544 EVT VT =
N->getValueType(0);
6547 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6550 "Unexpected type action");
6551 InOp = GetWidenedVector(InOp);
6553 unsigned Opcode =
N->getOpcode();
6559 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6561 if (
N->isStrictFPOpcode()) {
6563 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6564 {
N->getOperand(0), InOp,
N->getOperand(2) });
6566 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6567 {
N->getOperand(0), InOp });
6573 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6575 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6578 DAG.getVectorIdxConstant(0, dl));
6586 if (
N->isStrictFPOpcode()) {
6589 for (
unsigned i=0; i < NumElts; ++i) {
6591 DAG.getVectorIdxConstant(i, dl));
6592 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6596 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6598 for (
unsigned i = 0; i < NumElts; ++i)
6599 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6601 InOp, DAG.getVectorIdxConstant(i, dl)));
6604 return DAG.getBuildVector(VT, dl, Ops);
6608 EVT DstVT =
N->getValueType(0);
6609 SDValue Src = GetWidenedVector(
N->getOperand(0));
6610 EVT SrcVT = Src.getValueType();
6619 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6622 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6626 return DAG.UnrollVectorOp(
N);
6630 EVT VT =
N->getValueType(0);
6631 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6639 if (!VT.
isVector() && VT != MVT::x86mmx &&
6646 DAG.getVectorIdxConstant(0, dl));
6660 .divideCoefficientBy(EltSize);
6665 DAG.getVectorIdxConstant(0, dl));
6670 return CreateStackStoreLoad(InOp, VT);
6674 EVT VT =
N->getValueType(0);
6676 EVT InVT =
N->getOperand(0).getValueType();
6681 unsigned NumOperands =
N->getNumOperands();
6684 for (i = 1; i < NumOperands; ++i)
6685 if (!
N->getOperand(i).isUndef())
6688 if (i == NumOperands)
6689 return GetWidenedVector(
N->getOperand(0));
6699 for (
unsigned i=0; i < NumOperands; ++i) {
6703 "Unexpected type action");
6704 InOp = GetWidenedVector(InOp);
6705 for (
unsigned j = 0;
j < NumInElts; ++
j)
6707 DAG.getVectorIdxConstant(j, dl));
6709 return DAG.getBuildVector(VT, dl, Ops);
6712SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
6713 EVT VT =
N->getValueType(0);
6718 SubVec = GetWidenedVector(SubVec);
6724 bool IndicesValid =
false;
6727 IndicesValid =
true;
6731 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
6732 Attribute::VScaleRange);
6737 IndicesValid =
true;
6743 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
6748 "INSERT_SUBVECTOR");
6751SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
6752 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6754 N->getValueType(0), InOp,
N->getOperand(1));
6757SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
6758 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6760 N->getValueType(0), InOp,
N->getOperand(1));
6763SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
6764 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6765 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
6773 if (!
ST->getMemoryVT().getScalarType().isByteSized())
6776 if (
ST->isTruncatingStore())
6795 StVal = GetWidenedVector(StVal);
6799 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
6800 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
6801 EVL, StVT,
ST->getMemOperand(),
6802 ST->getAddressingMode());
6806 if (GenWidenVectorStores(StChain, ST)) {
6807 if (StChain.
size() == 1)
6816SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
6817 assert((OpNo == 1 || OpNo == 3) &&
6818 "Can widen only data or mask operand of vp_store");
6826 StVal = GetWidenedVector(StVal);
6832 "Unable to widen VP store");
6833 Mask = GetWidenedVector(Mask);
6835 Mask = GetWidenedVector(Mask);
6841 "Unable to widen VP store");
6842 StVal = GetWidenedVector(StVal);
6845 assert(
Mask.getValueType().getVectorElementCount() ==
6847 "Mask and data vectors should have the same number of elements");
6848 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
6849 ST->getOffset(), Mask,
ST->getVectorLength(),
6850 ST->getMemoryVT(),
ST->getMemOperand(),
6851 ST->getAddressingMode(),
ST->isTruncatingStore(),
6852 ST->isCompressingStore());
6857 assert((OpNo == 1 || OpNo == 4) &&
6858 "Can widen only data or mask operand of vp_strided_store");
6867 "Unable to widen VP strided store");
6871 "Unable to widen VP strided store");
6873 StVal = GetWidenedVector(StVal);
6874 Mask = GetWidenedVector(Mask);
6877 Mask.getValueType().getVectorElementCount() &&
6878 "Data and mask vectors should have the same number of elements");
6880 return DAG.getStridedStoreVP(
6887SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
6888 assert((OpNo == 1 || OpNo == 4) &&
6889 "Can widen only data or mask operand of mstore");
6892 EVT MaskVT =
Mask.getValueType();
6898 StVal = GetWidenedVector(StVal);
6905 Mask = ModifyToType(Mask, WideMaskVT,
true);
6909 Mask = ModifyToType(Mask, WideMaskVT,
true);
6915 StVal = ModifyToType(StVal, WideVT);
6918 assert(
Mask.getValueType().getVectorNumElements() ==
6920 "Mask and data vectors should have the same number of elements");
6927SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
6928 assert(OpNo == 4 &&
"Can widen only the index of mgather");
6929 auto *MG = cast<MaskedGatherSDNode>(
N);
6930 SDValue DataOp = MG->getPassThru();
6932 SDValue Scale = MG->getScale();
6940 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
6941 MG->getMemOperand(), MG->getIndexType(),
6942 MG->getExtensionType());
6948SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
6957 DataOp = GetWidenedVector(DataOp);
6961 EVT IndexVT =
Index.getValueType();
6967 EVT MaskVT =
Mask.getValueType();
6970 Mask = ModifyToType(Mask, WideMaskVT,
true);
6975 }
else if (OpNo == 4) {
6983 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
6988SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
6997 DataOp = GetWidenedVector(DataOp);
7000 Mask = GetWidenedMask(Mask, WideEC);
7003 }
else if (OpNo == 3) {
7012 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
7017 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
7018 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
7020 EVT VT =
N->getValueType(0);
7035 SVT, InOp0, InOp1,
N->getOperand(2));
7042 DAG.getVectorIdxConstant(0, dl));
7044 EVT OpVT =
N->getOperand(0).getValueType();
7047 return DAG.getNode(ExtendCode, dl, VT,
CC);
7057 EVT VT =
N->getValueType(0);
7059 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
7066 for (
unsigned i = 0; i != NumElts; ++i) {
7068 DAG.getVectorIdxConstant(i, dl));
7070 DAG.getVectorIdxConstant(i, dl));
7072 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
7073 {Chain, LHSElem, RHSElem, CC});
7074 Chains[i] = Scalars[i].getValue(1);
7075 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
7076 DAG.getBoolConstant(
true, dl, EltVT, VT),
7077 DAG.getBoolConstant(
false, dl, EltVT, VT));
7081 ReplaceValueWith(
SDValue(
N, 1), NewChain);
7083 return DAG.getBuildVector(VT, dl, Scalars);
7088 SDValue Op = GetWidenedVector(
N->getOperand(0));
7089 EVT OrigVT =
N->getOperand(0).getValueType();
7090 EVT WideVT =
Op.getValueType();
7094 unsigned Opc =
N->getOpcode();
7096 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7097 assert(NeutralElem &&
"Neutral element must exist");
7104 unsigned GCD = std::gcd(OrigElts, WideElts);
7107 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7108 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7110 DAG.getVectorIdxConstant(
Idx, dl));
7111 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
7114 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7116 DAG.getVectorIdxConstant(
Idx, dl));
7118 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
7128 EVT WideVT =
Op.getValueType();
7132 unsigned Opc =
N->getOpcode();
7134 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7141 unsigned GCD = std::gcd(OrigElts, WideElts);
7144 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7145 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7147 DAG.getVectorIdxConstant(
Idx, dl));
7148 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
7151 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7153 DAG.getVectorIdxConstant(
Idx, dl));
7155 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
7159 assert(
N->isVPOpcode() &&
"Expected VP opcode");
7162 SDValue Op = GetWidenedVector(
N->getOperand(1));
7164 Op.getValueType().getVectorElementCount());
7166 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
7167 {N->getOperand(0), Op, Mask, N->getOperand(3)},
7175 EVT VT =
N->getValueType(0);
7186 DAG.getVectorIdxConstant(0,
DL));
7196 return DAG.getNode(
N->getOpcode(),
DL,
N->getValueType(0),
7197 {Source, Mask, N->getOperand(2)},
N->getFlags());
7214 unsigned WidenEx = 0) {
7219 unsigned AlignInBits =
Align*8;
7222 EVT RetVT = WidenEltVT;
7223 if (!Scalable && Width == WidenEltWidth)
7237 (WidenWidth % MemVTWidth) == 0 &&
7239 (MemVTWidth <= Width ||
7240 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7241 if (MemVTWidth == WidenWidth)
7260 (WidenWidth % MemVTWidth) == 0 &&
7262 (MemVTWidth <= Width ||
7263 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7272 return std::nullopt;
7283 unsigned Start,
unsigned End) {
7284 SDLoc dl(LdOps[Start]);
7285 EVT LdTy = LdOps[Start].getValueType();
7293 for (
unsigned i = Start + 1; i !=
End; ++i) {
7294 EVT NewLdTy = LdOps[i].getValueType();
7295 if (NewLdTy != LdTy) {
7316 EVT LdVT =
LD->getMemoryVT();
7330 TypeSize WidthDiff = WidenWidth - LdWidth;
7337 std::optional<EVT> FirstVT =
7338 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7345 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7350 std::optional<EVT> NewVT = FirstVT;
7352 TypeSize NewVTWidth = FirstVTWidth;
7354 RemainingWidth -= NewVTWidth;
7361 NewVTWidth = NewVT->getSizeInBits();
7367 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7368 LD->getOriginalAlign(), MMOFlags, AAInfo);
7372 if (MemVTs.
empty()) {
7374 if (!FirstVT->isVector()) {
7381 if (FirstVT == WidenVT)
7386 unsigned NumConcat =
7389 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7390 ConcatOps[0] = LdOp;
7391 for (
unsigned i = 1; i != NumConcat; ++i)
7392 ConcatOps[i] = UndefVal;
7404 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7407 for (
EVT MemVT : MemVTs) {
7408 Align NewAlign = ScaledOffset == 0
7409 ?
LD->getOriginalAlign()
7412 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7416 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7421 if (!LdOps[0].getValueType().
isVector())
7431 EVT LdTy = LdOps[i].getValueType();
7434 for (--i; i >= 0; --i) {
7435 LdTy = LdOps[i].getValueType();
7442 ConcatOps[--
Idx] = LdOps[i];
7443 for (--i; i >= 0; --i) {
7444 EVT NewLdTy = LdOps[i].getValueType();
7445 if (NewLdTy != LdTy) {
7456 WidenOps[j] = ConcatOps[
Idx+j];
7457 for (;
j != NumOps; ++
j)
7458 WidenOps[j] = DAG.getUNDEF(LdTy);
7465 ConcatOps[--
Idx] = LdOps[i];
7476 SDValue UndefVal = DAG.getUNDEF(LdTy);
7479 for (; i !=
End-
Idx; ++i)
7480 WidenOps[i] = ConcatOps[
Idx+i];
7481 for (; i != NumOps; ++i)
7482 WidenOps[i] = UndefVal;
7494 EVT LdVT =
LD->getMemoryVT();
7507 "not yet supported");
7518 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7519 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7525 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7526 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7527 LD->getOriginalAlign(), MMOFlags, AAInfo);
7532 SDValue UndefVal = DAG.getUNDEF(EltVT);
7533 for (; i != WidenNumElts; ++i)
7536 return DAG.getBuildVector(WidenVT, dl, Ops);
7548 SDValue ValOp = GetWidenedVector(
ST->getValue());
7551 EVT StVT =
ST->getMemoryVT();
7559 "Mismatch between store and value types");
7573 std::optional<EVT> NewVT =
7578 TypeSize NewVTWidth = NewVT->getSizeInBits();
7581 StWidth -= NewVTWidth;
7582 MemVTs.
back().second++;
7586 for (
const auto &Pair : MemVTs) {
7587 EVT NewVT = Pair.first;
7588 unsigned Count = Pair.second;
7594 Align NewAlign = ScaledOffset == 0
7595 ?
ST->getOriginalAlign()
7598 DAG.getVectorIdxConstant(
Idx, dl));
7599 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7604 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
7616 DAG.getVectorIdxConstant(
Idx++, dl));
7618 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
7622 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
7636 bool FillWithZeroes) {
7641 "input and widen element type must match");
7643 "cannot modify scalable vectors in this way");
7655 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
7658 for (
unsigned i = 1; i != NumConcat; ++i)
7666 DAG.getVectorIdxConstant(0, dl));
7669 "Scalable vectors should have been handled already.");
7677 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
7681 DAG.getVectorIdxConstant(
Idx, dl));
7683 SDValue UndefVal = DAG.getUNDEF(EltVT);
7684 for (;
Idx < WidenNumElts; ++
Idx)
7685 Ops[
Idx] = UndefVal;
7687 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
7688 if (!FillWithZeroes)
7692 "We expect to never want to FillWithZeroes for non-integral types.");
7695 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
7696 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
7698 return DAG.getNode(
ISD::AND, dl, NVT, Widened,
7699 DAG.getBuildVector(NVT, dl,
MaskOps));
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
amdgpu AMDGPU Register Bank Select
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent an VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
EVT changeElementType(EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight), and returns the integer EVT with that number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type that is chosen by the caller.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.