#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"

#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
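// The GET_*_IMPL defines above pull in the TableGen-generated searchable
// tables (RISCVGenSearchableTables.inc) that back the RISCV::get*Pseudo()
// lookups used throughout this file.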
  assert(Node->getNumOperands() > 0 && "Node with no operands");
  unsigned LastOpIdx = Node->getNumOperands() - 1;
  if (Node->getOperand(LastOpIdx).getValueType() == MVT::Glue)
    --LastOpIdx;
  if (Node->getOperand(LastOpIdx).getValueType() == MVT::Other)
    --LastOpIdx;
  bool MadeChange = false;
    switch (N->getOpcode()) {
      MVT VT = N->getSimpleValueType(0);
                                       N->getOperand(0), VL);
      assert(N->getNumOperands() == 4 && "Unexpected number of operands");
      MVT VT = N->getSimpleValueType(0);
             Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
                         MVT::i64, MPI, Align(8),
      LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
  bool MadeChange = false;
    if (N->use_empty() || !N->isMachineOpcode())
    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeMaskedRVV(N);
  MadeChange |= doPeepholeMergeVVMFold();
  switch (Inst.getOpndKind()) {
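// Register-class tables for segment (tuple) operands: VRN<NF>M<LMUL>
// classes exist only where NF * LMUL stays within the 8 vector registers a
// segment access may touch.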
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm1_0;
    RegClassID = M1TupleRegClassIDs[NF - 2];
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm2_0;
    RegClassID = M2TupleRegClassIDs[NF - 2];
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm4_0;
    RegClassID = RISCV::VRN2M4RegClassID;
  for (unsigned I = 0; I < Regs.size(); ++I) {
                                                    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  Operands.push_back(Node->getOperand(CurOp++));
  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++));
      *IndexVT = Operands.back()->getSimpleValueType(0);
    SDValue Mask = Node->getOperand(CurOp++);
  if (IsMasked && IsLoad) {
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
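  // Note: the chain is read from operand 0 up front but is appended to
  // Operands after the value operands, matching the pseudos' operand order.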
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
                               Node->op_begin() + CurOp + NF);
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  for (unsigned I = 0; I < NF; ++I) {
  unsigned NF = Node->getNumValues() - 2;
  MVT VT = Node->getSimpleValueType(0);
                               Node->op_begin() + CurOp + NF);
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, false, true, Log2SEW,
                            static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  for (unsigned I = 0; I < NF; ++I) {
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
                               Node->op_begin() + CurOp + NF);
         "Element count mismatch");
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
                       "values when XLEN=32");
      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  for (unsigned I = 0; I < NF; ++I) {
  unsigned NF = Node->getNumOperands() - 4;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned CurOp = 2 + NF;
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  unsigned NF = Node->getNumOperands() - 5;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned CurOp = 2 + NF;
         "Element count mismatch");
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
                       "values when XLEN=32");
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
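// selectVSETVLI: lower the riscv_vsetvli / riscv_vsetvlimax intrinsics to the
// PseudoVSETVLI family; the VLMAX form uses the X0-source variant, and a
// small constant AVL (uimm5) can use the immediate encoding.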
  unsigned IntNo = Node->getConstantOperandVal(0);
  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax) &&
         "Unexpected vsetvli intrinsic");
  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
  unsigned Offset = (VLMax ? 1 : 2);
         "Unexpected number of operands");
      Node->getConstantOperandVal(Offset + 1) & 0x7);
  unsigned Opcode = RISCV::PseudoVSETVLI;
    Opcode = RISCV::PseudoVSETVLIX0;
    VLOperand = Node->getOperand(1);
  if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
    uint64_t AVL = C->getZExtValue();
    if (isUInt<5>(AVL)) {
  MVT VT = Node->getSimpleValueType(0);
  unsigned Opcode = Node->getOpcode();
         "Unexpected opcode");
  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);
  bool SignExt = false;
  uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
  int64_t ShiftedVal = Val >> ShAmt;
  if (!isInt<12>(ShiftedVal))
  if (SignExt && ShAmt >= 32)
  case ISD::AND: BinOpc = RISCV::ANDI; break;
  case ISD::OR:  BinOpc = RISCV::ORI;  break;
  case ISD::XOR: BinOpc = RISCV::XORI; break;
  unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
  if (!Subtarget->hasVendorXTHeadBb())
  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  SDValue N0 = Node->getOperand(0);
  auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
  MVT VT = Node->getSimpleValueType(0);
  const unsigned RightShAmt = N1C->getZExtValue();
    auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    const unsigned LeftShAmt = N01C->getZExtValue();
    if (LeftShAmt > RightShAmt)
    const unsigned Msb = MsbPlusOne - 1;
    const unsigned Lsb = RightShAmt - LeftShAmt;
    SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    const unsigned Msb = ExtSize - 1;
    const unsigned Lsb = RightShAmt;
    SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
  if (!Subtarget->hasVendorXTHeadMemIdx())
    int64_t Offset = C->getSExtValue();
    for (Shift = 0; Shift < 4; Shift++)
      if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
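  // Pick the XTHeadMemIdx pre-/post-increment load opcode from the load's
  // memory type and, for sub-XLEN types, zero- vs. sign-extension.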
  if (LoadVT == MVT::i8 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
  else if (LoadVT == MVT::i8 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
  else if (LoadVT == MVT::i16 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
  else if (LoadVT == MVT::i16 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
  else if (LoadVT == MVT::i32 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
  else if (LoadVT == MVT::i32 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
  else if (LoadVT == MVT::i64 && IsPre)
    Opcode = RISCV::TH_LDIB;
  else if (LoadVT == MVT::i64 && IsPost)
    Opcode = RISCV::TH_LDIA;
  if (Node->isMachineOpcode()) {
  unsigned Opcode = Node->getOpcode();
  MVT VT = Node->getSimpleValueType(0);
  bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isZero()) {
    int64_t Imm = ConstNode->getSExtValue();
    if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
      Imm = SignExtend64<16>(Imm);
    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64<32>(Imm);
    const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
    bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
      Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
    bool HasZdinx = Subtarget->hasStdExtZdinx();
      Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
      Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
    if (!Subtarget->hasStdExtZfa())
           "Unexpected subtarget");
    if (!SDValue(Node, 0).use_empty()) {
                                    Node->getOperand(0));
    if (!SDValue(Node, 1).use_empty()) {
                                    Node->getOperand(0));
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    SDValue N0 = Node->getOperand(0);
    unsigned ShAmt = N1C->getZExtValue();
      unsigned XLen = Subtarget->getXLen();
      if (TrailingZeros > 0 && LeadingZeros == 32) {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    SDValue N0 = Node->getOperand(0);
    unsigned ShAmt = N1C->getZExtValue();
      unsigned XLen = Subtarget->getXLen();
      if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
    Mask |= maskTrailingOnes<uint64_t>(ShAmt);
    if (ShAmt >= TrailingOnes)
    if (TrailingOnes == 32) {
    if (HasBitTest && ShAmt + 1 == TrailingOnes) {
          Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
    unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    SDValue N0 = Node->getOperand(0);
    unsigned ShAmt = N1C->getZExtValue();
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    if (ExtSize >= 32 || ShAmt >= ExtSize)
    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    const bool isC1ANDI = isInt<12>(C1);
    SDValue N0 = Node->getOperand(0);
      if (!Subtarget->hasVendorXTHeadBb())
      auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      unsigned C2 = C->getZExtValue();
      unsigned XLen = Subtarget->getXLen();
      assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
      bool IsCANDI = isInt<6>(N1C->getSExtValue());
        C1 &= maskTrailingZeros<uint64_t>(C2);
        C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
      bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
      if (!LeftShift && isC1Mask) {
        if (C2 + 32 == Leading) {
          if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
              cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
                RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
          const unsigned Lsb = C2;
          if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
        bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
                    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
        Skip |= HasBitTest && Leading == XLen - 1;
        if (OneUseOrZExtW && !Skip) {
              RISCV::SLLI, DL, VT, X,
      if (C2 + Leading < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
        if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
        if (OneUseOrZExtW && !IsCANDI) {
              RISCV::SLLI, DL, VT, X,
      if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
        unsigned SrliOpc = RISCV::SRLI;
            isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
          SrliOpc = RISCV::SRLIW;
          X = X.getOperand(0);
      if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
          OneUseOrZExtW && !IsCANDI) {
            RISCV::SRLIW, DL, VT, X,
      if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
            RISCV::SRLI, DL, VT, X,
      if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
            RISCV::SRLIW, DL, VT, X,
    if (isC1Mask && !isC1ANDI) {
      if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
    SDValue N0 = Node->getOperand(0);
        (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
    IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
    bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
    IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
    unsigned XLen = Subtarget->getXLen();
    unsigned ConstantShift = XLen - LeadingZeros;
    uint64_t ShiftedC1 = C1 << ConstantShift;
      ShiftedC1 = SignExtend64<32>(ShiftedC1);
    unsigned IntNo = Node->getConstantOperandVal(0);
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
          IsCmpUnsignedZero = true;
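      // Expand vmsge{u}.vx using vmslt{u}.vx and inverting the result with
      // vmnand.mm; an unsigned compare against zero is always true, so that
      // case degenerates to vmset.m.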
      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
      if (IsCmpUnsignedZero) {
                                               {Cmp, Cmp, VL, SEW}));
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
          IsCmpUnsignedZero = true;
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
#undef CASE_VMSLT_OPCODES
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);
      if (IsCmpUnsignedZero) {
        if (Mask == MaskedOff) {
                                               {Mask, MaskedOff, VL, MaskSEW}));
        if (Mask == MaskedOff) {
                                               {Mask, Cmp, VL, MaskSEW}));
                        {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
                                             {Cmp, Mask, VL, MaskSEW}));
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {
    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:
    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:
    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:
    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {
    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {
    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;
      MVT VT = Node->getSimpleValueType(0);
      bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
        Operands.push_back(Node->getOperand(CurOp++));
             "Element count mismatch");
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
                           "values when XLEN=32");
          IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    case Intrinsic::riscv_vlm:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
      MVT VT = Node->getSimpleValueType(0);
      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      bool IsTU = HasPassthruOperand &&
                  (IsMasked || !Node->getOperand(CurOp).isUndef());
        Operands.push_back(Node->getOperand(CurOp++));
      else if (HasPassthruOperand)
          RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, false, Log2SEW,
                              static_cast<unsigned>(LMUL));
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
      MVT VT = Node->getSimpleValueType(0);
      bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
        Operands.push_back(Node->getOperand(CurOp++));
          RISCV::getVLEPseudo(IsMasked, IsTU, false, true, Log2SEW,
                              static_cast<unsigned>(LMUL));
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    case Intrinsic::riscv_vsseg2:
    case Intrinsic::riscv_vsseg3:
    case Intrinsic::riscv_vsseg4:
    case Intrinsic::riscv_vsseg5:
    case Intrinsic::riscv_vsseg6:
    case Intrinsic::riscv_vsseg7:
    case Intrinsic::riscv_vsseg8: {
    case Intrinsic::riscv_vsseg2_mask:
    case Intrinsic::riscv_vsseg3_mask:
    case Intrinsic::riscv_vsseg4_mask:
    case Intrinsic::riscv_vsseg5_mask:
    case Intrinsic::riscv_vsseg6_mask:
    case Intrinsic::riscv_vsseg7_mask:
    case Intrinsic::riscv_vsseg8_mask: {
    case Intrinsic::riscv_vssseg2:
    case Intrinsic::riscv_vssseg3:
    case Intrinsic::riscv_vssseg4:
    case Intrinsic::riscv_vssseg5:
    case Intrinsic::riscv_vssseg6:
    case Intrinsic::riscv_vssseg7:
    case Intrinsic::riscv_vssseg8: {
    case Intrinsic::riscv_vssseg2_mask:
    case Intrinsic::riscv_vssseg3_mask:
    case Intrinsic::riscv_vssseg4_mask:
    case Intrinsic::riscv_vssseg5_mask:
    case Intrinsic::riscv_vssseg6_mask:
    case Intrinsic::riscv_vssseg7_mask:
    case Intrinsic::riscv_vssseg8_mask: {
    case Intrinsic::riscv_vsoxseg2:
    case Intrinsic::riscv_vsoxseg3:
    case Intrinsic::riscv_vsoxseg4:
    case Intrinsic::riscv_vsoxseg5:
    case Intrinsic::riscv_vsoxseg6:
    case Intrinsic::riscv_vsoxseg7:
    case Intrinsic::riscv_vsoxseg8:
    case Intrinsic::riscv_vsuxseg2:
    case Intrinsic::riscv_vsuxseg3:
    case Intrinsic::riscv_vsuxseg4:
    case Intrinsic::riscv_vsuxseg5:
    case Intrinsic::riscv_vsuxseg6:
    case Intrinsic::riscv_vsuxseg7:
    case Intrinsic::riscv_vsuxseg8:
    case Intrinsic::riscv_vsoxseg2_mask:
    case Intrinsic::riscv_vsoxseg3_mask:
    case Intrinsic::riscv_vsoxseg4_mask:
    case Intrinsic::riscv_vsoxseg5_mask:
    case Intrinsic::riscv_vsoxseg6_mask:
    case Intrinsic::riscv_vsoxseg7_mask:
    case Intrinsic::riscv_vsoxseg8_mask:
    case Intrinsic::riscv_vsuxseg2_mask:
    case Intrinsic::riscv_vsuxseg3_mask:
    case Intrinsic::riscv_vsuxseg4_mask:
    case Intrinsic::riscv_vsuxseg5_mask:
    case Intrinsic::riscv_vsuxseg6_mask:
    case Intrinsic::riscv_vsuxseg7_mask:
    case Intrinsic::riscv_vsuxseg8_mask:
    case Intrinsic::riscv_vsoxei:
    case Intrinsic::riscv_vsoxei_mask:
    case Intrinsic::riscv_vsuxei:
    case Intrinsic::riscv_vsuxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                      IntNo == Intrinsic::riscv_vsuxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                       IntNo == Intrinsic::riscv_vsoxei_mask;
      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      Operands.push_back(Node->getOperand(CurOp++));
             "Element count mismatch");
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
                           "values when XLEN=32");
          IsMasked, false, IsOrdered, IndexLog2EEW,
          static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    case Intrinsic::riscv_vsm:
    case Intrinsic::riscv_vse:
    case Intrinsic::riscv_vse_mask:
    case Intrinsic::riscv_vsse:
    case Intrinsic::riscv_vsse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                      IntNo == Intrinsic::riscv_vsse_mask;
          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      Operands.push_back(Node->getOperand(CurOp++));
          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);
    auto Idx = Node->getConstantOperandVal(2);
    MVT SubVecContainerVT = SubVecVT;
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
      VT = TLI.getContainerForFixedLengthVector(VT);
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            VT, SubVecContainerVT, Idx, TRI);
    (void)IsSubVecPartReg;
    assert((!IsSubVecPartReg || V.isUndef()) &&
           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
           "the subvector is smaller than a full-sized register");
    if (SubRegIdx == RISCV::NoSubRegister) {
             "Unexpected subvector extraction");
    SDValue V = Node->getOperand(0);
    auto Idx = Node->getConstantOperandVal(1);
    MVT InVT = V.getSimpleValueType();
    MVT SubVecContainerVT = VT;
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
      InVT = TLI.getContainerForFixedLengthVector(InVT);
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            InVT, SubVecContainerVT, Idx, TRI);
    if (SubRegIdx == RISCV::NoSubRegister) {
             "Unexpected subvector extraction");
    if (!Node->getOperand(0).isUndef())
    SDValue Src = Node->getOperand(1);
    auto *Ld = dyn_cast<LoadSDNode>(Src);
    EVT MemVT = Ld->getMemoryVT();
    if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
        false, false, IsStrided, false,
        Log2SEW, static_cast<unsigned>(LMUL));
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
    assert(Found && "SelectAddrRegImm should always succeed");
    OutOps.push_back(Op0);
    OutOps.push_back(Op1);
    OutOps.push_back(Op);
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
      int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
      if (isInt<12>(CVal)) {
  if (!isa<ConstantSDNode>(Addr))
  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
  int64_t Lo12 = SignExtend64<12>(CVal);
  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    int64_t Hi20 = (Hi >> 12) & 0xfffff;
  if (Seq.back().getOpcode() != RISCV::ADDI)
  Lo12 = Seq.back().getImm();
  assert(!Seq.empty() && "Expected more instructions in sequence");
  for (auto *Use : Add->uses()) {
    EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
        cast<StoreSDNode>(Use)->getValue() == Add)
        cast<AtomicSDNode>(Use)->getVal() == Add)
                                               unsigned MaxShiftAmount,
  EVT VT = Addr.getSimpleValueType();
    if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
      if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
        ShiftAmt = N.getConstantOperandVal(1);
    return ShiftAmt != 0;
    if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
          isInt<12>(C1->getSExtValue())) {
    } else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
      UnwrapShl(Addr.getOperand(1), Index, Scale);
  } else if (UnwrapShl(Addr, Index, Scale)) {
  MVT VT = Addr.getSimpleValueType();
  int64_t RV32ZdinxRange = IsINX ? 4 : 0;
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
      if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
            GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
        if (CVal == 0 || Alignment > CVal) {
          int64_t CombinedOffset = CVal + GA->getOffset();
              CombinedOffset, GA->getTargetFlags());
      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
           "simm12 not already handled?");
    if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
      int64_t Adj = CVal < 0 ? -2048 : 2047;
    if (Imm != 0 && Imm % ShiftWidth == 0) {
    if (Imm != 0 && Imm % ShiftWidth == 0) {
      unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
    if (Imm % ShiftWidth == ShiftWidth - 1) {
         "Unexpected condition code!");
  ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CCVal != ExpectedCCVal)
  if (!LHS.getValueType().isInteger())
  if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
    int64_t CVal = C->getSExtValue();
    if (CVal == -2048) {
          RISCV::XORI, DL, N->getValueType(0), LHS,
    if (isInt<12>(CVal) || CVal == 2048) {
          RISCV::ADDI, DL, N->getValueType(0), LHS,
      cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
    Val = N.getOperand(0);
  auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
    if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
           N.getConstantOperandVal(1) == ShiftAmt &&
  MVT VT = N.getSimpleValueType();
    auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
      Val = N.getOperand(0);
  MVT VT = N.getSimpleValueType();
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
      uint64_t Mask = N.getConstantOperandVal(1);
      unsigned XLen = Subtarget->getXLen();
        Mask &= maskTrailingZeros<uint64_t>(C2);
        Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
        if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
          EVT VT = N.getValueType();
        if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
          EVT VT = N.getValueType();
  bool LeftShift = N.getOpcode() == ISD::SHL;
  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N.getOperand(1))) {
      unsigned C1 = N.getConstantOperandVal(1);
      unsigned XLen = Subtarget->getXLen();
      if (LeftShift && Leading == 32 && Trailing > 0 &&
          (Trailing + C1) == ShAmt) {
        EVT VT = N.getValueType();
      if (!LeftShift && Leading == 32 && Trailing > C1 &&
          (Trailing - C1) == ShAmt) {
        EVT VT = N.getValueType();
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
    uint64_t Mask = N.getConstantOperandVal(1);
      Mask &= maskTrailingZeros<uint64_t>(C2);
      if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
        EVT VT = N.getValueType();
                                         const unsigned Depth) const {
          isa<ConstantSDNode>(Node) || Depth != 0) &&
         "Unexpected opcode");
  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
    if (!User->isMachineOpcode())
    switch (User->getMachineOpcode()) {
    case RISCV::SLLI_UW:
    case RISCV::FMV_W_X:
    case RISCV::FCVT_H_W:
    case RISCV::FCVT_H_WU:
    case RISCV::FCVT_S_W:
    case RISCV::FCVT_S_WU:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_D_WU:
    case RISCV::TH_REVW:
    case RISCV::TH_SRRIW:
      if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
      unsigned ShAmt = User->getConstantOperandVal(1);
    case RISCV::FMV_H_X:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      if (Bits < (Subtarget->getXLen() / 2))
    case RISCV::SH1ADD_UW:
    case RISCV::SH2ADD_UW:
    case RISCV::SH3ADD_UW:
      if (UI.getOperandNo() != 0 || Bits < 32)
      if (UI.getOperandNo() != 0 || Bits < 8)
      if (UI.getOperandNo() != 0 || Bits < 16)
      if (UI.getOperandNo() != 0 || Bits < 32)
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    int64_t Offset = C->getSExtValue();
    for (Shift = 0; Shift < 4; Shift++)
      if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
  EVT Ty = N->getValueType(0);
  auto *C = dyn_cast<ConstantSDNode>(N);
  if (C && isUInt<5>(C->getZExtValue())) {
                                 N->getValueType(0));
  } else if (C && C->isAllOnes()) {
                                 N->getValueType(0));
  } else if (isa<RegisterSDNode>(N) &&
             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
                        N->getValueType(0));
  assert(N.getNumOperands() == 3 && "Unexpected number of operands");
  SplatVal = N.getOperand(1);
      !isa<ConstantSDNode>(N.getOperand(1)))
  assert(N.getNumOperands() == 3 && "Unexpected number of operands");
      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
         "Unexpected splat operand type");
  MVT EltVT = N.getSimpleValueType().getVectorElementType();
  if (EltVT.bitsLT(XLenVT))
  if (!ValidateImm(SplatImm))
                                [](int64_t Imm) { return isInt<5>(Imm); });
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
      !isa<ConstantSDNode>(N.getOperand(1)))
      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
  if (!isUInt<5>(SplatImm))
  if (VT == MVT::f64 && !Subtarget->is64Bit()) {
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    if (!isInt<5>(ImmVal))
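// doPeepholeSExtW: fold a trailing sign-extension (ADDIW rd, rs, 0) into the
// producing instruction by switching it to its W-form (ADD->ADDW, etc.),
// which already sign-extends from bit 31.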
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  if (N->getMachineOpcode() != RISCV::ADDIW ||
  case RISCV::ADD:  Opc = RISCV::ADDW;  break;
  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SUB:  Opc = RISCV::SUBW;  break;
  case RISCV::MUL:  Opc = RISCV::MULW;  break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
        !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
  const auto *Glued = N->getGluedNode();
  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
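  // A mask produced by any of the vmset.m pseudos is all ones, so a masked
  // operation using it behaves exactly like its unmasked form.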
  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  };
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
  unsigned MaskOpIdx = I->MaskOpIdx;
  std::optional<unsigned> TailPolicyOpIdx;
  bool UseTUPseudo = false;
    if (I->UnmaskedTUPseudo == I->UnmaskedPseudo) {
      if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
      if (!N->getOperand(0).isUndef()) {
        if (I->UnmaskedTUPseudo == I->MaskedPseudo)
  unsigned Opc = UseTUPseudo ? I->UnmaskedTUPseudo : I->UnmaskedPseudo;
         "Unexpected pseudo to transform to");
  for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
        Op.getValueType() == MVT::Glue)
  const auto *Glued = N->getGluedNode();
  if (auto *TGlued = Glued->getGluedNode())
  Result->setFlags(N->getFlags());
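// performCombineVMergeAndVOps: try to fold a vmerge.vvm whose "true" operand
// is produced by a foldable vector op into a masked (or tail-undisturbed)
// form of that op, eliminating the vmerge entirely.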
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N, bool IsTA) {
  unsigned Offset = IsTA ? 0 : 1;
         "Expect True is the first output of an instruction.");
  bool IsMasked = false;
      RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);
  if (!Info && HasMergeOp) {
    Info = RISCV::getMaskedPseudoInfo(TrueOpc);
    if (False != MergeOpTrue)
    assert(HasMergeOp && "Expected merge op");
  if (SDNode *Glued = N->getGluedNode())
  unsigned TrueVLIndex =
      True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
  auto IsNoFPExcept = [this](SDValue N) {
           N->getFlags().hasNoFPExcept();
  unsigned MaskedOpc = Info->MaskedPseudo;
         "Expected instructions with mask have policy operand.");
         "Expected instructions with mask have merge operand.");
  if (N->getGluedNode())
    Ops.push_back(N->getOperand(N->getNumOperands() - 1));
  doPeepholeMaskedRVV(Result);
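// performVMergeToVAdd: rewrite a tail-undisturbed vmerge.vvm whose merge and
// false operands match as vadd.vi with immediate 0, which may also free up
// the vmset.m that produced its mask.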
bool RISCVDAGToDAGISel::performVMergeToVAdd(SDNode *N) {
  switch (N->getMachineOpcode()) {
  case RISCV::PseudoVMERGE_VVM_MF8_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF8_TU;
  case RISCV::PseudoVMERGE_VVM_MF4_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF4_TU;
  case RISCV::PseudoVMERGE_VVM_MF2_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF2_TU;
  case RISCV::PseudoVMERGE_VVM_M1_TU:
    NewOpc = RISCV::PseudoVADD_VI_M1_TU;
  case RISCV::PseudoVMERGE_VVM_M2_TU:
    NewOpc = RISCV::PseudoVADD_VI_M2_TU;
  case RISCV::PseudoVMERGE_VVM_M4_TU:
    NewOpc = RISCV::PseudoVADD_VI_M4_TU;
  case RISCV::PseudoVMERGE_VVM_M8_TU:
    NewOpc = RISCV::PseudoVADD_VI_M8_TU;
  EVT VT = N->getValueType(0);
  SDValue Ops[] = {N->getOperand(1), N->getOperand(2),
                   N->getOperand(4), N->getOperand(5)};
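// doPeepholeMergeVVMFold: scan the selected DAG for vmerge.vvm pseudos and
// apply the two folds above to each candidate.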
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;
    if (N->use_empty() || !N->isMachineOpcode())
    auto IsVMergeTU = [](unsigned Opcode) {
      return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
    };
    auto IsVMergeTA = [](unsigned Opcode) {
      return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M1 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M2 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M4 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M8;
    };
    unsigned Opc = N->getMachineOpcode();
    if ((IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1)) ||
      MadeChange |= performCombineVMergeAndVOps(N, IsVMergeTA(Opc));
    if (IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1))
      MadeChange |= performVMergeToVAdd(N);