#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"

static cl::opt<bool> UsePseudoMovImm(
    "riscv-use-rematerializable-movimm", cl::Hidden,
    cl::desc("Use a rematerializable pseudoinstruction for 2 instruction "
             "constant materialization"),
    cl::init(false));

#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
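// The GET_*Table_IMPL macros above instantiate the TableGen-generated lookup
// tables from RISCVGenSearchableTables.inc; they map properties such as
// {NF, masked, strided/ordered, SEW, LMUL} to concrete RVV load/store pseudo
// opcodes, and masked pseudos to their unmasked twins.

// Excerpt from PreprocessISelDAG(): RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL is
// expanded by storing the i32 Lo/Hi halves to a stack slot and reloading them
// as a single i64 element (a sketch of the intent; the full lowering is
// elided below).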
bool MadeChange = false;
// ...
switch (N->getOpcode()) {
// ...
MVT VT = N->getSimpleValueType(0);
// ...
assert(N->getNumOperands() == 4 && "Unexpected number of operands");
MVT VT = N->getSimpleValueType(0);
// ...
Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
// ...
int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();
// ...
MVT::i64, MPI, Align(8),
// ...
LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
bool MadeChange = false;
// ...
if (N->use_empty() || !N->isMachineOpcode())
// ...
MadeChange |= doPeepholeSExtW(N);
MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));
// ...
MadeChange |= doPeepholeMergeVVMFold();
// ...
MadeChange |= doPeepholeNoRegPassThru();
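// PostprocessISelDAG() walks the selected DAG and applies per-node peepholes
// (sext.w elimination and all-ones-mask folding), then two whole-DAG passes:
// vmerge/vop fusion and dropping of undef passthru operands.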
switch (Inst.getOpndKind()) {
// ...
if (Seq.size() > 3) {
  unsigned ShiftAmt, AddOpc;
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};
// ...
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm1_0;
RegClassID = M1TupleRegClassIDs[NF - 2];
// ...
static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm2_0;
RegClassID = M2TupleRegClassIDs[NF - 2];
// ...
static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm4_0;
RegClassID = RISCV::VRN2M4RegClassID;
// ...
for (unsigned I = 0; I < Regs.size(); ++I) {
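// createTuple() packs NF vector registers into a single VRN<NF>M<LMUL> tuple
// register by emitting a REG_SEQUENCE; the static_asserts guarantee that the
// sub_vrm* subregister indices are contiguous, so SubReg0 + I addresses the
// I-th field of the tuple.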
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++));
    // ...
    *IndexVT = Operands.back()->getSimpleValueType(0);
  // ...
  SDValue Mask = Node->getOperand(CurOp++);
  // ...
  Policy = Node->getConstantOperandVal(CurOp++);
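// The operand order built here mirrors what the RVV pseudos expect: base
// pointer, then an optional stride or index vector, then the mask (when
// IsMasked), then VL, SEW, and for loads a policy immediate. IndexVT reports
// the index operand's type back to the caller so the index EEW/LMUL can be
// computed separately from the data type.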
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
// ...
Node->op_begin() + CurOp + NF);
// ...
RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, false, Log2SEW,
                      static_cast<unsigned>(LMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
for (unsigned I = 0; I < NF; ++I) {
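// selectVLSEG: NF (fields per segment) is the node's result count minus the
// chain. getVLSEGPseudo's fourth argument selects the fault-only-first
// variant; it is false here and true in selectVLSEGFF below, where NF is
// NumValues - 2 because the FF form additionally returns an output VL.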
unsigned NF = Node->getNumValues() - 2;
MVT VT = Node->getSimpleValueType(0);
// ...
Node->op_begin() + CurOp + NF);
// ...
RISCV::getVLSEGPseudo(NF, IsMasked, false, true, Log2SEW,
                      static_cast<unsigned>(LMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
// ...
Node->op_begin() + CurOp + NF);
// ...
"Element count mismatch");
// ...
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  report_fatal_error("The V extension does not support EEW=64 for index "
                     "values when XLEN=32");
}
// ...
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumOperands() - 4;
// ...
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
// ...
unsigned CurOp = 2 + NF;
// ...
NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned NF = Node->getNumOperands() - 5;
// ...
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
// ...
unsigned CurOp = 2 + NF;
// ...
"Element count mismatch");
// ...
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  report_fatal_error("The V extension does not support EEW=64 for index "
                     "values when XLEN=32");
}
// ...
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = Node->getConstantOperandVal(0);
// ...
assert((IntNo == Intrinsic::riscv_vsetvli ||
        IntNo == Intrinsic::riscv_vsetvlimax) &&
       "Unexpected vsetvli intrinsic");
// ...
bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
unsigned Offset = (VLMax ? 1 : 2);
// ...
"Unexpected number of operands");
// ...
Node->getConstantOperandVal(Offset + 1) & 0x7);
// ...
unsigned Opcode = RISCV::PseudoVSETVLI;
if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
// ...
Opcode = RISCV::PseudoVSETVLIX0;
// ...
VLOperand = Node->getOperand(1);
// ...
if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
  // ...
  if (isUInt<5>(AVL)) {
// ...
XLenVT, VLImm, VTypeIOp));
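// selectVSETVLI(): vsetvlimax (or an AVL equal to the VLMAX sentinel)
// selects PseudoVSETVLIX0, which uses x0 as the AVL source; a constant AVL
// that fits in uimm5 instead selects the immediate form (vsetivli).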
MVT VT = Node->getSimpleValueType(0);
unsigned Opcode = Node->getOpcode();
// ...
"Unexpected opcode");
// ...
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
// ...
bool SignExt = false;
// ...
uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
// ...
int64_t ShiftedVal = Val >> ShAmt;
if (!isInt<12>(ShiftedVal))
// ...
if (SignExt && ShAmt >= 32)
// ...
case ISD::AND: BinOpc = RISCV::ANDI; break;
case ISD::OR:  BinOpc = RISCV::ORI;  break;
case ISD::XOR: BinOpc = RISCV::XORI; break;
// ...
unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
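// tryShrinkShlLogicImm: for (and/or/xor (shl X, C), Imm) where the low C
// bits of Imm are covered by the shift (RemovedBitsMask), the node is
// rewritten as (shl (op X, Imm >> C), C) so the shrunken immediate fits in
// simm12 and the logic op becomes ANDI/ORI/XORI.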
if (!Subtarget->hasVendorXTHeadBb())
// ...
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
// ...
MVT VT = Node->getSimpleValueType(0);
const unsigned RightShAmt = N1C->getZExtValue();
// ...
auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
// ...
const unsigned LeftShAmt = N01C->getZExtValue();
// ...
if (LeftShAmt > RightShAmt)
// ...
const unsigned Msb = MsbPlusOne - 1;
const unsigned Lsb = RightShAmt - LeftShAmt;
// ...
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
// ...
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
// ...
const unsigned Msb = ExtSize - 1;
const unsigned Lsb = RightShAmt;
// ...
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
if (!Subtarget->hasVendorXTHeadMemIdx())
// ...
int64_t Offset = C->getSExtValue();
// ...
for (Shift = 0; Shift < 4; Shift++)
  if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
// ...
if (LoadVT == MVT::i8 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
else if (LoadVT == MVT::i8 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
else if (LoadVT == MVT::i16 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
else if (LoadVT == MVT::i16 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
else if (LoadVT == MVT::i32 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
else if (LoadVT == MVT::i32 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
else if (LoadVT == MVT::i64 && IsPre)
// ...
else if (LoadVT == MVT::i64 && IsPost)
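// tryIndexedLoad: the XTHeadMemIdx pre/post-increment loads (TH_L*IB before,
// TH_L*IA after the access) encode the increment as a 5-bit signed immediate
// shifted left by 0-3, so the loop above searches for the smallest Shift
// that makes Offset >> Shift fit in simm5 with no remainder.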
if (Node->isMachineOpcode()) {
// ...
unsigned Opcode = Node->getOpcode();
// ...
MVT VT = Node->getSimpleValueType(0);
// ...
bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
// ...
assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
auto *ConstNode = cast<ConstantSDNode>(Node);
if (ConstNode->isZero()) {
// ...
int64_t Imm = ConstNode->getSExtValue();
// ...
if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
// ...
Imm = SignExtend64<16>(Imm);
// ...
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
  Imm = SignExtend64<32>(Imm);
const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
auto [FPImm, NeedsFNeg] =
// ...
FNegOpc = RISCV::FSGNJN_H;
// ...
FNegOpc = RISCV::FSGNJN_S;
// ...
FNegOpc = RISCV::FSGNJN_D;
// ...
bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
// ...
bool HasZdinx = Subtarget->hasStdExtZdinx();
bool Is64Bit = Subtarget->is64Bit();
// ...
assert(Subtarget->hasStdExtZfbfmin());
Opc = RISCV::FMV_H_X;
// ...
Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
// ...
Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
// ...
Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
// ...
if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
// ...
Opc = RISCV::FSGNJN_D;
// ...
Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
// ...
if (!Subtarget->hasStdExtZfa())
// ...
"Unexpected subtarget");
// ...
if (!SDValue(Node, 0).use_empty()) {
// ...
Node->getOperand(0));
// ...
if (!SDValue(Node, 1).use_empty()) {
// ...
Node->getOperand(0));
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
unsigned ShAmt = N1C->getZExtValue();
// ...
unsigned XLen = Subtarget->getXLen();
// ...
if (TrailingZeros > 0 && LeadingZeros == 32) {
// ...
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
unsigned ShAmt = N1C->getZExtValue();
// ...
unsigned XLen = Subtarget->getXLen();
// ...
if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
// ...
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
// ...
if (ShAmt >= TrailingOnes)
// ...
if (TrailingOnes == 32) {
// ...
Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
// ...
if (HasBitTest && ShAmt + 1 == TrailingOnes) {
// ...
Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
// ...
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
// ...
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
unsigned ShAmt = N1C->getZExtValue();
// ...
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
// ...
if (ExtSize >= 32 || ShAmt >= ExtSize)
// ...
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
const bool isC1ANDI = isInt<12>(C1);
// ...
SDValue N0 = Node->getOperand(0);
// ...
if (!Subtarget->hasVendorXTHeadBb())
// ...
auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
// ...
unsigned C2 = C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
// ...
bool IsCANDI = isInt<6>(N1C->getSExtValue());
// ...
C1 &= maskTrailingZeros<uint64_t>(C2);
// ...
C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
// ...
bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
// ...
if (!LeftShift && isC1Mask) {
// ...
if (C2 + 32 == Leading) {
// ...
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
// ...
    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
// ...
RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
// ...
const unsigned Lsb = C2;
if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
// ...
bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
// ...
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
// ...
Skip |= HasBitTest && Leading == XLen - 1;
if (OneUseOrZExtW && !Skip) {
// ...
RISCV::SLLI, DL, VT, X,
// ...
if (C2 + Leading < XLen &&
    C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
// ...
if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
// ...
if (OneUseOrZExtW && !IsCANDI) {
// ...
RISCV::SLLI, DL, VT, X,
// ...
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
// ...
unsigned SrliOpc = RISCV::SRLI;
// ...
isa<ConstantSDNode>(X.getOperand(1)) &&
    X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
  SrliOpc = RISCV::SRLIW;
  X = X.getOperand(0);
// ...
if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
    OneUseOrZExtW && !IsCANDI) {
// ...
RISCV::SRLIW, DL, VT, X,
// ...
if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
// ...
RISCV::SRLI, DL, VT, X,
// ...
if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
// ...
RISCV::SRLIW, DL, VT, X,
// ...
if (isC1Mask && !isC1ANDI) {
// ...
if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())
// ...
SDValue N0 = Node->getOperand(0);
// ...
(C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
// ...
IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
// ...
bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
// ...
IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
// ...
unsigned XLen = Subtarget->getXLen();
// ...
unsigned ConstantShift = XLen - LeadingZeros;
// ...
uint64_t ShiftedC1 = C1 << ConstantShift;
// ...
ShiftedC1 = SignExtend64<32>(ShiftedC1);
unsigned IntNo = Node->getConstantOperandVal(0);
// ...
case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
  SDValue Src1 = Node->getOperand(1);
  SDValue Src2 = Node->getOperand(2);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
  bool IsCmpUnsignedZero = false;
  // ...
  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
      // ...
      IsCmpUnsignedZero = true;
  // ...
  unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
  // ...
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
// ...
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
// ...
if (IsCmpUnsignedZero) {
// ...
{Cmp, Cmp, VL, SEW}));
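// vmsge(u).vx has no native encoding, so it is expanded here: a constant in
// [-15, 16] can instead be selected as vmsgt(u).vx with imm - 1; unsigned
// ">= 0" is always true and becomes vmset.m; otherwise the general expansion
// is vmslt(u).vx followed by vmnand.mm to invert the result.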
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
  SDValue Src1 = Node->getOperand(2);
  SDValue Src2 = Node->getOperand(3);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
  bool IsCmpUnsignedZero = false;
  // ...
  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
      // ...
      IsCmpUnsignedZero = true;
  // ...
  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
  // ...
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
// ...
#undef CASE_VMSLT_OPCODES
// ...
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
// ...
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
// ...
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
// ...
if (IsCmpUnsignedZero) {
  // ...
  if (Mask == MaskedOff) {
// ...
{Mask, MaskedOff, VL, MaskSEW}));
// ...
if (Mask == MaskedOff) {
// ...
{Mask, Cmp, VL, MaskSEW}));
// ...
{MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
// ...
{Cmp, Mask, VL, MaskSEW}));
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
// ...
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
  // ...
}
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
  // ...
}
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
  // ...
}
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
  // ...
}
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
// ...
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:
// ...
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
// ...
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:
// ...
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {
  // ...
}
case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
  // ...
}
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                  IntNo == Intrinsic::riscv_vluxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                   IntNo == Intrinsic::riscv_vloxei_mask;
  // ...
  MVT VT = Node->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  "Element count mismatch");
  // ...
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  // ...
  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  static_cast<unsigned>(IndexLMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                  IntNo == Intrinsic::riscv_vlse_mask;
  // ...
  IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
  // ...
  MVT VT = Node->getSimpleValueType(0);
  // ...
  bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
  // ...
  if (HasPassthruOperand)
    Operands.push_back(Node->getOperand(CurOp++));
  // ...
  RISCV::getVLEPseudo(IsMasked, IsStrided, false, Log2SEW,
                      static_cast<unsigned>(LMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
  // ...
  MVT VT = Node->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  RISCV::getVLEPseudo(IsMasked, false, true, Log2SEW,
                      static_cast<unsigned>(LMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
// ...
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
  // ...
}
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
  // ...
}
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
  // ...
}
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
  // ...
}
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
// ...
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:
// ...
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
// ...
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
// ...
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                  IntNo == Intrinsic::riscv_vsuxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                   IntNo == Intrinsic::riscv_vsoxei_mask;
  // ...
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  "Element count mismatch");
  // ...
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  // ...
  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  static_cast<unsigned>(IndexLMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                  IntNo == Intrinsic::riscv_vsse_mask;
  // ...
  IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
  // ...
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
MVT SrcVT = Node->getOperand(0).getSimpleValueType();
// ...
SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);
// ...
auto Idx = Node->getConstantOperandVal(2);
// ...
MVT SubVecContainerVT = SubVecVT;
// ...
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
// ...
VT = TLI.getContainerForFixedLengthVector(VT);
// ...
std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        VT, SubVecContainerVT, Idx, TRI);
// ...
(void)IsSubVecPartReg;
assert((!IsSubVecPartReg || V.isUndef()) &&
       "Expecting lowering to have created legal INSERT_SUBVECTORs when "
       "the subvector is smaller than a full-sized register");
// ...
if (SubRegIdx == RISCV::NoSubRegister) {
// ...
"Unexpected subvector extraction");
// ...
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);
MVT InVT = V.getSimpleValueType();
// ...
MVT SubVecContainerVT = VT;
// ...
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
// ...
InVT = TLI.getContainerForFixedLengthVector(InVT);
// ...
std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        InVT, SubVecContainerVT, Idx, TRI);
// ...
if (SubRegIdx == RISCV::NoSubRegister) {
// ...
"Unexpected subvector extraction");
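// INSERT_SUBVECTOR/EXTRACT_SUBVECTOR are selected as subregister insert or
// extract operations: decomposeSubvectorInsertExtractToSubRegs maps the
// (container VT, subvector VT, index) triple to a sub_vrm* subregister
// index, with RISCV::NoSubRegister signalling that no subregister operation
// is needed for the whole-register case.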
if (!Node->getOperand(0).isUndef())
// ...
SDValue Src = Node->getOperand(1);
auto *Ld = dyn_cast<LoadSDNode>(Src);
// ...
if (!Ld || Ld->isIndexed())
// ...
EVT MemVT = Ld->getMemoryVT();
// ...
if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
// ...
Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
// ...
false, IsStrided, false, Log2SEW, static_cast<unsigned>(LMUL));
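// A splat whose scalar operand is itself a scalar load can be selected as a
// zero-strided vector load (stride register x0), but only when the subtarget
// reports that zero-stride loads are optimized; otherwise the scalar load
// plus a scalar-to-vector move is kept.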
unsigned Locality = Node->getConstantOperandVal(3);
// ...
if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
  // ...
  int NontemporalLevel = 0;
  // ...
  NontemporalLevel = 3;
  // ...
  NontemporalLevel = 1;
  // ...
  NontemporalLevel = 0;
  // ...
  if (NontemporalLevel & 0b1)
  // ...
  if (NontemporalLevel & 0b10)
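// The prefetch locality hint (0-3) is translated into a two-bit nontemporal
// level and recorded on the memory operand via the target-private
// MONontemporalBit0/MONontemporalBit1 MachineMemOperand flags.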
bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, InlineAsm::ConstraintCode ConstraintID,
    std::vector<SDValue> &OutOps) {
  // ...
  switch (ConstraintID) {
  // ...
  assert(Found && "SelectAddrRegImm should always succeed");
  // ...
  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  // ...
  OutOps.push_back(Op);
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
// ...
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isInt<12>(CVal)) {
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
                               const MVT VT, const RISCVSubtarget *Subtarget,
                               SDValue Addr, SDValue &Base, SDValue &Offset,
                               bool IsPrefetch = false) {
  if (!isa<ConstantSDNode>(Addr))
  // ...
  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
  // ...
  int64_t Lo12 = SignExtend64<12>(CVal);
  // ...
  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (IsPrefetch && (Lo12 & 0b11111) != 0)
    // ...
    int64_t Hi20 = (Hi >> 12) & 0xfffff;
  // ...
  if (Seq.back().getOpcode() != RISCV::ADDI)
  // ...
  Lo12 = Seq.back().getImm();
  if (IsPrefetch && (Lo12 & 0b11111) != 0)
  // ...
  assert(!Seq.empty() && "Expected more instructions in sequence");
for (auto *Use : Add->uses()) {
  // ...
  EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
  // ...
  cast<StoreSDNode>(Use)->getValue() == Add)
  // ...
  cast<AtomicSDNode>(Use)->getVal() == Add)
unsigned MaxShiftAmount,
// ...
EVT VT = Addr.getSimpleValueType();
// ...
if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
  // ...
  if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
    // ...
    ShiftAmt = N.getConstantOperandVal(1);
// ...
return ShiftAmt != 0;
// ...
if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
  // ...
  isInt<12>(C1->getSExtValue())) {
// ...
} else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
// ...
UnwrapShl(Addr.getOperand(1), Index, Scale);
// ...
} else if (UnwrapShl(Addr, Index, Scale)) {
MVT VT = Addr.getSimpleValueType();
// ...
int64_t RV32ZdinxRange = IsINX ? 4 : 0;
// ...
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
  // ...
  if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
    // ...
    GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
    if (CVal == 0 || Alignment > CVal) {
      int64_t CombinedOffset = CVal + GA->getOffset();
      // ...
      CombinedOffset, GA->getTargetFlags());
  // ...
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
// ...
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
         "simm12 not already handled?");
  // ...
  if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
    int64_t Adj = CVal < 0 ? -2048 : 2047;
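// SelectAddrRegImm: when an ADD's constant is just outside the simm12 range,
// it is split as Adj + (CVal - Adj) with Adj = -2048 or 2047, so one ADDI
// materializes part of the offset and the memory instruction's immediate
// field absorbs the rest. RV32ZdinxRange shrinks the usable range for Zdinx
// register-pair accesses, which also touch Base + 4.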
MVT VT = Addr.getSimpleValueType();
// ...
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal)) {
  // ...
  if ((CVal & 0b11111) != 0) {
  // ...
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
// ...
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!(isInt<12>(CVal) && isInt<12>(CVal)) &&
         "simm12 not already handled?");
  // ...
  if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
    int64_t Adj = CVal < 0 ? -2048 : 2016;
    int64_t AdjustedOffset = CVal - Adj;
    // ...
    RISCV::ADDI, DL, VT, Addr.getOperand(0),
if (Imm != 0 && Imm % ShiftWidth == 0) {
// ...
if (Imm != 0 && Imm % ShiftWidth == 0) {
  // ...
  unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
// ...
if (Imm % ShiftWidth == ShiftWidth - 1) {
"Unexpected condition code!");
// ...
ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
if (CCVal != ExpectedCCVal)
// ...
if (!LHS.getValueType().isScalarInteger())
// ...
if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
  int64_t CVal = C->getSExtValue();
  // ...
  if (CVal == -2048) {
    // ...
    RISCV::XORI, DL, N->getValueType(0), LHS,
    // ...
  if (isInt<12>(CVal) || CVal == 2048) {
    // ...
    RISCV::ADDI, DL, N->getValueType(0), LHS,
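// selectSETCC: an integer equality compare against a constant C is selected
// as ADDI LHS, -C (the result is zero exactly when LHS == C), which the
// caller then tests with seqz/snez; C == -2048 needs XORI instead because
// +2048 is not representable as a simm12 addend.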
cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
  Val = N.getOperand(0);
// ...
auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
  if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
  // ...
  N.getConstantOperandVal(1) == ShiftAmt &&
MVT VT = N.getSimpleValueType();
// ...
auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
  Val = N.getOperand(0);
  // ...
  MVT VT = N.getSimpleValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
  // ...
  uint64_t Mask = N.getConstantOperandVal(1);
  // ...
  unsigned XLen = Subtarget->getXLen();
  // ...
  Mask &= maskTrailingZeros<uint64_t>(C2);
  // ...
  Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
  // ...
  if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
    // ...
    EVT VT = N.getValueType();
  // ...
  if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
    // ...
    EVT VT = N.getValueType();
bool LeftShift = N.getOpcode() == ISD::SHL;
if ((LeftShift || N.getOpcode() == ISD::SRL) &&
    isa<ConstantSDNode>(N.getOperand(1))) {
  // ...
  unsigned C1 = N.getConstantOperandVal(1);
  unsigned XLen = Subtarget->getXLen();
  // ...
  if (LeftShift && Leading == 32 && Trailing > 0 &&
      (Trailing + C1) == ShAmt) {
    // ...
    EVT VT = N.getValueType();
  // ...
  if (!LeftShift && Leading == 32 && Trailing > C1 &&
      (Trailing - C1) == ShAmt) {
    // ...
    EVT VT = N.getValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
// ...
  uint64_t Mask = N.getConstantOperandVal(1);
  // ...
  Mask &= maskTrailingZeros<uint64_t>(C2);
  // ...
  if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
    // ...
    EVT VT = N.getValueType();
bool HasGlueOp = User->getGluedNode() != nullptr;
// ...
bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
// ...
const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
// ...
if (UserOpNo == VLIdx)
// ...
auto NumDemandedBits =
    RISCV::getVectorLowDemandedScalarBits(MCOpcode, Log2SEW);
return NumDemandedBits && Bits >= *NumDemandedBits;
const unsigned Depth) const {
// ...
isa<ConstantSDNode>(Node) || Depth != 0) &&
// ...
"Unexpected opcode");
// ...
if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
// ...
for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
  // ...
  if (!User->isMachineOpcode())
  // ...
  switch (User->getMachineOpcode()) {
  // ...
  case RISCV::SLLI_UW:
  case RISCV::FMV_W_X:
  case RISCV::FCVT_H_W:
  case RISCV::FCVT_H_WU:
  case RISCV::FCVT_S_W:
  case RISCV::FCVT_S_WU:
  case RISCV::FCVT_D_W:
  case RISCV::FCVT_D_WU:
  case RISCV::TH_REVW:
  case RISCV::TH_SRRIW:
  // ...
  if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
  // ...
  if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
  // ...
  if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
  // ...
  unsigned ShAmt = User->getConstantOperandVal(1);
  // ...
  case RISCV::FMV_H_X:
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
  // ...
  if (Bits < (Subtarget->getXLen() / 2))
  // ...
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
  // ...
  if (UI.getOperandNo() != 0 || Bits < 32)
  // ...
  if (UI.getOperandNo() != 0 || Bits < 8)
  // ...
  if (UI.getOperandNo() != 0 || Bits < 16)
  // ...
  if (UI.getOperandNo() != 0 || Bits < 32)
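// hasAllNBitUsers(Node, Bits) returns true only if every user consumes no
// more than the low Bits of Node's result: W-form instructions and FCVT_*_W*
// read 32 bits, FMV_H_X/ZEXT_H_* read 16, shifts subtract their shift
// amount, and SH1ADD_UW/SH2ADD_UW/SH3ADD_UW read the low 32 bits of operand
// 0. It backs the hasAllWUsers/hasAllHUsers helpers used for constant
// narrowing above.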
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  int64_t Offset = C->getSExtValue();
  // ...
  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
  // ...
  EVT Ty = N->getValueType(0);
// ...
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
  // ...
  N->getValueType(0));
} else if (C && C->isAllOnes()) {
  // ...
  N->getValueType(0));
} else if (isa<RegisterSDNode>(N) &&
           cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
  // ...
  N->getValueType(0));
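// selectVLOp: an AVL that fits in uimm5 is kept as an immediate; an all-ones
// constant is the VLMAX sentinel and, like an explicit X0 register operand,
// is selected as X0 so the resulting vsetvli requests VLMAX.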
if (!N.getOperand(0).isUndef())
// ...
N = N.getOperand(1);
// ...
!Splat.getOperand(0).isUndef())
// ...
assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");
// ...
SplatVal = Splat.getOperand(1);
std::function<bool(int64_t)> ValidateImm) {
// ...
if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
// ...
const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
// ...
"Unexpected splat operand type");
// ...
APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
// ...
if (!ValidateImm(SplatImm))
// ...
[](int64_t Imm) { return isInt<5>(Imm); });
// ...
N, SplatVal, *CurDAG, *Subtarget,
[](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
// ...
N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
});
// ...
N, SplatVal, *CurDAG, *Subtarget,
[Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
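// The Simm5Plus1 predicates accept [-15, 16] rather than simm5's [-16, 15]
// because their users rewrite "x >= imm" style comparisons as
// "x > imm - 1", so the value actually encoded is Imm - 1.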
isa<ConstantSDNode>(VL) &&
// ...
if (!N.hasOneUse() ||
    N.getValueType().getSizeInBits().getKnownMinValue() < 8)
// ...
N = N->getOperand(0);
// ...
->getLegalZfaFPImm(APF, VT)
// ...
if (VT == MVT::f64 && !Subtarget->is64Bit()) {
// ...
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  // ...
  if (!isInt<5>(ImmVal))
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  // ...
  if (N->getMachineOpcode() != RISCV::ADDIW ||
  // ...
  case RISCV::ADD:  Opc = RISCV::ADDW;  break;
  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SUB:  Opc = RISCV::SUBW;  break;
  case RISCV::MUL:  Opc = RISCV::MULW;  break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
  // ...
  !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
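// doPeepholeSExtW looks for "addiw rd, rs, 0" (the sext.w idiom) whose input
// is an ADD/ADDI/SUB/MUL/SLLI; rewriting the producer to its W-form makes
// the result already sign-extended, so the addiw can be removed.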
case RISCV::TH_MULAW:
case RISCV::TH_MULAH:
case RISCV::TH_MULSW:
case RISCV::TH_MULSH:
if (!isa<RegisterSDNode>(MaskOp) ||
    cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
// ...
const auto *Glued = GlueOp.getNode();
// ...
if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
    cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
// ...
const auto IsVMSet = [](unsigned Opc) {
  return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
         Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
         Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
         Opc == RISCV::PseudoVMSET_M_B8;
};
// ...
N->getOperand(N->getNumOperands() - 1));
// ...
return V.isMachineOpcode() &&
       V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
// ...
unsigned MaskOpIdx = I->MaskOpIdx;
// ...
const unsigned Opc = I->UnmaskedPseudo;
// ...
"Masked and unmasked pseudos are inconsistent");
// ...
assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");
// ...
for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
  // ...
  if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
// ...
const auto *Glued = N->getGluedNode();
if (auto *TGlued = Glued->getGluedNode())
// ...
if (!N->memoperands_empty())
// ...
Result->setFlags(N->getFlags());
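// doPeepholeMaskedRVV: when usesAllOnesMask() proves the V0 operand was set
// by vmset.m, the masked pseudo is rebuilt as the UnmaskedPseudo from the
// MaskedPseudos table, copying all operands except the mask and the glue
// that fed V0.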
return RISCV::PseudoVMSET_M_B1;
// ...
return RISCV::PseudoVMSET_M_B2;
// ...
return RISCV::PseudoVMSET_M_B4;
// ...
return RISCV::PseudoVMSET_M_B8;
// ...
return RISCV::PseudoVMSET_M_B16;
// ...
return RISCV::PseudoVMSET_M_B32;
// ...
return RISCV::PseudoVMSET_M_B64;
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
  // ...
  Merge = N->getOperand(0);
  False = N->getOperand(0);
  True = N->getOperand(1);
  VL = N->getOperand(2);
  // ...
  Merge = N->getOperand(0);
  False = N->getOperand(1);
  True = N->getOperand(2);
  Mask = N->getOperand(3);
  VL = N->getOperand(4);
  // ...
  Glue = N->getOperand(N->getNumOperands() - 1);
  // ...
  assert(!Mask || cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
  // ...
  "Expect True is the first output of an instruction.");
  // ...
  bool IsMasked = false;
  // ...
  RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
  if (!Info && HasTiedDest) {
    Info = RISCV::getMaskedPseudoInfo(TrueOpc);
  // ...
  if (False != MergeOpTrue)
  // ...
  assert(HasTiedDest && "Expected tied dest");
  // ...
  unsigned TrueVLIndex =
      True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
  // ...
  auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
  auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
  // ...
  return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;
  // ...
  VL = GetMinVL(TrueVL, VL);
  // ...
  if (TrueVL != VL || !IsMasked)
  // ...
  RISCV::V0, AllOnesMask, SDValue());
  // ...
  unsigned MaskedOpc = Info->MaskedPseudo;
  // ...
  "Expected instructions with mask have policy operand.");
  // ...
  "Expected instructions with mask have a tied dest.");
  // ...
  bool MergeVLShrunk = VL != OrigVL;
  // ...
  const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
  assert(!IsMasked || NormalOpsEnd == Info->MaskOpIdx);
  // ...
  if (HasRoundingMode)
  // ...
  if (!cast<MachineSDNode>(True)->memoperands_empty())
  // ...
  doPeepholeMaskedRVV(Result);
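// performCombineVMergeAndVOps folds "vmerge vd, false, true, v0" into the
// instruction defining "true": if that instruction has a masked form (found
// via lookupMaskedIntrinsicByUnmasked or getMaskedPseudoInfo), it is
// re-emitted as the masked pseudo with "false" as the passthru and the
// smaller of the two VLs, and the vmerge disappears.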
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;
  // ...
  if (N->use_empty() || !N->isMachineOpcode())
  // ...
  MadeChange |= performCombineVMergeAndVOps(N);
// ...

bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
  bool MadeChange = false;
  // ...
  if (N->use_empty() || !N->isMachineOpcode())
  // ...
  const unsigned Opc = N->getMachineOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
  // ...
  for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
  // ...
  Result->setFlags(N->getFlags());