20 #include "llvm/IR/IntrinsicsRISCV.h"
28 #define DEBUG_TYPE "riscv-isel"
29 #define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
32 "riscv-use-rematerializable-movimm",
cl::Hidden,
33 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
34 "constant materialization"),
38 #define GET_RISCVVSSEGTable_IMPL
39 #define GET_RISCVVLSEGTable_IMPL
40 #define GET_RISCVVLXSEGTable_IMPL
41 #define GET_RISCVVSXSEGTable_IMPL
42 #define GET_RISCVVLETable_IMPL
43 #define GET_RISCVVSETable_IMPL
44 #define GET_RISCVVLXTable_IMPL
45 #define GET_RISCVVSXTable_IMPL
46 #include "RISCVGenSearchableTables.inc"
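// The generated *_IMPL tables above provide the searchable pseudo-instruction
// tables (VLSEG/VSSEG, strided and indexed segment variants, VLE/VSE, VLX/VSX)
// that the selection code below queries through the RISCV::get*Pseudo() helpers.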
52 bool MadeChange = false;
59 switch (N->getOpcode()) {
63 MVT VT = N->getSimpleValueType(0);
79 assert(N->getNumOperands() == 4 && "Unexpected number of operands");
80 MVT VT = N->getSimpleValueType(0);
86 Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
94 int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();
118 MVT::i64, MPI, Align(8),
125 LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
144 bool MadeChange = false;
148 if (N->use_empty() || !N->isMachineOpcode())
151 MadeChange |= doPeepholeSExtW(N);
156 MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));
161 MadeChange |= doPeepholeMergeVVMFold();
169 MadeChange |= doPeepholeNoRegPassThru();
181 switch (Inst.getOpndKind()) {
220 if (Seq.size() > 3) {
221 unsigned ShiftAmt, AddOpc;
241 static const unsigned M1TupleRegClassIDs[] = {
242 RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
243 RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
244 RISCV::VRN8M1RegClassID};
245 static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
246 RISCV::VRN3M2RegClassID,
247 RISCV::VRN4M2RegClassID};
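// Register classes for NF-register segment tuples, indexed by NF - 2; the
// static_asserts below check that the sub_vrm* indices are consecutive so the
// tuple can be assembled from SubReg0 + I in the loop that follows.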
260 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
261 "Unexpected subreg numbering");
262 SubReg0 = RISCV::sub_vrm1_0;
263 RegClassID = M1TupleRegClassIDs[NF - 2];
266 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
267 "Unexpected subreg numbering");
268 SubReg0 = RISCV::sub_vrm2_0;
269 RegClassID = M2TupleRegClassIDs[NF - 2];
272 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
273 "Unexpected subreg numbering");
274 SubReg0 = RISCV::sub_vrm4_0;
275 RegClassID = RISCV::VRN2M4RegClassID;
284 for (unsigned I = 0; I < Regs.size(); ++I) {
294 SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
296 bool IsLoad, MVT *IndexVT) {
297 SDValue Chain = Node->getOperand(0);
300 Operands.push_back(Node->getOperand(CurOp++));
302 if (IsStridedOrIndexed) {
303 Operands.push_back(Node->getOperand(CurOp++));
305 *IndexVT = Operands.back()->getSimpleValueType(0);
310 SDValue Mask = Node->getOperand(CurOp++);
329 Policy = Node->getConstantOperandVal(CurOp++);
342 unsigned NF = Node->getNumValues() - 1;
343 MVT VT = Node->getSimpleValueType(0);
351 Node->op_begin() + CurOp + NF);
360 RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, false, Log2SEW,
361 static_cast<unsigned>(LMUL));
365 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
369 for (unsigned I = 0; I < NF; ++I) {
381 unsigned NF = Node->getNumValues() - 2;
382 MVT VT = Node->getSimpleValueType(0);
391 Node->op_begin() + CurOp + NF);
401 RISCV::getVLSEGPseudo(NF, IsMasked, false, true,
402 Log2SEW, static_cast<unsigned>(LMUL));
406 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
410 for (unsigned I = 0; I < NF; ++I) {
424 unsigned NF = Node->getNumValues() - 1;
425 MVT VT = Node->getSimpleValueType(0);
433 Node->op_begin() + CurOp + NF);
444 "Element count mismatch");
448 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
450 "values when XLEN=32");
453 NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
454 static_cast<unsigned>(IndexLMUL));
458 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
462 for (unsigned I = 0; I < NF; ++I) {
475 unsigned NF = Node->getNumOperands() - 4;
480 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
488 unsigned CurOp = 2 + NF;
494 NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
498 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
507 unsigned NF = Node->getNumOperands() - 5;
510 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
518 unsigned CurOp = 2 + NF;
526 "Element count mismatch");
530 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
532 "values when XLEN=32");
535 NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
536 static_cast<unsigned>(IndexLMUL));
540 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
555 unsigned IntNo = Node->getConstantOperandVal(0);
557 assert((IntNo == Intrinsic::riscv_vsetvli ||
558 IntNo == Intrinsic::riscv_vsetvlimax) &&
559 "Unexpected vsetvli intrinsic");
561 bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
562 unsigned Offset = (VLMax ? 1 : 2);
565 "Unexpected number of operands");
570 Node->getConstantOperandVal(Offset + 1) & 0x7);
577 unsigned Opcode = RISCV::PseudoVSETVLI;
578 if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
585 Opcode = RISCV::PseudoVSETVLIX0;
587 VLOperand = Node->getOperand(1);
589 if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
591 if (isUInt<5>(AVL)) {
594 XLenVT, VLImm, VTypeIOp));
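// A constant AVL that fits in uimm5 is encoded directly in the immediate form
// of vsetvli (vsetivli) rather than being materialized in a scratch GPR first.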
605 MVT VT = Node->getSimpleValueType(0);
606 unsigned Opcode = Node->getOpcode();
608 "Unexpected opcode");
613 SDValue N0 = Node->getOperand(0);
614 SDValue N1 = Node->getOperand(1);
631 bool SignExt = false;
649 uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
650 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
653 int64_t ShiftedVal = Val >> ShAmt;
654 if (!isInt<12>(ShiftedVal))
658 if (SignExt && ShAmt >= 32)
665 case ISD::AND: BinOpc = RISCV::ANDI; break;
666 case ISD::OR: BinOpc = RISCV::ORI; break;
667 case ISD::XOR: BinOpc = RISCV::XORI; break;
670 unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
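// tryShrinkShlLogicImm: the logic-op immediate is pre-shifted right by ShAmt
// (checked above to still fit in simm12), the logic op is applied first, and
// the shift is re-emitted afterwards; SLLIW is used when the operand was
// sign-extended from i32 so the 32-bit result stays correctly sign-extended.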
684 if (!Subtarget->hasVendorXTHeadBb())
687 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
691 SDValue N0 = Node->getOperand(0);
695 auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
703 MVT VT = Node->getSimpleValueType(0);
704 const unsigned RightShAmt = N1C->getZExtValue();
709 auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
713 const unsigned LeftShAmt = N01C->getZExtValue();
716 if (LeftShAmt > RightShAmt)
720 const unsigned Msb = MsbPlusOne - 1;
721 const unsigned Lsb = RightShAmt - LeftShAmt;
723 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
732 cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
738 const unsigned Msb = ExtSize - 1;
739 const unsigned Lsb = RightShAmt;
741 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
751 if (!Subtarget->hasVendorXTHeadMemIdx())
765 "Unexpected addressing mode");
768 int64_t Offset = C->getSExtValue();
773 for (Shift = 0; Shift < 4; Shift++)
774 if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
783 if (LoadVT == MVT::i8 && IsPre)
784 Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
785 else if (LoadVT == MVT::i8 && IsPost)
786 Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
787 else if (LoadVT == MVT::i16 && IsPre)
788 Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
789 else if (LoadVT == MVT::i16 && IsPost)
790 Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
791 else if (LoadVT == MVT::i32 && IsPre)
792 Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
793 else if (LoadVT == MVT::i32 && IsPost)
794 Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
795 else if (LoadVT == MVT::i64 && IsPre)
796 Opcode = RISCV::TH_LDIB;
797 else if (LoadVT == MVT::i64 && IsPost)
798 Opcode = RISCV::TH_LDIA;
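// XTHeadMemIdx pre-/post-increment loads: the TH_L{B,H,W,D}{U}I{B,A} opcode is
// chosen from the loaded type, zero- vs. sign-extension, and whether the
// address update happens before (IB) or after (IA) the memory access.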
825 unsigned IntNo = Node->getConstantOperandVal(1);
827 assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
828 IntNo == Intrinsic::riscv_sf_vc_i_se) &&
829 "Unexpected vsetvli intrinsic");
832 unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));
836 Node->getOperand(4), Node->getOperand(5),
837 Node->getOperand(8), SEWOp,
838 Node->getOperand(0)};
841 auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
842 switch (LMulSDNode->getSExtValue()) {
844 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
845 : RISCV::PseudoVC_I_SE_MF8;
848 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
849 : RISCV::PseudoVC_I_SE_MF4;
852 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
853 : RISCV::PseudoVC_I_SE_MF2;
856 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
857 : RISCV::PseudoVC_I_SE_M1;
860 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
861 : RISCV::PseudoVC_I_SE_M2;
864 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
865 : RISCV::PseudoVC_I_SE_M4;
868 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
869 : RISCV::PseudoVC_I_SE_M8;
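// sf.vc.x.se / sf.vc.i.se are selected to an LMUL-specific SiFive VCIX pseudo;
// the switch above picks the PseudoVC_{X,I}_SE_* variant matching the encoded
// LMUL operand.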
874 Opcode, DL, Node->getSimpleValueType(0), Operands));
879 if (Node->isMachineOpcode()) {
887 unsigned Opcode = Node->getOpcode();
890 MVT VT = Node->getSimpleValueType(0);
892 bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
896 assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
897 auto *ConstNode = cast<ConstantSDNode>(Node);
898 if (ConstNode->isZero()) {
904 int64_t Imm = ConstNode->getSExtValue();
907 if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
909 Imm = SignExtend64<16>(Imm);
912 if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
913 Imm = SignExtend64<32>(Imm);
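// Constant materialization tweak: when every user only reads the low 16 or 32
// bits, the equivalent sign-extended value is materialized instead, since it
// typically needs a shorter instruction sequence than the zero-extended one.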
919 const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
920 auto [FPImm, NeedsFNeg] =
931 FNegOpc = RISCV::FSGNJN_H;
935 FNegOpc = RISCV::FSGNJN_S;
939 FNegOpc = RISCV::FSGNJN_D;
952 bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
962 bool HasZdinx = Subtarget->hasStdExtZdinx();
963 bool Is64Bit = Subtarget->is64Bit();
969 assert(Subtarget->hasStdExtZfbfmin());
970 Opc = RISCV::FMV_H_X;
973 Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
976 Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
983 Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
985 Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
990 if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
999 Opc = RISCV::FSGNJN_D;
1001 Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
1010 if (!Subtarget->hasStdExtZdinx())
1017 Node->getOperand(0),
1019 Node->getOperand(1),
1028 if (Subtarget->hasStdExtZdinx()) {
1031 if (!SDValue(Node, 0).use_empty()) {
1033 Node->getOperand(0));
1037 if (!SDValue(Node, 1).use_empty()) {
1039 Node->getOperand(0));
1047 if (!Subtarget->hasStdExtZfa())
1050 "Unexpected subtarget");
1053 if (!SDValue(Node, 0).use_empty()) {
1055 Node->getOperand(0));
1058 if (!SDValue(Node, 1).use_empty()) {
1060 Node->getOperand(0));
1068 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1071 SDValue N0 = Node->getOperand(0);
1075 unsigned ShAmt = N1C->getZExtValue();
1081 unsigned XLen = Subtarget->getXLen();
1084 if (TrailingZeros > 0 && LeadingZeros == 32) {
1098 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1101 SDValue N0 = Node->getOperand(0);
1104 unsigned ShAmt = N1C->getZExtValue();
1110 unsigned XLen = Subtarget->getXLen();
1113 if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
1132 Mask |= maskTrailingOnes<uint64_t>(ShAmt);
1136 if (ShAmt >= TrailingOnes)
1139 if (TrailingOnes == 32) {
1141 Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
1152 if (HasBitTest && ShAmt + 1 == TrailingOnes) {
1154 Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
1160 unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
1182 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1185 SDValue N0 = Node->getOperand(0);
1188 unsigned ShAmt = N1C->getZExtValue();
1190 cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
1192 if (ExtSize >= 32 || ShAmt >= ExtSize)
1194 unsigned LShAmt = Subtarget->getXLen() - ExtSize;
1211 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1216 const bool isC1ANDI = isInt<12>(C1);
1218 SDValue N0 = Node->getOperand(0);
1223 if (!Subtarget->hasVendorXTHeadBb())
1235 auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
1238 unsigned C2 = C->getZExtValue();
1239 unsigned XLen = Subtarget->getXLen();
1240 assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
1248 bool IsCANDI = isInt<6>(N1C->getSExtValue());
1252 C1 &= maskTrailingZeros<uint64_t>(C2);
1254 C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
1258 bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
1264 if (!LeftShift && isC1Mask) {
1268 if (C2 + 32 == Leading) {
1280 if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
1282 cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
1287 RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
1301 const unsigned Lsb = C2;
1302 if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
1307 bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
1309 cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
1311 Skip |= HasBitTest && Leading == XLen - 1;
1312 if (OneUseOrZExtW && !Skip) {
1314 RISCV::SLLI, DL, VT, X,
1330 if (C2 + Leading < XLen &&
1331 C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
1333 if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
1342 if (OneUseOrZExtW && !IsCANDI) {
1344 RISCV::SLLI, DL, VT, X,
1360 if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
1362 unsigned SrliOpc = RISCV::SRLI;
1365 isa<ConstantSDNode>(X.getOperand(1)) &&
1366 X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
1367 SrliOpc = RISCV::SRLIW;
1368 X = X.getOperand(0);
1380 if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
1381 OneUseOrZExtW && !IsCANDI) {
1383 RISCV::SRLIW, DL, VT, X,
1398 if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
1400 RISCV::SRLI, DL, VT, X,
1409 if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1411 RISCV::SRLIW, DL, VT, X,
1427 if (isC1Mask && !isC1ANDI) {
1429 if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
1446 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1447 if (!N1C || !N1C->hasOneUse())
1451 SDValue N0 = Node->getOperand(0);
1468 (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
1470 IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
1471 if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
1475 bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
1477 IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
1478 if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
1484 unsigned XLen = Subtarget->getXLen();
1490 unsigned ConstantShift = XLen - LeadingZeros;
1494 uint64_t ShiftedC1 = C1 << ConstantShift;
1497 ShiftedC1 = SignExtend64<32>(ShiftedC1);
1515 unsigned IntNo = Node->getConstantOperandVal(0);
1520 case Intrinsic::riscv_vmsgeu:
1521 case Intrinsic::riscv_vmsge: {
1522 SDValue Src1 = Node->getOperand(1);
1523 SDValue Src2 = Node->getOperand(2);
1524 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
1525 bool IsCmpUnsignedZero = false;
1530 if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
1531 int64_t CVal = C->getSExtValue();
1532 if (CVal >= -15 && CVal <= 16) {
1533 if (!IsUnsigned || CVal != 0)
1535 IsCmpUnsignedZero = true;
1539 unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
1543#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
1544 case RISCVII::VLMUL::lmulenum: \
1545 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1546 : RISCV::PseudoVMSLT_VX_##suffix; \
1547 VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
1548 VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
1557#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
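// vmsge{u}.vx has no hardware encoding; the unmasked form is expanded to
// vmslt{u}.vx followed by vmnand.mm, and the special case "unsigned >= 0"
// (always true) is handled with a single vmset.m.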
1565 if (IsCmpUnsignedZero) {
1576 {Cmp, Cmp, VL, SEW}));
1579 case Intrinsic::riscv_vmsgeu_mask:
1580 case Intrinsic::riscv_vmsge_mask: {
1581 SDValue Src1 = Node->getOperand(2);
1582 SDValue Src2 = Node->getOperand(3);
1583 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
1584 bool IsCmpUnsignedZero = false;
1589 if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
1590 int64_t CVal = C->getSExtValue();
1591 if (CVal >= -15 && CVal <= 16) {
1592 if (!IsUnsigned || CVal != 0)
1594 IsCmpUnsignedZero = true;
1598 unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
1603#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
1604 case RISCVII::VLMUL::lmulenum: \
1605 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1606 : RISCV::PseudoVMSLT_VX_##suffix; \
1607 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
1608 : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
1617#undef CASE_VMSLT_OPCODES
1623#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
1624 case RISCVII::VLMUL::lmulenum: \
1625 VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
1626 VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
1627 VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
1636#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
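// The masked vmsge{u}.vx forms are likewise expanded from vmslt{u}.vx plus
// mask logic (vmxor/vmandn/vmor), with the exact sequence chosen below based
// on whether the destination aliases the mask or the merge operand.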
1643 SDValue MaskedOff = Node->getOperand(1);
1644 SDValue Mask = Node->getOperand(4);
1647 if (IsCmpUnsignedZero) {
1650 if (Mask == MaskedOff) {
1656 {Mask, MaskedOff, VL, MaskSEW}));
1663 if (Mask == MaskedOff) {
1668 {Mask, Cmp, VL, MaskSEW}));
1685 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
1689 {Cmp, Mask, VL, MaskSEW}));
1692 case Intrinsic::riscv_vsetvli:
1693 case Intrinsic::riscv_vsetvlimax:
1699 unsigned IntNo = Node->getConstantOperandVal(1);
1704 case Intrinsic::riscv_vlseg2:
1705 case Intrinsic::riscv_vlseg3:
1706 case Intrinsic::riscv_vlseg4:
1707 case Intrinsic::riscv_vlseg5:
1708 case Intrinsic::riscv_vlseg6:
1709 case Intrinsic::riscv_vlseg7:
1710 case Intrinsic::riscv_vlseg8: {
1714 case Intrinsic::riscv_vlseg2_mask:
1715 case Intrinsic::riscv_vlseg3_mask:
1716 case Intrinsic::riscv_vlseg4_mask:
1717 case Intrinsic::riscv_vlseg5_mask:
1718 case Intrinsic::riscv_vlseg6_mask:
1719 case Intrinsic::riscv_vlseg7_mask:
1720 case Intrinsic::riscv_vlseg8_mask: {
1724 case Intrinsic::riscv_vlsseg2:
1725 case Intrinsic::riscv_vlsseg3:
1726 case Intrinsic::riscv_vlsseg4:
1727 case Intrinsic::riscv_vlsseg5:
1728 case Intrinsic::riscv_vlsseg6:
1729 case Intrinsic::riscv_vlsseg7:
1730 case Intrinsic::riscv_vlsseg8: {
1734 case Intrinsic::riscv_vlsseg2_mask:
1735 case Intrinsic::riscv_vlsseg3_mask:
1736 case Intrinsic::riscv_vlsseg4_mask:
1737 case Intrinsic::riscv_vlsseg5_mask:
1738 case Intrinsic::riscv_vlsseg6_mask:
1739 case Intrinsic::riscv_vlsseg7_mask:
1740 case Intrinsic::riscv_vlsseg8_mask: {
1744 case Intrinsic::riscv_vloxseg2:
1745 case Intrinsic::riscv_vloxseg3:
1746 case Intrinsic::riscv_vloxseg4:
1747 case Intrinsic::riscv_vloxseg5:
1748 case Intrinsic::riscv_vloxseg6:
1749 case Intrinsic::riscv_vloxseg7:
1750 case Intrinsic::riscv_vloxseg8:
1753 case Intrinsic::riscv_vluxseg2:
1754 case Intrinsic::riscv_vluxseg3:
1755 case Intrinsic::riscv_vluxseg4:
1756 case Intrinsic::riscv_vluxseg5:
1757 case Intrinsic::riscv_vluxseg6:
1758 case Intrinsic::riscv_vluxseg7:
1759 case Intrinsic::riscv_vluxseg8:
1762 case Intrinsic::riscv_vloxseg2_mask:
1763 case Intrinsic::riscv_vloxseg3_mask:
1764 case Intrinsic::riscv_vloxseg4_mask:
1765 case Intrinsic::riscv_vloxseg5_mask:
1766 case Intrinsic::riscv_vloxseg6_mask:
1767 case Intrinsic::riscv_vloxseg7_mask:
1768 case Intrinsic::riscv_vloxseg8_mask:
1771 case Intrinsic::riscv_vluxseg2_mask:
1772 case Intrinsic::riscv_vluxseg3_mask:
1773 case Intrinsic::riscv_vluxseg4_mask:
1774 case Intrinsic::riscv_vluxseg5_mask:
1775 case Intrinsic::riscv_vluxseg6_mask:
1776 case Intrinsic::riscv_vluxseg7_mask:
1777 case Intrinsic::riscv_vluxseg8_mask:
1780 case Intrinsic::riscv_vlseg8ff:
1781 case Intrinsic::riscv_vlseg7ff:
1782 case Intrinsic::riscv_vlseg6ff:
1783 case Intrinsic::riscv_vlseg5ff:
1784 case Intrinsic::riscv_vlseg4ff:
1785 case Intrinsic::riscv_vlseg3ff:
1786 case Intrinsic::riscv_vlseg2ff: {
1790 case Intrinsic::riscv_vlseg8ff_mask:
1791 case Intrinsic::riscv_vlseg7ff_mask:
1792 case Intrinsic::riscv_vlseg6ff_mask:
1793 case Intrinsic::riscv_vlseg5ff_mask:
1794 case Intrinsic::riscv_vlseg4ff_mask:
1795 case Intrinsic::riscv_vlseg3ff_mask:
1796 case Intrinsic::riscv_vlseg2ff_mask: {
1800 case Intrinsic::riscv_vloxei:
1801 case Intrinsic::riscv_vloxei_mask:
1802 case Intrinsic::riscv_vluxei:
1803 case Intrinsic::riscv_vluxei_mask: {
1804 bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1805 IntNo == Intrinsic::riscv_vluxei_mask;
1806 bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1807 IntNo == Intrinsic::riscv_vloxei_mask;
1809 MVT VT = Node->getSimpleValueType(0);
1814 Operands.push_back(Node->getOperand(CurOp++));
1822 "Element count mismatch");
1827 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1829 "values when XLEN=32");
1832 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1833 static_cast<unsigned>(IndexLMUL));
1837 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1843 case Intrinsic::riscv_vlm:
1844 case Intrinsic::riscv_vle:
1845 case Intrinsic::riscv_vle_mask:
1846 case Intrinsic::riscv_vlse:
1847 case Intrinsic::riscv_vlse_mask: {
1848 bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1849 IntNo == Intrinsic::riscv_vlse_mask;
1851 IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1853 MVT VT = Node->getSimpleValueType(0);
1862 bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
1865 if (HasPassthruOperand)
1866 Operands.push_back(Node->getOperand(CurOp++));
1879 RISCV::getVLEPseudo(IsMasked, IsStrided, false, Log2SEW,
1880 static_cast<unsigned>(LMUL));
1884 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1890 case Intrinsic::riscv_vleff:
1891 case Intrinsic::riscv_vleff_mask: {
1892 bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1894 MVT VT = Node->getSimpleValueType(0);
1899 Operands.push_back(Node->getOperand(CurOp++));
1906 RISCV::getVLEPseudo(IsMasked, false, true,
1907 Log2SEW, static_cast<unsigned>(LMUL));
1910 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1920 unsigned IntNo = Node->getConstantOperandVal(1);
1922 case Intrinsic::riscv_vsseg2:
1923 case Intrinsic::riscv_vsseg3:
1924 case Intrinsic::riscv_vsseg4:
1925 case Intrinsic::riscv_vsseg5:
1926 case Intrinsic::riscv_vsseg6:
1927 case Intrinsic::riscv_vsseg7:
1928 case Intrinsic::riscv_vsseg8: {
1932 case Intrinsic::riscv_vsseg2_mask:
1933 case Intrinsic::riscv_vsseg3_mask:
1934 case Intrinsic::riscv_vsseg4_mask:
1935 case Intrinsic::riscv_vsseg5_mask:
1936 case Intrinsic::riscv_vsseg6_mask:
1937 case Intrinsic::riscv_vsseg7_mask:
1938 case Intrinsic::riscv_vsseg8_mask: {
1942 case Intrinsic::riscv_vssseg2:
1943 case Intrinsic::riscv_vssseg3:
1944 case Intrinsic::riscv_vssseg4:
1945 case Intrinsic::riscv_vssseg5:
1946 case Intrinsic::riscv_vssseg6:
1947 case Intrinsic::riscv_vssseg7:
1948 case Intrinsic::riscv_vssseg8: {
1952 case Intrinsic::riscv_vssseg2_mask:
1953 case Intrinsic::riscv_vssseg3_mask:
1954 case Intrinsic::riscv_vssseg4_mask:
1955 case Intrinsic::riscv_vssseg5_mask:
1956 case Intrinsic::riscv_vssseg6_mask:
1957 case Intrinsic::riscv_vssseg7_mask:
1958 case Intrinsic::riscv_vssseg8_mask: {
1962 case Intrinsic::riscv_vsoxseg2:
1963 case Intrinsic::riscv_vsoxseg3:
1964 case Intrinsic::riscv_vsoxseg4:
1965 case Intrinsic::riscv_vsoxseg5:
1966 case Intrinsic::riscv_vsoxseg6:
1967 case Intrinsic::riscv_vsoxseg7:
1968 case Intrinsic::riscv_vsoxseg8:
1971 case Intrinsic::riscv_vsuxseg2:
1972 case Intrinsic::riscv_vsuxseg3:
1973 case Intrinsic::riscv_vsuxseg4:
1974 case Intrinsic::riscv_vsuxseg5:
1975 case Intrinsic::riscv_vsuxseg6:
1976 case Intrinsic::riscv_vsuxseg7:
1977 case Intrinsic::riscv_vsuxseg8:
1980 case Intrinsic::riscv_vsoxseg2_mask:
1981 case Intrinsic::riscv_vsoxseg3_mask:
1982 case Intrinsic::riscv_vsoxseg4_mask:
1983 case Intrinsic::riscv_vsoxseg5_mask:
1984 case Intrinsic::riscv_vsoxseg6_mask:
1985 case Intrinsic::riscv_vsoxseg7_mask:
1986 case Intrinsic::riscv_vsoxseg8_mask:
1989 case Intrinsic::riscv_vsuxseg2_mask:
1990 case Intrinsic::riscv_vsuxseg3_mask:
1991 case Intrinsic::riscv_vsuxseg4_mask:
1992 case Intrinsic::riscv_vsuxseg5_mask:
1993 case Intrinsic::riscv_vsuxseg6_mask:
1994 case Intrinsic::riscv_vsuxseg7_mask:
1995 case Intrinsic::riscv_vsuxseg8_mask:
1998 case Intrinsic::riscv_vsoxei:
1999 case Intrinsic::riscv_vsoxei_mask:
2000 case Intrinsic::riscv_vsuxei:
2001 case Intrinsic::riscv_vsuxei_mask: {
2002 bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
2003 IntNo == Intrinsic::riscv_vsuxei_mask;
2004 bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
2005 IntNo == Intrinsic::riscv_vsoxei_mask;
2007 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2012 Operands.push_back(Node->getOperand(CurOp++));
2020 "Element count mismatch");
2025 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
2027 "values when XLEN=32");
2030 IsMasked, IsOrdered, IndexLog2EEW,
2031 static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
2035 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
2041 case Intrinsic::riscv_vsm:
2042 case Intrinsic::riscv_vse:
2043 case Intrinsic::riscv_vse_mask:
2044 case Intrinsic::riscv_vsse:
2045 case Intrinsic::riscv_vsse_mask: {
2046 bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
2047 IntNo == Intrinsic::riscv_vsse_mask;
2049 IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
2051 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2056 Operands.push_back(Node->getOperand(CurOp++));
2063 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
2066 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
2072 case Intrinsic::riscv_sf_vc_x_se:
2073 case Intrinsic::riscv_sf_vc_i_se:
2080 MVT SrcVT = Node->getOperand(0).getSimpleValueType();
2092 SDValue V = Node->getOperand(0);
2093 SDValue SubV = Node->getOperand(1);
2095 auto Idx = Node->getConstantOperandVal(2);
2099 MVT SubVecContainerVT = SubVecVT;
2103 SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
2105 MVT ContainerVT = VT;
2107 ContainerVT = TLI.getContainerForFixedLengthVector(VT);
2111 std::tie(SubRegIdx, Idx) =
2113 ContainerVT, SubVecContainerVT, Idx, TRI);
2122 [[maybe_unused]] bool IsSubVecPartReg =
2126 assert((!IsSubVecPartReg || V.isUndef()) &&
2127 "Expecting lowering to have created legal INSERT_SUBVECTORs when "
2128 "the subvector is smaller than a full-sized register");
2132 if (SubRegIdx == RISCV::NoSubRegister) {
2133 unsigned InRegClassID =
2137 "Unexpected subvector extraction");
2150 SDValue V = Node->getOperand(0);
2151 auto Idx = Node->getConstantOperandVal(1);
2152 MVT InVT = V.getSimpleValueType();
2156 MVT SubVecContainerVT = VT;
2160 SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
2163 InVT = TLI.getContainerForFixedLengthVector(InVT);
2167 std::tie(SubRegIdx, Idx) =
2169 InVT, SubVecContainerVT, Idx, TRI);
2179 if (SubRegIdx == RISCV::NoSubRegister) {
2183 "Unexpected subvector extraction");
2202 if (!Node->getOperand(0).isUndef())
2204 SDValue Src = Node->getOperand(1);
2205 auto *Ld = dyn_cast<LoadSDNode>(Src);
2208 if (!Ld || Ld->isIndexed())
2210 EVT MemVT = Ld->getMemoryVT();
2236 if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
2246 Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
2250 false, IsStrided, false,
2251 Log2SEW, static_cast<unsigned>(LMUL));
2263 unsigned Locality = Node->getConstantOperandVal(3);
2267 if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
2271 int NontemporalLevel = 0;
2274 NontemporalLevel = 3;
2277 NontemporalLevel = 1;
2280 NontemporalLevel = 0;
2286 if (NontemporalLevel & 0b1)
2288 if (NontemporalLevel & 0b10)
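// The prefetch locality hint is mapped to a 2-bit nontemporal level, and each
// bit of that level is encoded in one of the MONontemporalBit0/1 MMO flags.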
2300 std::vector<SDValue> &OutOps) {
2303 switch (ConstraintID) {
2308 assert(Found &&
"SelectAddrRegImm should always succeed");
2309 OutOps.push_back(Op0);
2310 OutOps.push_back(Op1);
2314 OutOps.push_back(Op);
2328 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
2346 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
2347 int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
2348 if (isInt<12>(CVal)) {
2364 bool IsPrefetch = false) {
2365 if (!isa<ConstantSDNode>(Addr))
2368 int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
2373 int64_t Lo12 = SignExtend64<12>(CVal);
2375 if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
2376 if (IsPrefetch && (Lo12 & 0b11111) != 0)
2380 int64_t Hi20 = (Hi >> 12) & 0xfffff;
2397 if (Seq.back().getOpcode() != RISCV::ADDI)
2399 Lo12 = Seq.back().getImm();
2400 if (IsPrefetch && (Lo12 & 0b11111) != 0)
2405 assert(!Seq.empty() && "Expected more instructions in sequence");
2415 for (auto *Use : Add->uses()) {
2420 EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
2426 cast<StoreSDNode>(Use)->getValue() == Add)
2429 cast<AtomicSDNode>(Use)->getVal() == Add)
2437 unsigned MaxShiftAmount,
2440 EVT VT = Addr.getSimpleValueType();
2446 if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
2448 if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
2450 ShiftAmt = N.getConstantOperandVal(1);
2455 return ShiftAmt != 0;
2459 if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
2464 isInt<12>(C1->getSExtValue())) {
2473 } else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
2477 UnwrapShl(Addr.getOperand(1), Index, Scale);
2481 } else if (UnwrapShl(Addr, Index, Scale)) {
2496 MVT VT = Addr.getSimpleValueType();
2504 int64_t RV32ZdinxRange = IsINX ? 4 : 0;
2506 int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
2507 if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
2511 if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
2519 GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
2520 if (CVal == 0 || Alignment > CVal) {
2521 int64_t CombinedOffset = CVal + GA->getOffset();
2525 CombinedOffset, GA->getTargetFlags());
2531 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
2539 if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
2540 int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
2541 assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
2542 "simm12 not already handled?");
2547 if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
2548 int64_t Adj = CVal < 0 ? -2048 : 2047;
2590 MVT VT = Addr.getSimpleValueType();
2593 int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
2594 if (isInt<12>(CVal)) {
2598 if ((CVal & 0b11111) != 0) {
2604 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
2612 if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
2613 int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
2614 assert(!(isInt<12>(CVal) && isInt<12>(CVal)) &&
2615 "simm12 not already handled?");
2619 if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
2620 int64_t Adj = CVal < 0 ? -2048 : 2016;
2621 int64_t AdjustedOffset = CVal - Adj;
2623 RISCV::ADDI, DL, VT, Addr.getOperand(0),
2685 if (Imm != 0 && Imm % ShiftWidth == 0) {
2694 if (Imm != 0 && Imm % ShiftWidth == 0) {
2698 unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
2706 if (Imm % ShiftWidth == ShiftWidth - 1) {
2728 "Unexpected condition code!");
2735 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
2736 if (CCVal != ExpectedCCVal)
2742 if (!LHS.getValueType().isScalarInteger())
2753 if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
2754 int64_t CVal = C->getSExtValue();
2757 if (CVal == -2048) {
2760 RISCV::XORI, DL, N->getValueType(0), LHS,
2767 if (isInt<12>(CVal) || CVal == 2048) {
2770 RISCV::ADDI, DL, N->getValueType(0), LHS,
2786 cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
2787 Val = N.getOperand(0);
2791 auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
2792 if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
2797 N.getConstantOperandVal(1) == ShiftAmt &&
2804 MVT VT = N.getSimpleValueType();
2815 auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2816 if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
2817 Val = N.getOperand(0);
2821 MVT VT = N.getSimpleValueType();
2836 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
2842 uint64_t Mask = N.getConstantOperandVal(1);
2845 unsigned XLen = Subtarget->getXLen();
2847 Mask &= maskTrailingZeros<uint64_t>(C2);
2849 Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
2857 if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
2859 EVT VT = N.getValueType();
2869 if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
2871 EVT VT = N.getValueType();
2883 bool LeftShift = N.getOpcode() == ISD::SHL;
2884 if ((LeftShift || N.getOpcode() == ISD::SRL) &&
2885 isa<ConstantSDNode>(N.getOperand(1))) {
2891 unsigned C1 = N.getConstantOperandVal(1);
2892 unsigned XLen = Subtarget->getXLen();
2897 if (LeftShift && Leading == 32 && Trailing > 0 &&
2898 (Trailing + C1) == ShAmt) {
2900 EVT VT = N.getValueType();
2909 if (!LeftShift && Leading == 32 && Trailing > C1 &&
2910 (Trailing - C1) == ShAmt) {
2912 EVT VT = N.getValueType();
2931 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
2936 uint64_t Mask = N.getConstantOperandVal(1);
2939 Mask &= maskTrailingZeros<uint64_t>(C2);
2947 if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
2949 EVT VT = N.getValueType();
2977 bool HasGlueOp = User->getGluedNode() != nullptr;
2979 bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
2983 const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
2985 if (UserOpNo == VLIdx)
2988 auto NumDemandedBits =
2990 return NumDemandedBits && Bits >= *NumDemandedBits;
3003 const unsigned Depth) const {
3009 isa<ConstantSDNode>(Node) || Depth != 0) &&
3010 "Unexpected opcode");
3017 if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
3020 for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
3023 if (!User->isMachineOpcode())
3027 switch (User->getMachineOpcode()) {
3052 case RISCV::SLLI_UW:
3053 case RISCV::FMV_W_X:
3054 case RISCV::FCVT_H_W:
3055 case RISCV::FCVT_H_WU:
3056 case RISCV::FCVT_S_W:
3057 case RISCV::FCVT_S_WU:
3058 case RISCV::FCVT_D_W:
3059 case RISCV::FCVT_D_WU:
3060 case RISCV::TH_REVW:
3061 case RISCV::TH_SRRIW:
3074 if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
3079 if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
3088 if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
3107 unsigned ShAmt = User->getConstantOperandVal(1);
3121 case RISCV::FMV_H_X:
3122 case RISCV::ZEXT_H_RV32:
3123 case RISCV::ZEXT_H_RV64:
3129 if (Bits < (Subtarget->getXLen() / 2))
3133 case RISCV::SH1ADD_UW:
3134 case RISCV::SH2ADD_UW:
3135 case RISCV::SH3ADD_UW:
3138 if (UI.getOperandNo() != 0 || Bits < 32)
3142 if (UI.getOperandNo() != 0 || Bits < 8)
3146 if (UI.getOperandNo() != 0 || Bits < 16)
3150 if (UI.getOperandNo() != 0 || Bits < 32)
3162 if (auto *C = dyn_cast<ConstantSDNode>(N)) {
3163 int64_t Offset = C->getSExtValue();
3165 for (Shift = 0; Shift < 4; Shift++)
3166 if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
3173 EVT Ty = N->getValueType(0);
3185 auto *C = dyn_cast<ConstantSDNode>(N);
3186 if (C && isUInt<5>(C->getZExtValue())) {
3188 N->getValueType(0));
3189 } else if (C && C->isAllOnes()) {
3192 N->getValueType(0));
3193 } else if (isa<RegisterSDNode>(N) &&
3194 cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
3200 N->getValueType(0));
3210 if (!N.getOperand(0).isUndef())
3212 N = N.getOperand(1);
3217 !Splat.getOperand(0).isUndef())
3219 assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");
3228 SplatVal = Splat.getOperand(1);
3235 std::function<bool(int64_t)> ValidateImm) {
3237 if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
3240 const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
3242 "Unexpected splat operand type");
3251 APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
3255 if (!ValidateImm(SplatImm))
3264 [](int64_t Imm) { return isInt<5>(Imm); });
3269 N, SplatVal, *CurDAG, *Subtarget,
3270 [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
3276 N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
3277 return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
3284 N, SplatVal, *CurDAG, *Subtarget,
3285 [Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
3289 auto IsExtOrTrunc = [](SDValue N) {
3290 switch (N->getOpcode()) {
3305 while (IsExtOrTrunc(N)) {
3306 if (!N.hasOneUse() || N.getScalarValueSizeInBits() < 8)
3308 N = N->getOperand(0);
3329 ->getLegalZfaFPImm(APF, VT)
3334 if (VT == MVT::f64 && !Subtarget->is64Bit()) {
3346 if (auto *C = dyn_cast<ConstantSDNode>(N)) {
3349 if (!isInt<5>(ImmVal))
3361 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
3363 if (N->getMachineOpcode() != RISCV::ADDIW ||
3385 case RISCV::ADD: Opc = RISCV::ADDW; break;
3386 case RISCV::ADDI: Opc = RISCV::ADDIW; break;
3387 case RISCV::SUB: Opc = RISCV::SUBW; break;
3388 case RISCV::MUL: Opc = RISCV::MULW; break;
3389 case RISCV::SLLI: Opc = RISCV::SLLIW; break;
3397 !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
3412 case RISCV::TH_MULAW:
3413 case RISCV::TH_MULAH:
3414 case RISCV::TH_MULSW:
3415 case RISCV::TH_MULSH:
3430 if (!isa<RegisterSDNode>(MaskOp) ||
3431 cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
3435 const auto *Glued = GlueOp.getNode();
3441 if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
3442 cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
3454 const auto IsVMSet = [](unsigned Opc) {
3455 return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
3456 Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
3457 Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
3458 Opc == RISCV::PseudoVMSET_M_B8;
3471 N->getOperand(N->getNumOperands() - 1));
3475 return V.isMachineOpcode() &&
3476 V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
3485 RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
3489 unsigned MaskOpIdx = I->MaskOpIdx;
3495 const unsigned Opc = I->UnmaskedPseudo;
3502 "Masked and unmasked pseudos are inconsistent");
3504 assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");
3509 for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
3512 if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
3518 const auto *Glued = N->getGluedNode();
3519 if (auto *TGlued = Glued->getGluedNode())
3525 if (!N->memoperands_empty())
3528 Result->setFlags(N->getFlags());
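// GetVMSetForLMul: map an LMUL value to the PseudoVMSET_M_B* all-ones mask
// pseudo with the matching mask register layout.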
3545 return RISCV::PseudoVMSET_M_B1;
3547 return RISCV::PseudoVMSET_M_B2;
3549 return RISCV::PseudoVMSET_M_B4;
3551 return RISCV::PseudoVMSET_M_B8;
3553 return RISCV::PseudoVMSET_M_B16;
3555 return RISCV::PseudoVMSET_M_B32;
3557 return RISCV::PseudoVMSET_M_B64;
3585 bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
3589 Merge = N->getOperand(0);
3590 False = N->getOperand(0);
3591 True = N->getOperand(1);
3592 VL = N->getOperand(2);
3597 Merge = N->getOperand(0);
3598 False = N->getOperand(1);
3599 True = N->getOperand(2);
3600 Mask = N->getOperand(3);
3601 VL = N->getOperand(4);
3603 Glue = N->getOperand(N->getNumOperands() - 1);
3605 assert(!Mask || cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
3614 "Expect True is the first output of an instruction.");
3628 bool IsMasked = false;
3630 RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
3631 if (!Info && HasTiedDest) {
3632 Info = RISCV::getMaskedPseudoInfo(TrueOpc);
3653 if (False != MergeOpTrue)
3660 assert(HasTiedDest &&
"Expected tied dest");
3701 unsigned TrueVLIndex =
3702 True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
3713 auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
3714 auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
3717 return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;
3723 VL = GetMinVL(TrueVL, VL);
3730 if (TrueVL != VL || !IsMasked)
3755 RISCV::V0, AllOnesMask, SDValue());
3760 unsigned MaskedOpc = Info->MaskedPseudo;
3764 "Expected instructions with mask have policy operand.");
3767 "Expected instructions with mask have a tied dest.");
3777 bool MergeVLShrunk = VL != OrigVL;
3789 const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
3790 assert(!IsMasked || NormalOpsEnd == Info->MaskOpIdx);
3799 if (HasRoundingMode)
3802 Ops.append({VL, SEW, PolicyOp});
3815 if (!cast<MachineSDNode>(True)->memoperands_empty())
3828bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
3829 bool MadeChange = false;
3834 if (N->use_empty() || !N->isMachineOpcode())
3838 MadeChange |= performCombineVMergeAndVOps(N);
3848bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
3849 bool MadeChange = false;
3854 if (N->use_empty() || !N->isMachineOpcode())
3857 const unsigned Opc = N->getMachineOpcode();
3858 if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
3865 for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
3872 Result->setFlags(N->getFlags());