#include "llvm/IR/IntrinsicsRISCV.h"
// ...
#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
33 "riscv-use-rematerializable-movimm",
cl::Hidden,
34 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
35 "constant materialization"),
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
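// These GET_*_IMPL macros instantiate the TableGen-generated searchable
// tables from RISCVGenSearchableTables.inc; the selection code below uses
// them (via RISCV::getVLSEGPseudo and friends) to map (NF, masked, strided,
// SEW, LMUL) tuples to concrete RVV load/store pseudo opcodes.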
bool MadeChange = false;
// ...
switch (N->getOpcode()) {
// ...
MVT VT = N->getSimpleValueType(0);
// ...
assert(N->getNumOperands() == 4 && "Unexpected number of operands");
MVT VT = N->getSimpleValueType(0);
// ...
Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
// ...
int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();
// ...
MVT::i64, MPI, Align(8),
// ...
LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
bool MadeChange = false;
// ...
if (N->use_empty() || !N->isMachineOpcode())
// ...
MadeChange |= doPeepholeSExtW(N);
// ...
MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));
// ...
MadeChange |= doPeepholeMergeVVMFold();
// ...
MadeChange |= doPeepholeNoRegPassThru();
switch (Inst.getOpndKind()) {
// ...
if (Seq.size() > 3) {
  unsigned ShiftAmt, AddOpc;
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};
// ...
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm1_0;
RegClassID = M1TupleRegClassIDs[NF - 2];
// ...
static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm2_0;
RegClassID = M2TupleRegClassIDs[NF - 2];
// ...
static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm4_0;
RegClassID = RISCV::VRN2M4RegClassID;
// ...
for (unsigned I = 0; I < Regs.size(); ++I) {
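// createTuple builds a REG_SEQUENCE over the NF vector registers in Regs,
// picking a VRNxMy tuple register class from the tables above; the
// static_asserts guarantee that the sub_vrmX_N subregister indices are
// consecutive so the loop can place register I at SubReg0 + I.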
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++));
    // ...
    *IndexVT = Operands.back()->getSimpleValueType(0);
  // ...
  SDValue Mask = Node->getOperand(CurOp++);
  // ...
  Policy = Node->getConstantOperandVal(CurOp++);
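// The operand order assembled here (base pointer, optional stride or index
// vector, optional mask, then VL, SEW, a policy immediate for loads, and
// the chain) has to match the operand layout that the RVV pseudo selected
// by the callers expects.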
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
// ...
Node->op_begin() + CurOp + NF);
// ...
RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, false, Log2SEW,
                      static_cast<unsigned>(LMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumValues() - 2;
MVT VT = Node->getSimpleValueType(0);
// ...
Node->op_begin() + CurOp + NF);
// ...
RISCV::getVLSEGPseudo(NF, IsMasked, false, true,
                      Log2SEW, static_cast<unsigned>(LMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
// ...
Node->op_begin() + CurOp + NF);
// ...
"Element count mismatch");
// ...
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  // ...
  "values when XLEN=32");
// ...
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumOperands() - 4;
// ...
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
// ...
unsigned CurOp = 2 + NF;
// ...
NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
// ...
unsigned NF = Node->getNumOperands() - 5;
// ...
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
// ...
unsigned CurOp = 2 + NF;
// ...
"Element count mismatch");
// ...
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  // ...
  "values when XLEN=32");
// ...
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
// ...
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = Node->getConstantOperandVal(0);
// ...
assert((IntNo == Intrinsic::riscv_vsetvli ||
        IntNo == Intrinsic::riscv_vsetvlimax) &&
       "Unexpected vsetvli intrinsic");
// ...
bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
unsigned Offset = (VLMax ? 1 : 2);
// ...
"Unexpected number of operands");
// ...
Node->getConstantOperandVal(Offset + 1) & 0x7);
// ...
unsigned Opcode = RISCV::PseudoVSETVLI;
if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
// ...
Opcode = RISCV::PseudoVSETVLIX0;
// ...
VLOperand = Node->getOperand(1);
// ...
if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
  // ...
  if (isUInt<5>(AVL)) {
    // ...
    XLenVT, VLImm, VTypeIOp));
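// PseudoVSETVLI takes the AVL in a GPR; PseudoVSETVLIX0 is used for the
// vsetvlimax form, where rs1 = x0 with rd != x0 encodes AVL = VLMAX. A
// constant AVL that fits in a uimm5 is instead selected to the immediate
// (vsetivli-style) form via the VLImm path above.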
MVT VT = Node->getSimpleValueType(0);
unsigned Opcode = Node->getOpcode();
// ...
"Unexpected opcode");
// ...
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
// ...
bool SignExt = false;
// ...
uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
// ...
int64_t ShiftedVal = Val >> ShAmt;
if (!isInt<12>(ShiftedVal))
// ...
if (SignExt && ShAmt >= 32)
// ...
case ISD::AND: BinOpc = RISCV::ANDI; break;
case ISD::OR:  BinOpc = RISCV::ORI;  break;
case ISD::XOR: BinOpc = RISCV::XORI; break;
// ...
unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
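// tryShrinkShlLogicImm: for (and/or/xor (shl X, C), Imm) where Imm itself
// does not fit in a simm12 but (Imm >> C) does, the logic op is applied to
// the unshifted value with the narrowed immediate and the shift is redone
// afterwards, avoiding a constant materialization sequence.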
if (!Subtarget->hasVendorXTHeadBb())
// ...
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
// ...
MVT VT = Node->getSimpleValueType(0);
const unsigned RightShAmt = N1C->getZExtValue();
// ...
auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
// ...
const unsigned LeftShAmt = N01C->getZExtValue();
// ...
if (LeftShAmt > RightShAmt)
// ...
const unsigned Msb = MsbPlusOne - 1;
const unsigned Lsb = RightShAmt - LeftShAmt;
// ...
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
// ...
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
// ...
const unsigned Msb = ExtSize - 1;
const unsigned Lsb = RightShAmt;
// ...
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
if (!Subtarget->hasVendorXTHeadMemIdx())
// ...
"Unexpected addressing mode");
// ...
int64_t Offset = C->getSExtValue();
// ...
for (Shift = 0; Shift < 4; Shift++)
  if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
// ...
if (LoadVT == MVT::i8 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
else if (LoadVT == MVT::i8 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
else if (LoadVT == MVT::i16 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
else if (LoadVT == MVT::i16 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
else if (LoadVT == MVT::i32 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
else if (LoadVT == MVT::i32 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
else if (LoadVT == MVT::i64 && IsPre)
  Opcode = RISCV::TH_LDIB;
else if (LoadVT == MVT::i64 && IsPost)
  Opcode = RISCV::TH_LDIA;
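// XTHeadMemIdx opcode choice: the *IB forms are selected for pre-indexed
// loads and the *IA forms for post-indexed ones; the U variants are the
// zero-extending loads, and i64 loads have no U variant since a 64-bit
// load already fills the register.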
unsigned IntNo = Node->getConstantOperandVal(1);
// ...
assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
        IntNo == Intrinsic::riscv_sf_vc_i_se) &&
       "Unexpected vsetvli intrinsic");
// ...
unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));
// ...
Node->getOperand(4), Node->getOperand(5),
Node->getOperand(8), SEWOp,
Node->getOperand(0)};
// ...
auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
switch (LMulSDNode->getSExtValue()) {
// ...
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
                                              : RISCV::PseudoVC_I_SE_MF8;
// ...
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
                                              : RISCV::PseudoVC_I_SE_MF4;
// ...
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
                                              : RISCV::PseudoVC_I_SE_MF2;
// ...
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
                                              : RISCV::PseudoVC_I_SE_M1;
// ...
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
                                              : RISCV::PseudoVC_I_SE_M2;
// ...
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
                                              : RISCV::PseudoVC_I_SE_M4;
// ...
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
                                              : RISCV::PseudoVC_I_SE_M8;
// ...
Opcode, DL, Node->getSimpleValueType(0), Operands));
if (Node->isMachineOpcode()) {
// ...
unsigned Opcode = Node->getOpcode();
// ...
MVT VT = Node->getSimpleValueType(0);
// ...
bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
// ...
assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
auto *ConstNode = cast<ConstantSDNode>(Node);
if (ConstNode->isZero()) {
// ...
int64_t Imm = ConstNode->getSExtValue();
// ...
if (isUInt<8>(Imm) && isInt<6>(SignExtend64<8>(Imm)) && hasAllBUsers(Node))
  Imm = SignExtend64<8>(Imm);
// ...
if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
    hasAllHUsers(Node))
  Imm = SignExtend64<16>(Imm);
// ...
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
  Imm = SignExtend64<32>(Imm);
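// If every user of the constant only reads its low 8/16/32 bits
// (hasAllBUsers/hasAllHUsers/hasAllWUsers), the immediate can be sign
// extended from that width without changing observable results; the
// sign-extended value is often cheaper to materialize (e.g. one ADDI
// instead of LUI+ADDI).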
const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
auto [FPImm, NeedsFNeg] =
// ...
FNegOpc = RISCV::FSGNJN_H;
// ...
FNegOpc = RISCV::FSGNJN_S;
// ...
FNegOpc = RISCV::FSGNJN_D;
// ...
bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
// ...
bool HasZdinx = Subtarget->hasStdExtZdinx();
bool Is64Bit = Subtarget->is64Bit();
// ...
assert(Subtarget->hasStdExtZfbfmin());
Opc = RISCV::FMV_H_X;
// ...
Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
// ...
Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
// ...
Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
// ...
Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
// ...
if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
// ...
Opc = RISCV::FSGNJN_D;
// ...
Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
if (!Subtarget->hasStdExtZdinx())
// ...
Node->getOperand(0),
// ...
Node->getOperand(1),
// ...
if (Subtarget->hasStdExtZdinx()) {
// ...
if (!SDValue(Node, 0).use_empty()) {
  // ...
  Node->getOperand(0));
// ...
if (!SDValue(Node, 1).use_empty()) {
  // ...
  Node->getOperand(0));
// ...
if (!Subtarget->hasStdExtZfa())
// ...
"Unexpected subtarget");
// ...
if (!SDValue(Node, 0).use_empty()) {
  // ...
  Node->getOperand(0));
// ...
if (!SDValue(Node, 1).use_empty()) {
  // ...
  Node->getOperand(0));
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
unsigned ShAmt = N1C->getZExtValue();
// ...
unsigned XLen = Subtarget->getXLen();
// ...
if (TrailingZeros > 0 && LeadingZeros == 32) {
// ...
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
unsigned ShAmt = N1C->getZExtValue();
// ...
unsigned XLen = Subtarget->getXLen();
// ...
if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
// ...
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
// ...
if (ShAmt >= TrailingOnes)
// ...
if (TrailingOnes == 32) {
  // ...
  Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
// ...
if (HasBitTest && ShAmt + 1 == TrailingOnes) {
  // ...
  Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
// ...
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
if (Subtarget->hasVendorXTHeadBb()) {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
unsigned ShAmt = N1C->getZExtValue();
// ...
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
// ...
if (ExtSize >= 32 || ShAmt >= ExtSize)
// ...
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
// ...
SDValue N0 = Node->getOperand(0);
// ...
if (!Subtarget->hasVendorXTHeadBb())
// ...
auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
// ...
unsigned C2 = C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
// ...
bool IsCANDI = isInt<6>(N1C->getSExtValue());
// ...
C1 &= maskTrailingZeros<uint64_t>(C2);
// ...
C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
// ...
bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
// ...
if (C2 + 32 == Leading) {
// ...
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
    // ...
    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
  // ...
  RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
// ...
const unsigned Lsb = C2;
if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
// ...
bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
            // ...
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
// ...
Skip |= HasBitTest && Leading == XLen - 1;
if (OneUseOrZExtW && !Skip) {
  // ...
  RISCV::SLLI, DL, VT, X,
// ...
if (C2 + Leading < XLen &&
    C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
  // ...
  if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
  // ...
  if (OneUseOrZExtW && !IsCANDI) {
    // ...
    RISCV::SLLI, DL, VT, X,
// ...
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
    // ...
  unsigned SrliOpc = RISCV::SRLI;
  // ...
  if (isa<ConstantSDNode>(X.getOperand(1)) &&
      X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
    SrliOpc = RISCV::SRLIW;
    X = X.getOperand(0);
// ...
if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
    OneUseOrZExtW && !IsCANDI) {
  // ...
  RISCV::SRLIW, DL, VT, X,
// ...
if (Trailing > 0 && Leading + Trailing == 32 && C2 + Trailing < XLen &&
    OneUseOrZExtW && Subtarget->hasStdExtZba()) {
  // ...
  RISCV::SRLI, DL, VT, X,
  // ...
  RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
// ...
if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
  // ...
  RISCV::SRLI, DL, VT, X,
// ...
if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
  // ...
  RISCV::SRLIW, DL, VT, X,
// ...
if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
    Subtarget->hasStdExtZba()) {
  // ...
  RISCV::SRLI, DL, VT, X,
  // ...
  RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
const uint64_t C1 = N1C->getZExtValue();
// ...
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
// ...
bool Skip = C2 > 32 && isInt<12>(N1C->getSExtValue()) &&
            // ...
            isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == 32;
// ...
RISCV::SRAI, DL, VT, X,
// ...
if (C2 > Leading && Leading > 0 && Trailing > 0) {
// ...
if (isMask_64(C1) && !isInt<12>(N1C->getSExtValue())) {
  // ...
  if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())
// ...
SDValue N0 = Node->getOperand(0);
// ...
(C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
// ...
IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
// ...
bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
// ...
IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
// ...
unsigned XLen = Subtarget->getXLen();
// ...
unsigned ConstantShift = XLen - LeadingZeros;
// ...
uint64_t ShiftedC1 = C1 << ConstantShift;
// ...
ShiftedC1 = SignExtend64<32>(ShiftedC1);
if (Subtarget->hasVendorXCVmem() && !Subtarget->is64Bit()) {
  // ...
  SDValue Chain = Node->getOperand(0);
  // ...
  bool Simm12 = false;
  bool SignExtend = Load->getExtensionType() == ISD::SEXTLOAD;
  // ...
  if (auto ConstantOffset = dyn_cast<ConstantSDNode>(Offset)) {
    int ConstantVal = ConstantOffset->getSExtValue();
    Simm12 = isInt<12>(ConstantVal);
  // ...
  unsigned Opcode = 0;
  switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
  // ...
    if (Simm12 && SignExtend)
      Opcode = RISCV::CV_LB_ri_inc;
    else if (Simm12 && !SignExtend)
      Opcode = RISCV::CV_LBU_ri_inc;
    else if (!Simm12 && SignExtend)
      Opcode = RISCV::CV_LB_rr_inc;
    else
      Opcode = RISCV::CV_LBU_rr_inc;
    // ...
    if (Simm12 && SignExtend)
      Opcode = RISCV::CV_LH_ri_inc;
    else if (Simm12 && !SignExtend)
      Opcode = RISCV::CV_LHU_ri_inc;
    else if (!Simm12 && SignExtend)
      Opcode = RISCV::CV_LH_rr_inc;
    else
      Opcode = RISCV::CV_LHU_rr_inc;
    // ...
    if (Simm12)
      Opcode = RISCV::CV_LW_ri_inc;
    else
      Opcode = RISCV::CV_LW_rr_inc;
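// CORE-V (XCVmem) post-increment loads: the ri forms take a signed 12-bit
// immediate increment and the rr forms a register increment, so the choice
// above depends on whether the offset is a simm12 constant and on the
// load's extension type.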
unsigned IntNo = Node->getConstantOperandVal(0);
// ...
case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
  SDValue Src1 = Node->getOperand(1);
  SDValue Src2 = Node->getOperand(2);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
  bool IsCmpUnsignedZero = false;
  // ...
  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
        // ...
      IsCmpUnsignedZero = true;
  // ...
  unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
  // ...
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
// ...
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
// ...
if (IsCmpUnsignedZero) {
  // ...
  {Cmp, Cmp, VL, SEW}));
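// There is no vmsge(u).vx instruction, so the intrinsic is expanded:
// vmsge.vx becomes vmslt.vx followed by vmnand.mm to invert the result,
// and the unsigned compare against zero, which is always true, becomes a
// plain vmset.m.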
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
  SDValue Src1 = Node->getOperand(2);
  SDValue Src2 = Node->getOperand(3);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
  bool IsCmpUnsignedZero = false;
  // ...
  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
        // ...
      IsCmpUnsignedZero = true;
  // ...
  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
  // ...
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
// ...
#undef CASE_VMSLT_OPCODES
// ...
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
// ...
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
// ...
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
// ...
if (IsCmpUnsignedZero) {
  // ...
  if (Mask == MaskedOff) {
    // ...
    {Mask, MaskedOff, VL, MaskSEW}));
// ...
if (Mask == MaskedOff) {
  // ...
  {Mask, Cmp, VL, MaskSEW}));
// ...
{MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
// ...
{Cmp, Mask, VL, MaskSEW}));
// ...
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
unsigned IntNo = Node->getConstantOperandVal(1);
// ...
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
  // ...
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
  // ...
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
  // ...
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
  // ...
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
  // ...
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:
  // ...
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
  // ...
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:
  // ...
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {
  // ...
case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                  IntNo == Intrinsic::riscv_vluxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                   IntNo == Intrinsic::riscv_vloxei_mask;
  // ...
  MVT VT = Node->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  "Element count mismatch");
  // ...
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    // ...
    "values when XLEN=32");
  // ...
  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  static_cast<unsigned>(IndexLMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                  IntNo == Intrinsic::riscv_vlse_mask;
  // ...
  IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
  // ...
  MVT VT = Node->getSimpleValueType(0);
  // ...
  bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
  // ...
  if (HasPassthruOperand)
    Operands.push_back(Node->getOperand(CurOp++));
  // ...
  RISCV::getVLEPseudo(IsMasked, IsStrided, false, Log2SEW,
                      static_cast<unsigned>(LMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  // ...
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
  // ...
  MVT VT = Node->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  RISCV::getVLEPseudo(IsMasked, false, true,
                      Log2SEW, static_cast<unsigned>(LMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = Node->getConstantOperandVal(1);
// ...
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
  // ...
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
  // ...
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
  // ...
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
  // ...
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
  // ...
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:
  // ...
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
  // ...
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                  IntNo == Intrinsic::riscv_vsuxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                   IntNo == Intrinsic::riscv_vsoxei_mask;
  // ...
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  "Element count mismatch");
  // ...
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    // ...
    "values when XLEN=32");
  // ...
  IsMasked, IsOrdered, IndexLog2EEW,
  static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                  IntNo == Intrinsic::riscv_vsse_mask;
  // ...
  IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
  // ...
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  // ...
  Operands.push_back(Node->getOperand(CurOp++));
  // ...
  IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  // ...
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  // ...
case Intrinsic::riscv_sf_vc_x_se:
case Intrinsic::riscv_sf_vc_i_se:
MVT SrcVT = Node->getOperand(0).getSimpleValueType();
// ...
SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);
// ...
auto Idx = Node->getConstantOperandVal(2);
// ...
MVT SubVecContainerVT = SubVecVT;
// ...
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
// ...
[[maybe_unused]] bool ExactlyVecRegSized =
    // ...
    .isKnownMultipleOf(Subtarget->expandVScale(VecRegSize));
    // ...
    .getKnownMinValue()));
assert(Idx == 0 && (ExactlyVecRegSized || V.isUndef()));
// ...
MVT ContainerVT = VT;
// ...
ContainerVT = TLI.getContainerForFixedLengthVector(VT);
// ...
std::tie(SubRegIdx, Idx) =
    // ...
    ContainerVT, SubVecContainerVT, Idx, TRI);
// ...
[[maybe_unused]] bool IsSubVecPartReg =
// ...
assert((!IsSubVecPartReg || V.isUndef()) &&
       "Expecting lowering to have created legal INSERT_SUBVECTORs when "
       "the subvector is smaller than a full-sized register");
// ...
if (SubRegIdx == RISCV::NoSubRegister) {
  unsigned InRegClassID =
  // ...
  "Unexpected subvector extraction");
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);
MVT InVT = V.getSimpleValueType();
// ...
MVT SubVecContainerVT = VT;
// ...
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
// ...
InVT = TLI.getContainerForFixedLengthVector(InVT);
// ...
std::tie(SubRegIdx, Idx) =
    // ...
    InVT, SubVecContainerVT, Idx, TRI);
// ...
if (SubRegIdx == RISCV::NoSubRegister) {
// ...
"Unexpected subvector extraction");
if (!Node->getOperand(0).isUndef())
// ...
SDValue Src = Node->getOperand(1);
auto *Ld = dyn_cast<LoadSDNode>(Src);
// ...
if (!Ld || Ld->isIndexed())
// ...
EVT MemVT = Ld->getMemoryVT();
// ...
if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
// ...
Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
// ...
false, IsStrided, false,
Log2SEW, static_cast<unsigned>(LMUL));
unsigned Locality = Node->getConstantOperandVal(3);
// ...
if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
// ...
int NontemporalLevel = 0;
// ...
NontemporalLevel = 3;
// ...
NontemporalLevel = 1;
// ...
NontemporalLevel = 0;
// ...
if (NontemporalLevel & 0b1)
// ...
if (NontemporalLevel & 0b10)
std::vector<SDValue> &OutOps) {
// ...
switch (ConstraintID) {
// ...
assert(Found && "SelectAddrRegImm should always succeed");
OutOps.push_back(Op0);
OutOps.push_back(Op1);
// ...
OutOps.push_back(Op);
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
// ...
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isInt<12>(CVal)) {
bool IsPrefetch = false) {
  if (!isa<ConstantSDNode>(Addr))
  // ...
  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
  // ...
  int64_t Lo12 = SignExtend64<12>(CVal);
  // ...
  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (IsPrefetch && (Lo12 & 0b11111) != 0)
    // ...
    int64_t Hi20 = (Hi >> 12) & 0xfffff;
  // ...
  if (Seq.back().getOpcode() != RISCV::ADDI)
  // ...
  Lo12 = Seq.back().getImm();
  if (IsPrefetch && (Lo12 & 0b11111) != 0)
  // ...
  assert(!Seq.empty() && "Expected more instructions in sequence");
for (auto *Use : Add->uses()) {
// ...
EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
// ...
cast<StoreSDNode>(Use)->getValue() == Add)
// ...
cast<AtomicSDNode>(Use)->getVal() == Add)
unsigned MaxShiftAmount,
// ...
EVT VT = Addr.getSimpleValueType();
// ...
if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
  // ...
  if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
    // ...
    ShiftAmt = N.getConstantOperandVal(1);
// ...
return ShiftAmt != 0;
// ...
if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
  // ...
  isInt<12>(C1->getSExtValue())) {
// ...
} else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
  // ...
  UnwrapShl(Addr.getOperand(1), Index, Scale);
  // ...
} else if (UnwrapShl(Addr, Index, Scale)) {
MVT VT = Addr.getSimpleValueType();
// ...
int64_t RV32ZdinxRange = IsINX ? 4 : 0;
// ...
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
  // ...
  if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
    // ...
    GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
    if (CVal == 0 || Alignment > CVal) {
      int64_t CombinedOffset = CVal + GA->getOffset();
      // ...
      CombinedOffset, GA->getTargetFlags());
// ...
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
// ...
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
         "simm12 not already handled?");
  // ...
  if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
    int64_t Adj = CVal < 0 ? -2048 : 2047;
    // ...
    RISCV::ADDI, DL, VT, Addr.getOperand(0),
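// Offsets just outside the simm12 range are split in two: an ADDI adds the
// Adj part (+/-2048 or 2047) to the base register, and the remainder is
// folded into the memory instruction's 12-bit offset field, so no full
// constant materialization is needed.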
MVT VT = Addr.getSimpleValueType();
// ...
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal)) {
  // ...
  if ((CVal & 0b11111) != 0) {
// ...
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
// ...
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!(isInt<12>(CVal) && isInt<12>(CVal)) &&
         "simm12 not already handled?");
  // ...
  if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
    int64_t Adj = CVal < 0 ? -2048 : 2016;
    int64_t AdjustedOffset = CVal - Adj;
    // ...
    RISCV::ADDI, DL, VT, Addr.getOperand(0),
// ...
if (isa<ConstantSDNode>(Addr.getOperand(1)))
if (Imm != 0 && Imm % ShiftWidth == 0) {
// ...
if (Imm != 0 && Imm % ShiftWidth == 0) {
  // ...
  unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
// ...
if (Imm % ShiftWidth == ShiftWidth - 1) {
// ...
"Unexpected condition code!");
ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
if (CCVal != ExpectedCCVal)
// ...
if (!LHS.getValueType().isScalarInteger())
// ...
if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
  int64_t CVal = C->getSExtValue();
  // ...
  if (CVal == -2048) {
    // ...
    RISCV::XORI, DL, N->getValueType(0), LHS,
  // ...
  if (isInt<12>(CVal) || CVal == 2048) {
    // ...
    RISCV::ADDI, DL, N->getValueType(0), LHS,
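// RISC-V has no general seteq/setne instructions, so selectSETCC reduces
// the comparison to a test against zero: X == C becomes (ADDI X, -C), or
// XORI for C == -2048 whose negation does not fit in a simm12, and the
// result feeds a seqz/snez-style check.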
cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
  Val = N.getOperand(0);
// ...
auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
  if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
  // ...
  N.getConstantOperandVal(1) == ShiftAmt &&
// ...
MVT VT = N.getSimpleValueType();
// ...
auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
  Val = N.getOperand(0);
// ...
MVT VT = N.getSimpleValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
  // ...
  uint64_t Mask = N.getConstantOperandVal(1);
  // ...
  unsigned XLen = Subtarget->getXLen();
  // ...
  Mask &= maskTrailingZeros<uint64_t>(C2);
  // ...
  Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
  // ...
  if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
    // ...
    EVT VT = N.getValueType();
  // ...
  if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
    // ...
    EVT VT = N.getValueType();
// ...
isa<ConstantSDNode>(N.getOperand(1))) {
  uint64_t Mask = N.getConstantOperandVal(1);
  // ...
  unsigned XLen = Subtarget->getXLen();
  // ...
  if (C2 > Leading && Leading > 0 && Trailing == ShAmt) {
    // ...
    EVT VT = N.getValueType();
    // ...
    RISCV::SRLI, DL, VT, Val,
// ...
} else if (bool LeftShift = N.getOpcode() == ISD::SHL;
           (LeftShift || N.getOpcode() == ISD::SRL) &&
           isa<ConstantSDNode>(N.getOperand(1))) {
  // ...
  unsigned C1 = N.getConstantOperandVal(1);
  unsigned XLen = Subtarget->getXLen();
  // ...
  if (LeftShift && Leading == 32 && Trailing > 0 &&
      (Trailing + C1) == ShAmt) {
    // ...
    EVT VT = N.getValueType();
  // ...
  if (!LeftShift && Leading == 32 && Trailing > C1 &&
      (Trailing - C1) == ShAmt) {
    // ...
    EVT VT = N.getValueType();
// ...
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
// ...
uint64_t Mask = N.getConstantOperandVal(1);
// ...
Mask &= maskTrailingZeros<uint64_t>(C2);
// ...
if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
  // ...
  EVT VT = N.getValueType();
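// selectSHXADDOp / selectSHXADD_UWOp match shift-and-mask subtrees that can
// fold into Zba's sh1add/sh2add/sh3add (and their .uw forms): the leading
// and trailing zero counts of the mask decide whether the operand can be
// re-expressed as a single SRLI/SRLIW/SLLI feeding the scaled add.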
bool HasGlueOp = User->getGluedNode() != nullptr;
// ...
bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
// ...
const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
// ...
if (UserOpNo == VLIdx)
// ...
auto NumDemandedBits =
// ...
return NumDemandedBits && Bits >= *NumDemandedBits;
const unsigned Depth) const {
// ...
isa<ConstantSDNode>(Node) || Depth != 0) &&
"Unexpected opcode");
// ...
if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
// ...
for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
  // ...
  if (!User->isMachineOpcode())
  // ...
  switch (User->getMachineOpcode()) {
  // ...
  case RISCV::SLLI_UW:
  case RISCV::FMV_W_X:
  case RISCV::FCVT_H_W:
  case RISCV::FCVT_H_W_INX:
  case RISCV::FCVT_H_WU:
  case RISCV::FCVT_H_WU_INX:
  case RISCV::FCVT_S_W:
  case RISCV::FCVT_S_W_INX:
  case RISCV::FCVT_S_WU:
  case RISCV::FCVT_S_WU_INX:
  case RISCV::FCVT_D_W:
  case RISCV::FCVT_D_W_INX:
  case RISCV::FCVT_D_WU:
  case RISCV::FCVT_D_WU_INX:
  case RISCV::TH_REVW:
  case RISCV::TH_SRRIW:
  // ...
  if (UI.getOperandNo() == 1 && Bits >= Log2_32(Subtarget->getXLen()))
  // ...
  if (Bits >= Subtarget->getXLen() - User->getConstantOperandVal(1))
  // ...
  if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
  // ...
  unsigned ShAmt = User->getConstantOperandVal(1);
  // ...
  case RISCV::FMV_H_X:
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
  // ...
  if (Bits >= (Subtarget->getXLen() / 2))
  // ...
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
  // ...
  if (UI.getOperandNo() == 0 && Bits >= 32)
  // ...
  if (UI.getOperandNo() == 0 && Bits >= 8)
  // ...
  if (UI.getOperandNo() == 0 && Bits >= 16)
  // ...
  if (UI.getOperandNo() == 0 && Bits >= 32)
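// hasAllNBitUsers walks every user of Node and returns true only if each
// one provably reads no more than the low Bits bits (e.g. W-form ALU ops
// read 32 bits, FMV_H_X reads XLen/2); callers use it to substitute
// narrower immediates or W-form instructions without changing results.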
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  int64_t Offset = C->getSExtValue();
  // ...
  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
// ...
EVT Ty = N->getValueType(0);
// ...
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
  // ...
  N->getValueType(0));
} else if (C && C->isAllOnes()) {
  // ...
  N->getValueType(0), true);
} else if (isa<RegisterSDNode>(N) &&
           cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
  // ...
  N->getValueType(0), true);
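// selectVLOp: an AVL of all ones (or an explicit X0 register operand) is
// the sentinel for VLMAX and is re-emitted in the VLMAX encoding, while a
// constant AVL that fits in a uimm5 can stay as an immediate.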
if (!N.getOperand(0).isUndef())
// ...
N = N.getOperand(1);
// ...
!Splat.getOperand(0).isUndef())
// ...
assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");
// ...
SplatVal = Splat.getOperand(1);
std::function<bool(int64_t)> ValidateImm) {
// ...
if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
// ...
const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
// ...
"Unexpected splat operand type");
// ...
APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
// ...
if (!ValidateImm(SplatImm))
// ...
[](int64_t Imm) { return isInt<5>(Imm); });
// ...
N, SplatVal, *CurDAG, *Subtarget,
[](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
// ...
N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
// ...
N, SplatVal, *CurDAG, *Subtarget,
[Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
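// The "Plus1" validators accept [-15, 16] rather than simm5's [-16, 15]
// because the pattern that uses the splat encodes Imm - 1 into the
// instruction; the NonZero variant additionally rejects 0.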
auto IsExtOrTrunc = [](SDValue N) {
  switch (N->getOpcode()) {
// ...
while (IsExtOrTrunc(N)) {
  if (!N.hasOneUse() || N.getScalarValueSizeInBits() < 8)
  // ...
  N = N->getOperand(0);
// ...
->getLegalZfaFPImm(APF, VT)
// ...
if (VT == MVT::f64 && !Subtarget->is64Bit()) {
// ...
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  // ...
  if (!isInt<5>(ImmVal))
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  // ...
  if (N->getMachineOpcode() != RISCV::ADDIW ||
  // ...
  case RISCV::ADD:  Opc = RISCV::ADDW;  break;
  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SUB:  Opc = RISCV::SUBW;  break;
  case RISCV::MUL:  Opc = RISCV::MULW;  break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
  // ...
  !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
  // ...
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
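// doPeepholeSExtW looks for a sign extension (ADDIW rd, rs, 0, i.e.
// sext.w) whose input is an ADD/ADDI/SUB/MUL/SLLI or a T-Head multiply
// accumulate, and converts that producer to its W form, making the
// separate sext.w redundant.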
if (!isa<RegisterSDNode>(MaskOp) ||
    cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
// ...
const auto *Glued = GlueOp.getNode();
// ...
if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
    cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
// ...
const auto IsVMSet = [](unsigned Opc) {
  return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
         Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
         Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
         Opc == RISCV::PseudoVMSET_M_B8;
N->getOperand(N->getNumOperands() - 1));
// ...
if (!V.isMachineOpcode())
// ...
if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
  for (unsigned I = 1; I < V.getNumOperands(); I += 2)
// ...
return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
// ...
RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
// ...
unsigned MaskOpIdx = I->MaskOpIdx;
// ...
const unsigned Opc = I->UnmaskedPseudo;
// ...
"Masked and unmasked pseudos are inconsistent");
// ...
assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");
// ...
for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
  // ...
  if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
// ...
const auto *Glued = N->getGluedNode();
if (auto *TGlued = Glued->getGluedNode())
// ...
if (!N->memoperands_empty())
// ...
Result->setFlags(N->getFlags());
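// doPeepholeMaskedRVV: when a masked pseudo's V0 operand is known to be all
// ones (it comes from a vmset.m, as usesAllOnesMask above checks), the node
// is rewritten to the unmasked pseudo from the masked-pseudo table,
// dropping the mask operand entirely.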
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
  // ...
  Passthru = N->getOperand(0);
  False = N->getOperand(1);
  True = N->getOperand(2);
  Mask = N->getOperand(3);
  VL = N->getOperand(4);
  // ...
  Glue = N->getOperand(N->getNumOperands() - 1);
  assert(cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
  // ...
  "Expect True is the first output of an instruction.");
  // ...
  bool IsMasked = false;
  // ...
  RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
  if (!Info && HasTiedDest) {
    Info = RISCV::getMaskedPseudoInfo(TrueOpc);
  // ...
  assert(!(IsMasked && !HasTiedDest) && "Expected tied dest");
  // ...
  if (False != PassthruOpTrue)
  // ...
  unsigned TrueVLIndex =
      True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
  // ...
  auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
  auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
  // ...
  return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;
  // ...
  VL = GetMinVL(TrueVL, VL);
  // ...
  if (TrueVL != VL || !IsMasked)
  // ...
  unsigned MaskedOpc = Info->MaskedPseudo;
  // ...
  "Expected instructions with mask have policy operand.");
  // ...
  "Expected instructions with mask have a tied dest.");
  // ...
  bool MergeVLShrunk = VL != OrigVL;
  // ...
  const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
  assert(!IsMasked || NormalOpsEnd == Info->MaskOpIdx);
  // ...
  if (HasRoundingMode)
  // ...
  Ops.append({VL, SEW, PolicyOp});
  // ...
  if (!cast<MachineSDNode>(True)->memoperands_empty())
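// performCombineVMergeAndVOps folds a vmerge into the instruction that
// computes its "true" operand: when the false operand matches that
// instruction's passthru, the vmerge's mask and the smaller of the two VLs
// can be applied directly to the (masked form of the) producing pseudo.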
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;
  // ...
  if (N->use_empty() || !N->isMachineOpcode())
  // ...
  MadeChange |= performCombineVMergeAndVOps(N);
bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
  bool MadeChange = false;
  // ...
  if (N->use_empty() || !N->isMachineOpcode())
  // ...
  const unsigned Opc = N->getMachineOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
  // ...
  for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
  // ...
  Result->setFlags(N->getFlags());
static Register createTuple(ArrayRef< Register > Regs, const unsigned RegClassIDs[], const unsigned SubRegs[], MachineIRBuilder &MIB)
Create a REG_SEQUENCE instruction using the registers in Regs.
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
mir Rename Register Operands
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)
static bool isWorthFoldingAdd(SDValue Add)
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
static bool isImplicitDef(SDValue V)
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool usesAllOnesMask(SDValue MaskOp, SDValue GlueOp)
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, unsigned Bits, const TargetInstrInfo *TII)
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset, bool IsPrefetch=false)
static cl::opt< bool > UsePseudoMovImm("riscv-use-rematerializable-movimm", cl::Hidden, cl::desc("Use a rematerializable pseudoinstruction for 2 instruction " "constant materialization"), cl::init(false))
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)
static SDValue findVSplat(SDValue N)
static SDValue getMaskSetter(SDValue MaskOp, SDValue GlueOp)
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, std::function< bool(int64_t)> ValidateImm)
static bool IsVMerge(SDNode *N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
static StringRef getMemConstraintName(ConstraintCode C)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by other flags.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
uint64_t getScalarSizeInBits() const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
MVT getVectorElementType() const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
void setFlags(Flags f)
Bitwise OR the current flags with the given flags.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine, CodeGenOptLevel OptLevel)
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the least significant 5 bits of Offset shoule be all zeros.
bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset)
bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
void selectVLSEGFF(SDNode *Node, bool IsMasked)
bool selectFPImm(SDValue N, SDValue &Imm)
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
void selectSF_VC_X_SE(SDNode *Node)
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal)
bool hasAllHUsers(SDNode *Node) const
bool SelectInlineAsmMemoryOperand(const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
bool selectVSplat(SDValue N, SDValue &SplatVal)
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
bool hasAllBUsers(SDNode *Node) const
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool tryShrinkShlLogicImm(SDNode *Node)
void selectVSETVLI(SDNode *Node)
bool selectVLOp(SDValue N, SDValue &VL)
bool trySignedBitfieldExtract(SDNode *Node)
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset, bool IsINX=false)
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
bool tryIndexedLoad(SDNode *Node)
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal)
Quantity expandVScale(Quantity X) const
If the ElementCount or TypeSize X is scalable and VScale (VLEN) is exactly known, returns X converted...
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVII::VLMUL getLMUL(MVT VT)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
op_iterator op_begin() const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
unsigned getNumOperands() const
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOptLevel OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
bool mayRaiseFPException(SDNode *Node) const
Return whether the node may raise an FP exception.
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
static constexpr unsigned MaxRecursionDepth
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
allnodes_const_iterator allnodes_end() const
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getRegister(unsigned Reg, EVT VT)
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
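One way a peephole can use this: a sign-extend of the low 32 bits is a no-op when the operand already has more than 32 sign bits. A sketch under that assumption, where N is a hypothetical sext_inreg-style node:

SDValue Src = N->getOperand(0);
// More than 32 sign bits means bits [63:31] are all copies of bit 31,
// so re-extending from bit 31 changes nothing.
if (CurDAG->ComputeNumSignBits(Src) > 32)
  ReplaceUses(SDValue(N, 0), Src);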
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that is guaranteed to have the same semantics as an ADD.
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
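For example, an AND is redundant when every bit its mask would clear is already known zero; a sketch assuming Cst is the ConstantSDNode mask operand of N:

SDValue Src = N->getOperand(0);
const APInt &Mask = Cst->getAPIntValue();
// Bits outside Mask are ~Mask; if those are already known zero in Src,
// the AND keeps no information and can be dropped.
if (CurDAG->MaskedValueIsZero(Src, ~Mask))
  ReplaceUses(SDValue(N, 0), Src);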
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
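CreateStackTemporary combines naturally with getStore and MachinePointerInfo::getFixedStack when a value must round-trip through memory; a sketch, with DL and Val assumed:

// Allocate an 8-byte, 8-byte-aligned slot and store Val into it, recording
// precise fixed-stack pointer info for later alias analysis.
SDValue Slot = CurDAG->CreateStackTemporary(TypeSize::getFixed(8), Align(8));
int FI = cast<FrameIndexSDNode>(Slot.getNode())->getIndex();
MachinePointerInfo MPI =
    MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), FI);
SDValue Chain =
    CurDAG->getStore(CurDAG->getEntryNode(), DL, Val, Slot, MPI, Align(8));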
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
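Chained INSERT_SUBREGs are how register tuples get assembled; a sketch building a two-element M1 tuple, assuming V0Val and V1Val are M1-typed SDValues and TupleVT is the tuple's value type (typically MVT::Untyped):

// Start from an undefined tuple and insert each part at its subregister
// index.
SDValue Tuple = SDValue(
    CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, TupleVT), 0);
Tuple = CurDAG->getTargetInsertSubreg(RISCV::sub_vrm1_0, DL, TupleVT,
                                      Tuple, V0Val);
Tuple = CurDAG->getTargetInsertSubreg(RISCV::sub_vrm1_1, DL, TupleVT,
                                      Tuple, V1Val);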
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
ilist< SDNode >::iterator allnodes_iterator
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction, then an offset node that is added / subtracted from the base pointer to form the address (for indexed memory ops).
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ SIGN_EXTEND
Conversion operators.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register).
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with integer operands.
static bool hasRoundModeOp(uint64_t TSFlags)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool activeElementsAffectResult(uint64_t TSFlags)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
@ SPLAT_VECTOR_SPLIT_I64_VL
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
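These two entry points let a selector compare materialization strategies for an immediate; a sketch, with Imm and Subtarget assumed, and an illustrative length threshold:

// Cost of the classic single-register sequence (LUI/ADDI/SLLI/...).
RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
if (Seq.size() > 2) {
  // For long sequences, a two-register form (materialize a subpart, then
  // shift-and-add it into place) may be shorter.
  unsigned ShiftAmt, AddOpc;
  RISCVMatInt::InstSeq SeqLo =
      RISCVMatInt::generateTwoRegInstSeq(Imm, *Subtarget, ShiftAmt, AddOpc);
}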
static unsigned decodeVSEW(unsigned VSEW)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
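Taken together, these helpers build and decode the vtype immediate a vsetvli consumes; a small sketch:

// Encode SEW=32, LMUL=1, tail-agnostic, mask-undisturbed into a vtype
// immediate, then recover the SEW/LMUL ratio used for compatibility checks.
unsigned VType = RISCVVType::encodeVTYPE(RISCVII::VLMUL::LMUL_1, /*SEW=*/32,
                                         /*TailAgnostic=*/true,
                                         /*MaskAgnostic=*/false);
unsigned Ratio =
    RISCVVType::getSEWLMULRatio(/*SEW=*/32, RISCVII::VLMUL::LMUL_1);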
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
static constexpr unsigned RVVBitsPerBlock
static constexpr int64_t VLMaxSentinel
initializer< Ty > init(const Ty &Val)
static const MachineMemOperand::Flags MONontemporalBit1
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version.)
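isShiftedMask_64 together with the counting helpers above decomposes a mask into position and width, the shape a bit-field extract pattern wants; a sketch:

uint64_t M = 0x0FF0; // ones in bits [11:4]
if (isShiftedMask_64(M)) {
  unsigned Lsb = countr_zero(M);         // 4: index of the lowest set bit
  unsigned Width = countr_one(M >> Lsb); // 8: length of the run of ones
  // ... (srl (and X, M), Lsb) can then be matched as a Width-bit extract ...
}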
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version.)
CodeGenOptLevel
Code generation optimization level.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOptLevel OptLevel)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
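SignExtend64 and isUIntN (above) are the usual immediate-legality checks; a sketch with RawImm and ShAmt assumed:

// Interpret the low 12 bits of RawImm as a signed simm12, and check that
// ShAmt fits a 5-bit unsigned shift-amount field.
int64_t SImm = SignExtend64<12>(RawImm);
bool FitsUimm5 = isUIntN(5, ShAmt);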
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exactly tracked by the compiler.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
bool hasNoFPExcept() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.