20#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
29#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
32 "riscv-use-rematerializable-movimm",
cl::Hidden,
33 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
34 "constant materialization"),
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
bool MadeChange = false;
switch (N->getOpcode()) {

MVT VT = N->getSimpleValueType(0);

assert(N->getNumOperands() == 4 && "Unexpected number of operands");
MVT VT = N->getSimpleValueType(0);
assert(Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
       "Unexpected VTs!");

int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();

SDValue Result = CurDAG->getMemIntrinsicNode(
    ISD::INTRINSIC_W_CHAIN, DL, CurDAG->getVTList({VT, MVT::Other}), Ops,
    MVT::i64, MPI, Align(8), MachineMemOperand::MOLoad);

LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld:    ");
bool MadeChange = false;

if (N->use_empty() || !N->isMachineOpcode())
  continue;

MadeChange |= doPeepholeSExtW(N);
MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));
MadeChange |= doPeepholeMergeVVMFold();
MadeChange |= doPeepholeNoRegPassThru();
switch (Inst.getOpndKind()) {

if (Seq.size() > 3) {
  unsigned ShiftAmt, AddOpc;
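// --- addVectorLoadStoreOperands() ---
// Shared helper that collects the operand list common to the RVV load/store
// pseudo selections below: base pointer, optional stride or index operand,
// mask, VL, SEW, and (for loads) the policy immediate.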
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);

  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  SDValue Mask = Node->getOperand(CurOp++);

  Policy = Node->getConstantOperandVal(CurOp++);
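// --- selectVLSEG / selectVLSEGFF / selectVLXSEG / selectVSSEG / selectVSXSEG ---
// Each segment load/store selector looks up its pseudo in the tablegen'd
// tables included above (getVLSEGPseudo, getVLXSEGPseudo, ...) keyed on NF,
// mask, strided/indexed/fault-first form, SEW, and LMUL, then transfers the
// memory operand onto the new machine node.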
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

Operands.push_back(Node->getOperand(CurOp++));

const RISCV::VLSEGPseudo *P =
    RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                          static_cast<unsigned>(LMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

Operands.push_back(Node->getOperand(CurOp++));

const RISCV::VLSEGPseudo *P =
    RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                          Log2SEW, static_cast<unsigned>(LMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

Operands.push_back(Node->getOperand(CurOp++));

if (DecodedLMUL.second)
  ContainedTyNumElts /= DecodedLMUL.first;
else
  ContainedTyNumElts *= DecodedLMUL.first;
assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() &&
       "Element count mismatch");

if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  report_fatal_error("The V extension does not support EEW=64 for index "
                     "values when XLEN=32");
}
const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
    NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
    static_cast<unsigned>(IndexLMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

Operands.push_back(Node->getOperand(CurOp++));

const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
    NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

Operands.push_back(Node->getOperand(CurOp++));

if (DecodedLMUL.second)
  ContainedTyNumElts /= DecodedLMUL.first;
else
  ContainedTyNumElts *= DecodedLMUL.first;
assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() &&
       "Element count mismatch");

if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  report_fatal_error("The V extension does not support EEW=64 for index "
                     "values when XLEN=32");
}
const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
    NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
    static_cast<unsigned>(IndexLMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
unsigned IntNo = Node->getConstantOperandVal(0);

assert((IntNo == Intrinsic::riscv_vsetvli ||
        IntNo == Intrinsic::riscv_vsetvlimax) &&
       "Unexpected vsetvli intrinsic");

bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
unsigned Offset = (VLMax ? 1 : 2);
assert(Node->getNumOperands() == Offset + 2 &&
       "Unexpected number of operands");

RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
    Node->getConstantOperandVal(Offset + 1) & 0x7);

unsigned Opcode = RISCV::PseudoVSETVLI;
if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {

  Opcode = RISCV::PseudoVSETVLIX0;

  VLOperand = Node->getOperand(1);

if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
  uint64_t AVL = C->getZExtValue();
  if (isUInt<5>(AVL)) {
    ReplaceNode(Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL,
                                             XLenVT, VLImm, VTypeIOp));
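// --- tryShrinkShlLogicImm() ---
// Folds (shl (and/or/xor X, C), ShAmt) when the pre-shifted constant fits in
// simm12: the logic op is re-created as ANDI/ORI/XORI on the shifted-out
// constant, followed by SLLI (or SLLIW for sign-extended inputs).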
MVT VT = Node->getSimpleValueType(0);
unsigned Opcode = Node->getOpcode();
assert((Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR) &&
       "Unexpected opcode");

SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);

bool SignExt = false;

uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
  return false;

int64_t ShiftedVal = Val >> ShAmt;
if (!isInt<12>(ShiftedVal))
  return false;

if (SignExt && ShAmt >= 32)
  return false;

unsigned BinOpc;
switch (Opcode) {
default: llvm_unreachable("Unexpected opcode");
case ISD::AND: BinOpc = RISCV::ANDI; break;
case ISD::OR:  BinOpc = RISCV::ORI;  break;
case ISD::XOR: BinOpc = RISCV::XORI; break;
}

unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
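// --- trySignedBitfieldExtract() (XTheadBb only) ---
// Matches (sra (shl X, C2), C1) and (sra (sext_inreg X, VT), C1) and selects
// a single TH.EXT rd, rs1, msb, lsb signed bitfield extract.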
if (!Subtarget->hasVendorXTHeadBb())
  return false;

auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

SDValue N0 = Node->getOperand(0);

auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
                           MVT VT) {
  return CurDAG->getMachineNode(RISCV::TH_EXT, DL, VT, N0.getOperand(0),
                                CurDAG->getTargetConstant(Msb, DL, VT),
                                CurDAG->getTargetConstant(Lsb, DL, VT));
};

MVT VT = Node->getSimpleValueType(0);
const unsigned RightShAmt = N1C->getZExtValue();

// (sra (shl X, C2), C1) case.
auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));

const unsigned LeftShAmt = N01C->getZExtValue();

if (LeftShAmt > RightShAmt)
  return false;

const unsigned Msb = MsbPlusOne - 1;
const unsigned Lsb = RightShAmt - LeftShAmt;

SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);

// (sra (sext_inreg X, VT), C1) case.
const unsigned ExtSize =
    cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();

const unsigned Msb = ExtSize - 1;
const unsigned Lsb = RightShAmt;

SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
if (!Subtarget->hasVendorXTHeadMemIdx())
  return false;

assert((AM == ISD::PRE_INC || AM == ISD::POST_INC) &&
       "Unexpected addressing mode");

int64_t Offset = C->getSExtValue();

for (Shift = 0; Shift < 4; Shift++)
  if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
    break;

if (LoadVT == MVT::i8 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
else if (LoadVT == MVT::i8 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
else if (LoadVT == MVT::i16 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
else if (LoadVT == MVT::i16 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
else if (LoadVT == MVT::i32 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
else if (LoadVT == MVT::i32 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
else if (LoadVT == MVT::i64 && IsPre)
  Opcode = RISCV::TH_LDIB;
else if (LoadVT == MVT::i64 && IsPost)
  Opcode = RISCV::TH_LDIA;
else
  return false;
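// --- selectSF_VC_X_SE() (SiFive VCIX) ---
// Lowers the riscv_sf_vc_x_se / riscv_sf_vc_i_se intrinsics to the
// PseudoVC_{X,I}_SE_* pseudo that matches the encoded LMUL operand.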
unsigned IntNo = Node->getConstantOperandVal(1);

assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
        IntNo == Intrinsic::riscv_sf_vc_i_se) &&
       "Unexpected SiFive VCIX intrinsic");

unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));

SmallVector<SDValue> Operands = {Node->getOperand(2), Node->getOperand(3),
                                 Node->getOperand(4), Node->getOperand(5),
                                 Node->getOperand(8), SEWOp,
                                 Node->getOperand(0)};

unsigned Opcode;
auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
switch (LMulSDNode->getSExtValue()) {
case 5:
  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
                                                : RISCV::PseudoVC_I_SE_MF8;
  break;
case 6:
  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
                                                : RISCV::PseudoVC_I_SE_MF4;
  break;
case 7:
  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
                                                : RISCV::PseudoVC_I_SE_MF2;
  break;
case 0:
  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
                                                : RISCV::PseudoVC_I_SE_M1;
  break;
case 1:
  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
                                                : RISCV::PseudoVC_I_SE_M2;
  break;
case 2:
  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
                                                : RISCV::PseudoVC_I_SE_M4;
  break;
case 3:
  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
                                                : RISCV::PseudoVC_I_SE_M8;
  break;
}

ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL,
                                         Node->getSimpleValueType(0),
                                         Operands));
#define INST_NF_CASE(NAME, NF)                                                 \
  case Intrinsic::riscv_##NAME##NF:                                            \
    return NF;
#define INST_NF_CASE_MASK(NAME, NF)                                           \
  case Intrinsic::riscv_##NAME##NF##_mask:                                     \
    return NF;
#define INST_NF_CASE_FF(NAME, NF)                                             \
  case Intrinsic::riscv_##NAME##NF##ff:                                        \
    return NF;
#define INST_NF_CASE_FF_MASK(NAME, NF)                                        \
  case Intrinsic::riscv_##NAME##NF##ff_mask:                                   \
    return NF;
#define INST_ALL_NF_CASE_BASE(MACRO_NAME, NAME)                                \
  MACRO_NAME(NAME, 2)                                                          \
  MACRO_NAME(NAME, 3)                                                          \
  MACRO_NAME(NAME, 4)                                                          \
  MACRO_NAME(NAME, 5)                                                          \
  MACRO_NAME(NAME, 6)                                                          \
  MACRO_NAME(NAME, 7)                                                          \
  MACRO_NAME(NAME, 8)
#define INST_ALL_NF_CASE(NAME)                                                 \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE, NAME)                                    \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE_MASK, NAME)
#define INST_ALL_NF_CASE_WITH_FF(NAME)                                         \
  INST_ALL_NF_CASE(NAME)                                                       \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF, NAME)                                 \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF_MASK, NAME)
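// --- RISCVDAGToDAGISel::Select() ---
// Main selection hook. The fragments below come from its opcode switch:
// ISD::Constant and ISD::ConstantFP materialization, the
// RISCVISD::BuildPairF64/SplitF64 cases, shift/and/mul combines, and the
// intrinsic dispatch.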
if (Node->isMachineOpcode()) {
  LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
  Node->setNodeId(-1);
  return; // Already selected!
}

unsigned Opcode = Node->getOpcode();

MVT VT = Node->getSimpleValueType(0);

bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();

assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
auto *ConstNode = cast<ConstantSDNode>(Node);
if (ConstNode->isZero()) {
  SDValue New =
      CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, VT);
  ReplaceNode(Node, New.getNode());
  return;
}
int64_t Imm = ConstNode->getSExtValue();

// If only the lower 8 bits are used, try to convert this to a simm6 by
// sign-extending bit 7.
if (isUInt<8>(Imm) && isInt<6>(SignExtend64<8>(Imm)) && hasAllBUsers(Node))
  Imm = SignExtend64<8>(Imm);

// Same for the lower 16 bits.
if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
    hasAllHUsers(Node))
  Imm = SignExtend64<16>(Imm);

// If all users only use the lower word, sign extend from bit 31.
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
  Imm = SignExtend64<32>(Imm);
const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();

bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;

bool HasZdinx = Subtarget->hasStdExtZdinx();
bool Is64Bit = Subtarget->is64Bit();

case MVT::bf16:
  assert(Subtarget->hasStdExtZfbfmin());
  Opc = RISCV::FMV_H_X;
  break;
case MVT::f16:
  Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
  break;
case MVT::f32:
  Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
  break;
case MVT::f64:
  if (Is64Bit)
    Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
  else
    Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
  break;

if (VT.SimpleTy == MVT::f16 && Opc == RISCV::COPY) {
  Res =
      CurDAG->getTargetExtractSubreg(RISCV::sub_16, DL, VT, Imm).getNode();
} else if (VT.SimpleTy == MVT::f32 && Opc == RISCV::COPY) {
  Res =
      CurDAG->getTargetExtractSubreg(RISCV::sub_32, DL, VT, Imm).getNode();
} else if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)

// For f64 -0.0, we need to insert a fneg.d idiom after materializing +0.0.
if (NegZeroF64) {
  Opc = RISCV::FSGNJN_D;
  if (HasZdinx)
    Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
959 "BuildPairF64 only handled here on rv32i_zdinx");
976 "SplitF64 only handled here on rv32i_zdinx");
978 if (!
SDValue(Node, 0).use_empty()) {
980 Node->getValueType(0),
981 Node->getOperand(0));
985 if (!
SDValue(Node, 1).use_empty()) {
987 RISCV::sub_gpr_odd,
DL, Node->getValueType(1), Node->getOperand(0));
996 "SplitGPRPair should already be handled");
998 if (!Subtarget->hasStdExtZfa())
1001 "Unexpected subtarget");
1004 if (!
SDValue(Node, 0).use_empty()) {
1006 Node->getOperand(0));
1009 if (!
SDValue(Node, 1).use_empty()) {
1011 Node->getOperand(0));
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

SDValue N0 = Node->getOperand(0);

unsigned ShAmt = N1C->getZExtValue();

unsigned XLen = Subtarget->getXLen();

if (TrailingZeros > 0 && LeadingZeros == 32) {

if (TrailingZeros == 0 && LeadingZeros > ShAmt &&
    XLen - LeadingZeros > 11 && LeadingZeros != 32) {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

SDValue N0 = Node->getOperand(0);

unsigned ShAmt = N1C->getZExtValue();

unsigned XLen = Subtarget->getXLen();

if (LeadingZeros == 32 && TrailingZeros > ShAmt) {

Mask |= maskTrailingOnes<uint64_t>(ShAmt);

if (ShAmt >= TrailingOnes)
  break;

if (TrailingOnes == 32) {
  SDNode *SRLI = CurDAG->getMachineNode(
      Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
      N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
  ReplaceNode(Node, SRLI);
  return;
}

if (HasBitTest && ShAmt + 1 == TrailingOnes) {
  SDNode *BEXTI = CurDAG->getMachineNode(
      Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
      N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
  ReplaceNode(Node, BEXTI);
  return;
}

unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
if (Subtarget->hasVendorXTHeadBb()) {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

SDValue N0 = Node->getOperand(0);

unsigned ShAmt = N1C->getZExtValue();
unsigned ExtSize =
    cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();

if (ExtSize >= 32 || ShAmt >= ExtSize)
  break;
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

SDValue N0 = Node->getOperand(0);

if (!Subtarget->hasVendorXTHeadBb())
  break;

auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));

unsigned C2 = C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

bool IsCANDI = isInt<6>(N1C->getSExtValue());

if (LeftShift)
  C1 &= maskTrailingZeros<uint64_t>(C2);
else
  C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
if (C2 + 32 == Leading) {

if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
    X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
  SDNode *SRLIW = CurDAG->getMachineNode(RISCV::SRLIW, DL, VT,
                                         SDValue(SRAIW, 0),

const unsigned Lsb = C2;
if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
  return;

bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
            X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;

Skip |= HasBitTest && Leading == XLen - 1;
if (OneUseOrZExtW && !Skip) {
  SDNode *SLLI = CurDAG->getMachineNode(RISCV::SLLI, DL, VT, X,

if (C2 + Leading < XLen &&
    C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {

  if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {

  if (OneUseOrZExtW && !IsCANDI) {
    SDNode *SLLI = CurDAG->getMachineNode(RISCV::SLLI, DL, VT, X,
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
    !IsCANDI) {
  unsigned SrliOpc = RISCV::SRLI;
  // If the input is zexti32, use SRLIW instead.
  if (X.getOpcode() == ISD::AND &&
      isa<ConstantSDNode>(X.getOperand(1)) &&
      X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
    SrliOpc = RISCV::SRLIW;
    X = X.getOperand(0);
  }

if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
    OneUseOrZExtW && !IsCANDI) {
  SDNode *SRLIW = CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, X,

if (Trailing > 0 && Leading + Trailing == 32 && C2 + Trailing < XLen &&
    OneUseOrZExtW && Subtarget->hasStdExtZba()) {
  SDNode *SRLI = CurDAG->getMachineNode(RISCV::SRLI, DL, VT, X,
  SDNode *SLLI_UW = CurDAG->getMachineNode(RISCV::SLLI_UW, DL, VT,
                                           SDValue(SRLI, 0),

if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
  SDNode *SRLI = CurDAG->getMachineNode(RISCV::SRLI, DL, VT, X,

if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
  SDNode *SRLIW = CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, X,

if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
    Subtarget->hasStdExtZba()) {
  SDNode *SRLI = CurDAG->getMachineNode(RISCV::SRLI, DL, VT, X,
  SDNode *SLLI_UW = CurDAG->getMachineNode(RISCV::SLLI_UW, DL, VT,
                                           SDValue(SRLI, 0),
const uint64_t C1 = N1C->getZExtValue();

unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

bool Skip = C2 > 32 && isInt<12>(N1C->getSExtValue()) &&
            isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == 32;

SDNode *SRAI = CurDAG->getMachineNode(RISCV::SRAI, DL, VT, X,

if (C2 > Leading && Leading > 0 && Trailing > 0) {

if (isMask_64(C1) && !isInt<12>(N1C->getSExtValue())) {
  const unsigned Msb = llvm::bit_width(C1) - 1;
  if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
    return;
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())
  break;

SDValue N0 = Node->getOperand(0);

bool IsANDIOrZExt =
    isInt<12>(C2) ||
    (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());

IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
  break;

bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();

IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
  break;

unsigned XLen = Subtarget->getXLen();

unsigned ConstantShift = XLen - LeadingZeros;

uint64_t ShiftedC1 = C1 << ConstantShift;
// On RV32 the constant needs to be sign extended.
if (XLen == 32)
  ShiftedC1 = SignExtend64<32>(ShiftedC1);
if (Subtarget->hasVendorXCVmem() && !Subtarget->is64Bit()) {

  SDValue Chain = Node->getOperand(0);

  bool Simm12 = false;
  bool SignExtend = Load->getExtensionType() == ISD::SEXTLOAD;

  if (auto ConstantOffset = dyn_cast<ConstantSDNode>(Offset)) {
    int ConstantVal = ConstantOffset->getSExtValue();
    Simm12 = isInt<12>(ConstantVal);
  }

  unsigned Opcode = 0;
  switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
  case MVT::i8:
    if (Simm12 && SignExtend)
      Opcode = RISCV::CV_LB_ri_inc;
    else if (Simm12 && !SignExtend)
      Opcode = RISCV::CV_LBU_ri_inc;
    else if (!Simm12 && SignExtend)
      Opcode = RISCV::CV_LB_rr_inc;
    else
      Opcode = RISCV::CV_LBU_rr_inc;
    break;
  case MVT::i16:
    if (Simm12 && SignExtend)
      Opcode = RISCV::CV_LH_ri_inc;
    else if (Simm12 && !SignExtend)
      Opcode = RISCV::CV_LHU_ri_inc;
    else if (!Simm12 && SignExtend)
      Opcode = RISCV::CV_LH_rr_inc;
    else
      Opcode = RISCV::CV_LHU_rr_inc;
    break;
  case MVT::i32:
    if (Simm12)
      Opcode = RISCV::CV_LW_ri_inc;
    else
      Opcode = RISCV::CV_LW_rr_inc;
    break;
  }
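// In the ISD::INTRINSIC_WO_CHAIN case: vmsge(u) has no native instruction,
// so it is expanded here — e.g. "vmsge.vx v0, va, x" becomes
// "vmslt.vx v0, va, x; vmnand.mm v0, v0, v0" — with cheaper forms when the
// scalar operand is a small constant or the minimum value.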
unsigned IntNo = Node->getConstantOperandVal(0);
switch (IntNo) {
case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
  SDValue Src1 = Node->getOperand(1);
  SDValue Src2 = Node->getOperand(2);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
  bool IsCmpConstant = false;
  bool IsCmpMinimum = false;

  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    IsCmpConstant = true;
    CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
        break;
      IsCmpMinimum = true;
    }

  IsCmpMinimum = true;

  unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode, VMSGTOpcode;

#define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix                 \
                             : RISCV::PseudoVMSGT_VX_##suffix;                 \
    break;
#undef CASE_VMSLT_OPCODES

#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)                            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix;                               \
    break;
#undef CASE_VMNAND_VMSET_OPCODES

  if (IsCmpConstant) {
    SDValue Imm =
        selectImm(CurDAG, SDLoc(Src2), XLenVT, CVal - 1, *Subtarget);
    ReplaceNode(Node, CurDAG->getMachineNode(VMSGTOpcode, DL, VT,
                                             {Src1, Imm, VL, SEW}));
    return;
  }

  ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                           {Cmp, Cmp, VL, MaskSEW}));
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
  SDValue Src1 = Node->getOperand(2);
  SDValue Src2 = Node->getOperand(3);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
  bool IsCmpConstant = false;
  bool IsCmpMinimum = false;

  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    IsCmpConstant = true;
    CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
        break;
      IsCmpMinimum = true;
    }

  IsCmpMinimum = true;

  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
      VMOROpcode, VMSGTMaskOpcode;

#define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSGT_VX_##suffix##_MASK;      \
    break;
#undef CASE_VMSLT_OPCODES

#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES

  SDValue MaskedOff = Node->getOperand(1);
  SDValue Mask = Node->getOperand(4);

  if (Mask == MaskedOff) {
    ReplaceNode(Node, CurDAG->getMachineNode(VMOROpcode, DL, VT,
                                             {Mask, MaskedOff, VL, MaskSEW}));
    return;
  }

  if (Mask == MaskedOff) {
    ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                             {Mask, Cmp, VL, MaskSEW}));
    return;
  }

  if (IsCmpConstant) {
    ReplaceNode(Node,
                CurDAG->getMachineNode(
                    VMSGTMaskOpcode, DL, VT,
                    {MaskedOff, Src1, Imm, V0, VL, SEW, Glue}));
    return;
  }

  SDValue Cmp = SDValue(
      CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                             {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
      0);
  ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                           {Cmp, Mask, VL, MaskSEW}));
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
  return selectVSETVLI(Node);

// In the ISD::INTRINSIC_W_CHAIN case:
unsigned IntNo = Node->getConstantOperandVal(1);
switch (IntNo) {
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
  selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
              /*IsStrided*/ false);
  return;
}
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
  selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
              /*IsStrided*/ false);
  return;
}
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
  selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
              /*IsStrided*/ true);
  return;
}
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
  selectVLSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
              /*IsStrided*/ true);
  return;
}
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
  selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
               /*IsOrdered*/ true);
  return;
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:
  selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
               /*IsOrdered*/ false);
  return;
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
  selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
               /*IsOrdered*/ true);
  return;
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:
  selectVLXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
               /*IsOrdered*/ false);
  return;
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {
  selectVLSEGFF(Node, getSegInstNF(IntNo), /*IsMasked*/ false);
  return;
}
case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
  selectVLSEGFF(Node, getSegInstNF(IntNo), /*IsMasked*/ true);
  return;
}
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                  IntNo == Intrinsic::riscv_vluxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                   IntNo == Intrinsic::riscv_vloxei_mask;

  MVT VT = Node->getSimpleValueType(0);

  Operands.push_back(Node->getOperand(CurOp++));

  assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() &&
         "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
      IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                  IntNo == Intrinsic::riscv_vlse_mask;
  bool IsStrided =
      IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

  MVT VT = Node->getSimpleValueType(0);

  // riscv_vlm has no passthru operand.
  bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;

  if (HasPassthruOperand)
    Operands.push_back(Node->getOperand(CurOp++));

  const RISCV::VLEPseudo *P =
      RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                          static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
}
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

  MVT VT = Node->getSimpleValueType(0);

  Operands.push_back(Node->getOperand(CurOp++));

  const RISCV::VLEPseudo *P =
      RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
                          Log2SEW, static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
// In the ISD::INTRINSIC_VOID case:
unsigned IntNo = Node->getConstantOperandVal(1);
switch (IntNo) {
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
  selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
              /*IsStrided*/ false);
  return;
}
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
  selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
              /*IsStrided*/ false);
  return;
}
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
  selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
              /*IsStrided*/ true);
  return;
}
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
  selectVSSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
              /*IsStrided*/ true);
  return;
}
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
  selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
               /*IsOrdered*/ true);
  return;
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:
  selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ false,
               /*IsOrdered*/ false);
  return;
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
  selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
               /*IsOrdered*/ true);
  return;
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
  selectVSXSEG(Node, getSegInstNF(IntNo), /*IsMasked*/ true,
               /*IsOrdered*/ false);
  return;
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                  IntNo == Intrinsic::riscv_vsuxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                   IntNo == Intrinsic::riscv_vsoxei_mask;

  MVT VT = Node->getOperand(2)->getSimpleValueType(0);

  Operands.push_back(Node->getOperand(CurOp++));

  assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() &&
         "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
      IsMasked, IsOrdered, IndexLog2EEW,
      static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                  IntNo == Intrinsic::riscv_vsse_mask;
  bool IsStrided =
      IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

  MVT VT = Node->getOperand(2)->getSimpleValueType(0);

  Operands.push_back(Node->getOperand(CurOp++));

  const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
      IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
}
case Intrinsic::riscv_sf_vc_x_se:
case Intrinsic::riscv_sf_vc_i_se:
  selectSF_VC_X_SE(Node);
  return;
MVT SrcVT = Node->getOperand(0).getSimpleValueType();

SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);

auto Idx = Node->getConstantOperandVal(2);

MVT SubVecContainerVT = SubVecVT;

SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);

[[maybe_unused]] bool ExactlyVecRegSized =
    Subtarget->expandVScale(SubVecVT.getSizeInBits())
        .isKnownMultipleOf(Subtarget->expandVScale(VecRegSize));
assert(isPowerOf2_64(Subtarget->expandVScale(SubVecVT.getSizeInBits())
                         .getKnownMinValue()));
assert(Idx == 0 && (ExactlyVecRegSized || V.isUndef()));

MVT ContainerVT = VT;

ContainerVT = TLI.getContainerForFixedLengthVector(VT);

std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        ContainerVT, SubVecContainerVT, Idx, TRI);

[[maybe_unused]] bool IsSubVecPartReg =

assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg ||
        V.isUndef()) &&
       "Expecting lowering to have created legal INSERT_SUBVECTORs when "
       "the subvector is smaller than a full-sized register");

if (SubRegIdx == RISCV::NoSubRegister) {
  unsigned InRegClassID =
      RISCVTargetLowering::getRegClassIDForVecVT(ContainerVT);
  assert(InRegClassID ==
             RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) &&
         "Unexpected subvector extraction");
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);
MVT InVT = V.getSimpleValueType();

MVT SubVecContainerVT = VT;

SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);

InVT = TLI.getContainerForFixedLengthVector(InVT);

std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        InVT, SubVecContainerVT, Idx, TRI);

if (SubRegIdx == RISCV::NoSubRegister) {
  assert(InRegClassID ==
             RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) &&
         "Unexpected subvector extraction");
if (!Node->getOperand(0).isUndef())
  break;
SDValue Src = Node->getOperand(1);
auto *Ld = dyn_cast<LoadSDNode>(Src);

if (!Ld || Ld->isIndexed())
  break;
EVT MemVT = Ld->getMemoryVT();

if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
  break;

Operands.append({VL, SEW, PolicyOp, Ld->getChain()});

const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
    /*IsMasked*/ false, IsStrided, /*FF*/ false,
    Log2SEW, static_cast<unsigned>(LMUL));
unsigned Locality = Node->getConstantOperandVal(3);

if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {

  int NontemporalLevel = 0;
  switch (Locality) {
  case 0:
    NontemporalLevel = 3; // NTL.ALL
    break;
  case 1:
    NontemporalLevel = 1; // NTL.PALL
    break;
  case 2:
    NontemporalLevel = 0; // NTL.P1
    break;
  }

  if (NontemporalLevel & 0b1)
    MMO->setFlags(MONontemporalBit0);
  if (NontemporalLevel & 0b10)
    MMO->setFlags(MONontemporalBit1);
bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, InlineAsm::ConstraintCode ConstraintID,
    std::vector<SDValue> &OutOps) {

  switch (ConstraintID) {

    assert(Found && "SelectAddrRegImm should always succeed");
    OutOps.push_back(Op0);
    OutOps.push_back(Op1);

    OutOps.push_back(Op);

if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {

if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isInt<12>(CVal)) {
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
                               const MVT VT, const RISCVSubtarget *Subtarget,
                               SDValue Addr, SDValue &Base, SDValue &Offset,
                               bool IsPrefetch = false,
                               bool IsRV32Zdinx = false) {
  if (!isa<ConstantSDNode>(Addr))
    return false;

  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();

  int64_t Lo12 = SignExtend64<12>(CVal);

  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (IsPrefetch && (Lo12 & 0b11111) != 0)
      return false;
    if (IsRV32Zdinx && !isInt<12>(Lo12 + 4))
      return false;

    int64_t Hi20 = (Hi >> 12) & 0xfffff;

  if (Seq.back().getOpcode() != RISCV::ADDI)
    return false;
  Lo12 = Seq.back().getImm();
  if (IsPrefetch && (Lo12 & 0b11111) != 0)
    return false;
  if (IsRV32Zdinx && !isInt<12>(Lo12 + 4))
    return false;

  assert(!Seq.empty() && "Expected more instructions in sequence");
static bool isWorthFoldingAdd(SDValue Add) {
  for (auto *User : Add->users()) {

    EVT VT = cast<MemSDNode>(User)->getMemoryVT();

    // The add can't be the value operand of a store.
    if (User->getOpcode() == ISD::STORE &&
        cast<StoreSDNode>(User)->getValue() == Add)
      return false;
    if (User->getOpcode() == ISD::ATOMIC_STORE &&
        cast<AtomicSDNode>(User)->getVal() == Add)
      return false;
bool RISCVDAGToDAGISel::SelectAddrRegRegScale(SDValue Addr,
                                              unsigned MaxShiftAmount,
                                              SDValue &Base, SDValue &Index,
                                              SDValue &Scale) {
  EVT VT = Addr.getSimpleValueType();
  auto UnwrapShl = [this, VT, MaxShiftAmount](SDValue N, SDValue &Index,
                                              SDValue &Shift) {
    uint64_t ShiftAmt = 0;
    Index = N;

    if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
      // Only match shifts by a value in range [0, MaxShiftAmount].
      if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
        Index = N.getOperand(0);
        ShiftAmt = N.getConstantOperandVal(1);
      }
    }
    Shift = CurDAG->getTargetConstant(ShiftAmt, SDLoc(N), VT);
    return ShiftAmt != 0;
  };

  if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
    SDValue AddrB = Addr.getOperand(0);
    if (AddrB.getOpcode() == ISD::ADD &&
        UnwrapShl(AddrB.getOperand(0), Index, Scale) &&
        !isa<ConstantSDNode>(AddrB.getOperand(1)) &&
        isInt<12>(C1->getSExtValue())) {

  } else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
    Base = Addr.getOperand(1);
  } else {
    UnwrapShl(Addr.getOperand(1), Index, Scale);
    Base = Addr.getOperand(0);
  }
  } else if (UnwrapShl(Addr, Index, Scale)) {
MVT VT = Addr.getSimpleValueType();

if (auto *GA = dyn_cast<GlobalAddressSDNode>(Addr.getOperand(1))) {
  Align Alignment = commonAlignment(
      GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
  if (Alignment > 4) {

if (auto *CP = dyn_cast<ConstantPoolSDNode>(Addr.getOperand(1))) {
  Align Alignment = commonAlignment(CP->getAlign(), CP->getOffset());
  if (Alignment > 4) {

int64_t RV32ZdinxRange = IsRV32Zdinx ? 4 : 0;

int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {

  if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
    Align Alignment = commonAlignment(
        GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
    if ((CVal == 0 || Alignment > CVal) &&

      int64_t CombinedOffset = CVal + GA->getOffset();
      Offset = CurDAG->getTargetGlobalAddress(
          GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
          CombinedOffset, GA->getTargetFlags());

  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))

if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
         "simm12 not already handled?");

  if (CVal >= -4096 && CVal <= (4094 - RV32ZdinxRange)) {
    int64_t Adj = CVal < 0 ? -2048 : 2047;

if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset,
                       /*IsPrefetch=*/false, RV32ZdinxRange)) {

if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset,
                       /*IsPrefetch=*/false, RV32ZdinxRange))
MVT VT = Addr.getSimpleValueType();

int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal)) {

  // The offset must have its low 5 bits clear.
  if ((CVal & 0b11111) != 0) {

if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))

if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!isInt<12>(CVal) && "simm12 not already handled?");

  if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
    int64_t Adj = CVal < 0 ? -2048 : 2016;
    int64_t AdjustedOffset = CVal - Adj;
    Base = SDValue(
        CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),

if (isa<ConstantSDNode>(Addr.getOperand(1)))
if (Imm != 0 && Imm % ShiftWidth == 0) {

if (Imm != 0 && Imm % ShiftWidth == 0) {
  unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;

if (Imm % ShiftWidth == ShiftWidth - 1) {
2971 "Unexpected condition code!");
2978 ISD::CondCode CCVal = cast<CondCodeSDNode>(
N->getOperand(2))->get();
2979 if (CCVal != ExpectedCCVal)
2985 if (!
LHS.getValueType().isScalarInteger())
2996 if (
auto *
C = dyn_cast<ConstantSDNode>(
RHS)) {
2997 int64_t CVal =
C->getSExtValue();
3000 if (CVal == -2048) {
3003 RISCV::XORI,
DL,
N->getValueType(0),
LHS,
3010 if (isInt<12>(CVal) || CVal == 2048) {
3013 RISCV::ADDI,
DL,
N->getValueType(0),
LHS,
3021 RISCV::BINVI,
DL,
N->getValueType(0),
LHS,
3037 cast<VTSDNode>(
N.getOperand(1))->getVT().getSizeInBits() == Bits) {
3038 Val =
N.getOperand(0);
cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
  Val = N.getOperand(0);

auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
  if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
    return N;

  if (N.getConstantOperandVal(1) == ShiftAmt &&

MVT VT = N.getSimpleValueType();

auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
  Val = N.getOperand(0);

MVT VT = N.getSimpleValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {

  uint64_t Mask = N.getConstantOperandVal(1);

  unsigned XLen = Subtarget->getXLen();
  if (LeftShift)
    Mask &= maskTrailingZeros<uint64_t>(C2);
  else
    Mask &= maskTrailingOnes<uint64_t>(XLen - C2);

  if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
    EVT VT = N.getValueType();

  if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
    EVT VT = N.getValueType();

  uint64_t Mask = N.getConstantOperandVal(1);

  unsigned XLen = Subtarget->getXLen();

  if (C2 > Leading && Leading > 0 && Trailing == ShAmt) {
    EVT VT = N.getValueType();
    Val = SDValue(CurDAG->getMachineNode(RISCV::SRLI, DL, VT, Val,
}
} else if (bool LeftShift = N.getOpcode() == ISD::SHL;
           (LeftShift || N.getOpcode() == ISD::SRL) &&
           isa<ConstantSDNode>(N.getOperand(1))) {

  unsigned C1 = N.getConstantOperandVal(1);
  unsigned XLen = Subtarget->getXLen();

  if (LeftShift && Leading == 32 && Trailing > 0 &&
      (Trailing + C1) == ShAmt) {
    EVT VT = N.getValueType();

  if (!LeftShift && Leading == 32 && Trailing > C1 &&
      (Trailing - C1) == ShAmt) {
    EVT VT = N.getValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&

  uint64_t Mask = N.getConstantOperandVal(1);

  Mask &= maskTrailingZeros<uint64_t>(C2);

  if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
    EVT VT = N.getValueType();
if (!isa<ConstantSDNode>(N))
  return false;

int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
if ((Imm & 0xfff) != 0xfff || Imm == -1)
  return false;

for (const SDNode *U : N->users()) {

// For 64-bit constants, select the inverted form only when materializing it
// is actually cheaper.
if (!isInt<32>(Imm)) {

  if (OrigImmCost <= NegImmCost)
    return false;
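// --- hasAllNBitUsers() and helpers ---
// Returns true when every user of Node only consumes the low Bits bits,
// which legalizes the narrowing peepholes above (e.g. the constant
// shrinking in the ISD::Constant case). vectorPseudoHasAllNBitUsers handles
// scalar operands feeding RVV pseudos.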
bool HasGlueOp = User->getGluedNode() != nullptr;

bool HasChainOp =
    User->getOperand(ChainOpIdx).getValueType() == MVT::Other;

const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);

if (UserOpNo == VLIdx)
  return false;

auto NumDemandedBits =
    RISCV::getVectorLowDemandedScalarBits(PseudoInfo->BaseInstr, Log2SEW);
return NumDemandedBits && Bits >= *NumDemandedBits;
bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
                                        const unsigned Depth) const {
  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
          Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND ||
          Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::XOR ||
          Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
          isa<ConstantSDNode>(Node) || Depth != 0) &&
         "Unexpected opcode");

  if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
    return false;

  if (!User->isMachineOpcode())
    return false;

  switch (User->getMachineOpcode()) {

  case RISCV::SLLI_UW:
  case RISCV::FMV_W_X:
  case RISCV::FCVT_H_W:
  case RISCV::FCVT_H_W_INX:
  case RISCV::FCVT_H_WU:
  case RISCV::FCVT_H_WU_INX:
  case RISCV::FCVT_S_W:
  case RISCV::FCVT_S_W_INX:
  case RISCV::FCVT_S_WU:
  case RISCV::FCVT_S_WU_INX:
  case RISCV::FCVT_D_W:
  case RISCV::FCVT_D_W_INX:
  case RISCV::FCVT_D_WU:
  case RISCV::FCVT_D_WU_INX:
  case RISCV::TH_REVW:
  case RISCV::TH_SRRIW:
    if (Bits < 32)
      return false;
    break;

  if (Bits >= Subtarget->getXLen() - User->getConstantOperandVal(1))
    break;

  if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
    break;

  unsigned ShAmt = User->getConstantOperandVal(1);

  case RISCV::FMV_H_X:
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:

  if (Bits >= (Subtarget->getXLen() / 2))
    break;

  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  int64_t Offset = C->getSExtValue();

  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
      break;

  EVT Ty = N->getValueType(0);
3484 EVT Ty =
N->getValueType(0);
3496 auto *
C = dyn_cast<ConstantSDNode>(
N);
3497 if (
C && isUInt<5>(
C->getZExtValue())) {
3499 N->getValueType(0));
3500 }
else if (
C &&
C->isAllOnes()) {
3503 N->getValueType(0));
3504 }
else if (isa<RegisterSDNode>(
N) &&
3505 cast<RegisterSDNode>(
N)->
getReg() == RISCV::X0) {
3511 N->getValueType(0));
if (!N.getOperand(0).isUndef())
  return SDValue();
N = N.getOperand(1);

if (Splat.getOpcode() != RISCVISD::VMV_V_X_VL ||
    !Splat.getOperand(0).isUndef())
  return SDValue();
assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");

SplatVal = Splat.getOperand(1);
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal,
                                  SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget,
                                  std::function<bool(int64_t)> ValidateImm) {
  SDValue Splat = findVSplat(N);
  if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
    return false;

  const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
  assert(Subtarget.getXLenVT() == Splat.getOperand(1).getSimpleValueType() &&
         "Unexpected splat operand type");

  APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);

  if (!ValidateImm(SplatImm))
    return false;

bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
  return selectVSplatImmHelper(N, SplatVal, *CurDAG, *Subtarget,
                               [](int64_t Imm) { return isInt<5>(Imm); });
}

bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
  return selectVSplatImmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
}

bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
                                                      SDValue &SplatVal) {
  return selectVSplatImmHelper(
      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
      });
}

bool RISCVDAGToDAGISel::selectVSplatUimm(SDValue N, unsigned Bits,
                                         SDValue &SplatVal) {
  return selectVSplatImmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
}
auto IsExtOrTrunc = [](SDValue N) {
  switch (N->getOpcode()) {

while (IsExtOrTrunc(N)) {
  if (!N.hasOneUse() || N.getScalarValueSizeInBits() < 8)
    return false;
  N = N->getOperand(0);
}

if (N.getOperand(0).getValueType() == Subtarget->getXLenVT()) {
  Imm = N.getOperand(0);

Imm = N.getOperand(0);
if (VT == MVT::f64 && !Subtarget->is64Bit()) {

if (auto *C = dyn_cast<ConstantSDNode>(N)) {

  if (!isInt<5>(ImmVal))
    return false;
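// --- doPeepholeSExtW() ---
// Folds an ADDIW rd, rs, 0 (the sext.w idiom) into its producer by
// switching the producer to its W-form (e.g. ADD -> ADDW), eliminating the
// separate sign-extension.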
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  if (N->getMachineOpcode() != RISCV::ADDIW ||
      !isNullConstant(N->getOperand(1)))
    return false;

  case RISCV::ADD:  Opc = RISCV::ADDW;  break;
  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SUB:  Opc = RISCV::SUBW;  break;
  case RISCV::MUL:  Opc = RISCV::MULW;  break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;

  // SLLIW only accepts 5-bit shift amounts.
  if (!isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
    return false;

  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
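// --- doPeepholeMaskedRVV() helpers ---
// usesAllOnesMask detects masks known to be all ones (V0 defined by a
// VMSET of a matching type), and isImplicitDef recognizes IMPLICIT_DEF
// passthrus (including REG_SEQUENCEs of them), so masked pseudos can be
// rewritten to their unmasked forms.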
// Check that we're using V0 as the mask register.
if (!isa<RegisterSDNode>(MaskOp) ||
    cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
  return false;

const auto *Glued = GlueOp.getNode();

// Check that the glued node defines V0.
if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
    cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
  return false;

const auto IsVMSet = [](unsigned Opc) {
  return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
         Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
         Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
         Opc == RISCV::PseudoVMSET_M_B8;
};

N->getOperand(N->getNumOperands() - 1));

static bool isImplicitDef(SDValue V) {
  if (!V.isMachineOpcode())
    return false;
  if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
    for (unsigned I = 1; I < V.getNumOperands(); I += 2)
      if (!isImplicitDef(V.getOperand(I)))
        return false;
    return true;
  }
  return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
}
case RISCV::VCPOP_M:
case RISCV::VFIRST_M:

const RISCV::RISCVMaskedPseudoInfo *I =
    RISCV::getMaskedPseudoInfo(N->getMachineOpcode());

unsigned MaskOpIdx = I->MaskOpIdx;

const unsigned Opc = I->UnmaskedPseudo;

assert(TII->get(Opc).hasUnmodeledSideEffects() ==
           TII->get(N->getMachineOpcode()).hasUnmodeledSideEffects() &&
       "Masked and unmasked pseudos are inconsistent");

assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");

bool ShouldSkip = !UseTUPseudo && !hasGPROut(Opc);

for (unsigned I = ShouldSkip, E = N->getNumOperands(); I != E; I++) {

  if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
    continue;

// Transitively apply any node glued to our new node.
const auto *Glued = N->getGluedNode();
if (auto *TGlued = Glued->getGluedNode())

if (!N->memoperands_empty())
  CurDAG->setNodeMemRefs(Result, N->memoperands());

Result->setFlags(N->getFlags());
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
  Passthru = N->getOperand(0);
  False = N->getOperand(1);
  True = N->getOperand(2);
  Mask = N->getOperand(3);
  VL = N->getOperand(4);

  // The glue operand is always last.
  Glue = N->getOperand(N->getNumOperands() - 1);
  assert(cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);

  assert(True.getResNo() == 0 &&
         "Expect True is the first output of an instruction.");

  const RISCV::RISCVMaskedPseudoInfo *Info =
      RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);

  if (False != PassthruOpTrue)
    return false;

  unsigned TrueVLIndex =
      True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;

  auto GetMinVL = [](SDValue LHS, SDValue RHS) {
    auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
    auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
    if (!CLHS || !CRHS)
      return SDValue();
    return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;
  };

  VL = GetMinVL(TrueVL, VL);

  unsigned MaskedOpc = Info->MaskedPseudo;
  assert(RISCVII::hasVecPolicyOp(TII->get(MaskedOpc).TSFlags) &&
         "Expected instructions with mask to have a policy operand.");
  assert(TII->get(MaskedOpc).getOperandConstraint(1, MCOI::TIED_TO) == 0 &&
         "Expected instructions with mask to have a tied dest.");

  bool MergeVLShrunk = VL != OrigVL;

  const unsigned NormalOpsEnd = TrueVLIndex - HasRoundingMode;

  if (HasRoundingMode)
    Ops.push_back(True->getOperand(TrueVLIndex - 1));

  Ops.append({VL, SEW, PolicyOp});

  if (!cast<MachineSDNode>(True)->memoperands_empty())
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;

  if (N->use_empty() || !N->isMachineOpcode())
    continue;

  MadeChange |= performCombineVMergeAndVOps(N);

bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
  bool MadeChange = false;

  if (N->use_empty() || !N->isMachineOpcode())
    continue;

  const unsigned Opc = N->getMachineOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||

  for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {

  Result->setFlags(N->getFlags());
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
mir Rename Register Operands
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset, bool IsPrefetch=false, bool IsRV32Zdinx=false)
#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)
static bool isWorthFoldingAdd(SDValue Add)
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
static bool isImplicitDef(SDValue V)
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool usesAllOnesMask(SDValue MaskOp, SDValue GlueOp)
static unsigned getSegInstNF(unsigned Intrinsic)
static bool hasGPROut(unsigned Opc)
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, unsigned Bits, const TargetInstrInfo *TII)
#define INST_ALL_NF_CASE_WITH_FF(NAME)
#define CASE_VMSLT_OPCODES(lmulenum, suffix)
static cl::opt< bool > UsePseudoMovImm("riscv-use-rematerializable-movimm", cl::Hidden, cl::desc("Use a rematerializable pseudoinstruction for 2 instruction " "constant materialization"), cl::init(false))
static SDValue findVSplat(SDValue N)
static SDValue getMaskSetter(SDValue MaskOp, SDValue GlueOp)
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, std::function< bool(int64_t)> ValidateImm)
#define INST_ALL_NF_CASE(NAME)
static bool IsVMerge(SDNode *N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
static StringRef getMemConstraintName(ConstraintCode C)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by other flags.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
MVT getVectorElementType() const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
void setFlags(Flags f)
Bitwise OR the current flags with the given flags.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine, CodeGenOptLevel OptLevel)
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset, bool IsRV32Zdinx=false)
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the least significant 5 bits of Offset should be all zeros.
bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset)
bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
void selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
void selectVLSEGFF(SDNode *Node, unsigned NF, bool IsMasked)
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
void selectSF_VC_X_SE(SDNode *Node)
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal)
bool hasAllHUsers(SDNode *Node) const
bool SelectInlineAsmMemoryOperand(const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
bool selectInvLogicImm(SDValue N, SDValue &Val)
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
bool selectVSplat(SDValue N, SDValue &SplatVal)
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
bool selectScalarFPAsInt(SDValue N, SDValue &Imm)
bool hasAllBUsers(SDNode *Node) const
void selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
bool tryShrinkShlLogicImm(SDNode *Node)
void selectVSETVLI(SDNode *Node)
bool selectVLOp(SDValue N, SDValue &VL)
bool trySignedBitfieldExtract(SDNode *Node)
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
void selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
void selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
bool tryIndexedLoad(SDNode *Node)
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal)
Quantity expandVScale(Quantity X) const
If the ElementCount or TypeSize X is scalable and VScale (VLEN) is exactly known, returns X converted...
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVII::VLMUL getLMUL(MVT VT)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
op_iterator op_begin() const
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
unsigned getNumOperands() const
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOptLevel OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
bool mayRaiseFPException(SDNode *Node) const
Return whether the node may raise an FP exception.
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
SDValue getRegister(Register Reg, EVT VT)
static constexpr unsigned MaxRecursionDepth
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
allnodes_const_iterator allnodes_end() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
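A typical selection-time use (a sketch assuming a SelectionDAGISel context where CurDAG, Node, Opc, DL, VTs, and Ops are in scope): after building the machine node for a memory operation, transfer the original node's MachineMemOperand so alias analysis and scheduling keep accurate information.
// Select a load and carry its memory operand over to the new node.
MachineSDNode *Load = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceNode(Node, Load);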
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns the sum of the base pointer and offset.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that is guaranteed to have the same semantics as an ADD.
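This predicate underlies the usual base-plus-immediate split in address selection (a sketch; Addr and CurDAG are assumed in scope):
// Split `base + constant` so the constant can become an immediate.
if (CurDAG->isBaseWithConstantOffset(Addr)) {
  SDValue Base = Addr.getOperand(0);
  int64_t Off = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  // ... accept only if Off fits the instruction's immediate field ...
}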
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
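Both known-bits queries commonly gate extension-removal peepholes (a sketch; Op is assumed to be an i64 SDValue and CurDAG a SelectionDAG in scope):
// Is the upper half of an i64 already zero / already sign bits?
bool HighHalfZero =
    CurDAG->MaskedValueIsZero(Op, APInt::getHighBitsSet(64, 32));
// More than 32 sign bits means a 32->64 sign-extension is redundant.
bool AlreadySignExtended = CurDAG->ComputeNumSignBits(Op) > 32;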
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
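These pieces compose into the standard spill-and-reload idiom (a hedged sketch; VT, Val, DL, and CurDAG are assumed in scope):
// Round-trip a value through a fresh, suitably aligned stack slot.
SDValue Slot = CurDAG->CreateStackTemporary(VT.getStoreSize(), Align(8));
int FI = cast<FrameIndexSDNode>(Slot.getNode())->getIndex();
MachinePointerInfo MPI =
    MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), FI);
SDValue Chain =
    CurDAG->getStore(CurDAG->getEntryNode(), DL, Val, Slot, MPI, Align(8));
SDValue Reload = CurDAG->getLoad(VT, DL, Chain, Slot, MPI);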
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
ilist< SDNode >::iterator allnodes_iterator
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
unsigned getOperandNo() const
Return the operand # of this use in its User.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to the "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction, then an offset node that is added / subtracted from the base pointer to form the address (for indexed memory ops).
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ SIGN_EXTEND
Conversion operators.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to the "load atomic" instruction.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register to fill the top 24 bits with the sign bit of the low byte).
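Its scalar effect can be modeled in plain C++ (an illustrative model of the i8-in-i32 case, not DAG code; it relies on arithmetic right shift, as on all mainstream targets):
#include <cassert>
#include <cstdint>

// SIGN_EXTEND_INREG of the low byte of a 32-bit value: shift the byte
// to the top, then arithmetic-shift it back down.
int32_t signExtendInRegI8(int32_t X) {
  return static_cast<int32_t>(static_cast<uint32_t>(X) << 24) >> 24;
}

int main() {
  assert(signExtendInRegI8(0x80) == -128); // low byte's sign bit set
  assert(signExtendInRegI8(0x7F) == 127);  // positive values unchanged
}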
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is a bitwise logic opcode.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, lest you change the values.
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with integer operands.
static bool hasRoundModeOp(uint64_t TSFlags)
static bool hasVLOp(uint64_t TSFlags)
static bool elementsDependOnMask(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool elementsDependOnVL(uint64_t TSFlags)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
@ SplitF64
Turns an f64 into a pair of i32s.
@ BuildPairF64
Turns a pair of i32s into an f64.
@ BuildGPRPair
Turn a pair of i<xlen>s into an even-odd register pair (untyped).
@ SPLAT_VECTOR_SPLIT_I64_VL
@ SplitGPRPair
Turn an even-odd register pair (untyped) into a pair of i<xlen>s.
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost, bool FreeZeroes)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
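A hedged sketch of how a materialization sequence is typically consumed (Imm and STI are assumed to be in scope; the loop body is elided):
// Each Inst pairs an opcode (LUI, ADDI, ADDIW, SLLI, ...) with an
// immediate; instructions are emitted in order, each consuming the
// previous result. For example, 0x12345678 can materialize as
//   LUI   a0, 0x12345     ; a0 = 0x12345000
//   ADDIW a0, a0, 0x678   ; a0 = 0x12345678
RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
for (const RISCVMatInt::Inst &Inst : Seq) {
  // ... emit one instruction per entry ...
}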
static unsigned decodeVSEW(unsigned VSEW)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
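A worked example of the ratio (a sketch mirroring decodeVLMUL's (multiplier, isFractional) convention; sewLMulRatio is a hypothetical helper, not the in-tree code):
#include <cassert>

// ratio = SEW / LMUL; for fractional LMUL = 1/n this is SEW * n.
// For a given VLEN, VLMAX = VLEN / ratio.
unsigned sewLMulRatio(unsigned SEW, unsigned LMul, bool Fractional) {
  return Fractional ? SEW * LMul : SEW / LMul;
}

int main() {
  // SEW=32, LMUL=1/2 -> ratio 64; with VLEN=128, VLMAX = 128/64 = 2.
  assert(sewLMulRatio(32, 2, /*Fractional=*/true) == 64);
  // SEW=32, LMUL=4 -> ratio 8; with VLEN=128, VLMAX = 128/8 = 16.
  assert(sewLMulRatio(32, 4, /*Fractional=*/false) == 8);
}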
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
static constexpr unsigned RVVBitsPerBlock
static constexpr int64_t VLMaxSentinel
initializer< Ty > init(const Ty &Val)
static const MachineMemOperand::Flags MONontemporalBit1
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version).
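A typical use pairs the predicate with the bit-counting helpers nearby to recover the run's position and width (a small self-contained demo using the standard <bit> equivalents):
#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  // 0x0FF0 is eight ones shifted left by four: a shifted mask.
  uint64_t V = 0x0FF0;
  unsigned Shift = std::countr_zero(V); // 4: start of the run
  unsigned Width = std::popcount(V);    // 8: length of the run
  assert(Shift == 4 && Width == 8);
  // Non-contiguous patterns such as 0xF0F0 are not shifted masks.
}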
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count the number of 0's from the most significant bit to the least significant, stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version).
CodeGenOptLevel
Code generation optimization level.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOptLevel OptLevel)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
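For example, the alignment of base + offset drops to what both the base alignment and the offset guarantee (a small demo of the documented semantics):
#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  llvm::Align A(8);
  // An offset of 4 from an 8-byte-aligned base is only 4-byte aligned.
  assert(llvm::commonAlignment(A, 4) == llvm::Align(4));
  // An offset of 16 preserves the full 8-byte alignment.
  assert(llvm::commonAlignment(A, 16) == llvm::Align(8));
}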
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
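Concretely, using the run-time-width overload (a small demo):
#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  // All twelve low bits set, read as a signed 12-bit value: -1.
  assert(llvm::SignExtend64(0xFFF, 12) == -1);
  // 0x7FF is the largest positive signed 12-bit value.
  assert(llvm::SignExtend64(0x7FF, 12) == 2047);
}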
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR-level pointer.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
bool hasNoFPExcept() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.