20#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
29#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
32 "riscv-use-rematerializable-movimm",
cl::Hidden,
33 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
34 "constant materialization"),
38#define GET_RISCVVSSEGTable_IMPL
39#define GET_RISCVVLSEGTable_IMPL
40#define GET_RISCVVLXSEGTable_IMPL
41#define GET_RISCVVSXSEGTable_IMPL
42#define GET_RISCVVLETable_IMPL
43#define GET_RISCVVSETable_IMPL
44#define GET_RISCVVLXTable_IMPL
45#define GET_RISCVVSXTable_IMPL
46#include "RISCVGenSearchableTables.inc"
52 bool MadeChange =
false;
59 switch (
N->getOpcode()) {
63 MVT VT =
N->getSimpleValueType(0);
79 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands");
80 MVT VT =
N->getSimpleValueType(0);
86 Lo.getValueType() == MVT::i32 &&
Hi.getValueType() == MVT::i32 &&
94 int FI = cast<FrameIndexSDNode>(StackSlot.
getNode())->getIndex();
118 MVT::i64, MPI,
Align(8),
125 LLVM_DEBUG(
dbgs() <<
"RISC-V DAG preprocessing replacing:\nOld: ");
144 bool MadeChange =
false;
148 if (
N->use_empty() || !
N->isMachineOpcode())
151 MadeChange |= doPeepholeSExtW(
N);
156 MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(
N));
161 MadeChange |= doPeepholeMergeVVMFold();
169 MadeChange |= doPeepholeNoRegPassThru();
181 switch (Inst.getOpndKind()) {
220 if (Seq.
size() > 3) {
221 unsigned ShiftAmt, AddOpc;
241 static const unsigned M1TupleRegClassIDs[] = {
242 RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
243 RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
244 RISCV::VRN8M1RegClassID};
245 static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
246 RISCV::VRN3M2RegClassID,
247 RISCV::VRN4M2RegClassID};
260 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
261 "Unexpected subreg numbering");
262 SubReg0 = RISCV::sub_vrm1_0;
263 RegClassID = M1TupleRegClassIDs[NF - 2];
266 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
267 "Unexpected subreg numbering");
268 SubReg0 = RISCV::sub_vrm2_0;
269 RegClassID = M2TupleRegClassIDs[NF - 2];
272 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
273 "Unexpected subreg numbering");
274 SubReg0 = RISCV::sub_vrm4_0;
275 RegClassID = RISCV::VRN2M4RegClassID;
284 for (
unsigned I = 0;
I < Regs.
size(); ++
I) {
294 SDNode *Node,
unsigned Log2SEW,
const SDLoc &
DL,
unsigned CurOp,
296 bool IsLoad,
MVT *IndexVT) {
297 SDValue Chain = Node->getOperand(0);
300 Operands.push_back(Node->getOperand(CurOp++));
302 if (IsStridedOrIndexed) {
303 Operands.push_back(Node->getOperand(CurOp++));
305 *IndexVT =
Operands.back()->getSimpleValueType(0);
310 SDValue Mask = Node->getOperand(CurOp++);
329 Policy = Node->getConstantOperandVal(CurOp++);
342 unsigned NF = Node->getNumValues() - 1;
343 MVT VT = Node->getSimpleValueType(0);
351 Node->op_begin() + CurOp + NF);
360 RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided,
false, Log2SEW,
361 static_cast<unsigned>(LMUL));
365 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
369 for (
unsigned I = 0;
I < NF; ++
I) {
381 unsigned NF = Node->getNumValues() - 2;
382 MVT VT = Node->getSimpleValueType(0);
391 Node->op_begin() + CurOp + NF);
401 RISCV::getVLSEGPseudo(NF, IsMasked,
false,
true,
402 Log2SEW,
static_cast<unsigned>(LMUL));
406 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
410 for (
unsigned I = 0;
I < NF; ++
I) {
424 unsigned NF = Node->getNumValues() - 1;
425 MVT VT = Node->getSimpleValueType(0);
433 Node->op_begin() + CurOp + NF);
444 "Element count mismatch");
448 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
450 "values when XLEN=32");
453 NF, IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
454 static_cast<unsigned>(IndexLMUL));
458 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
462 for (
unsigned I = 0;
I < NF; ++
I) {
475 unsigned NF = Node->getNumOperands() - 4;
480 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
488 unsigned CurOp = 2 + NF;
494 NF, IsMasked, IsStrided, Log2SEW,
static_cast<unsigned>(LMUL));
498 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
507 unsigned NF = Node->getNumOperands() - 5;
510 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
518 unsigned CurOp = 2 + NF;
526 "Element count mismatch");
530 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
532 "values when XLEN=32");
535 NF, IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
536 static_cast<unsigned>(IndexLMUL));
540 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
555 unsigned IntNo = Node->getConstantOperandVal(0);
557 assert((IntNo == Intrinsic::riscv_vsetvli ||
558 IntNo == Intrinsic::riscv_vsetvlimax) &&
559 "Unexpected vsetvli intrinsic");
561 bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
562 unsigned Offset = (VLMax ? 1 : 2);
565 "Unexpected number of operands");
570 Node->getConstantOperandVal(
Offset + 1) & 0x7);
577 unsigned Opcode = RISCV::PseudoVSETVLI;
578 if (
auto *
C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
585 Opcode = RISCV::PseudoVSETVLIX0;
587 VLOperand = Node->getOperand(1);
589 if (
auto *
C = dyn_cast<ConstantSDNode>(VLOperand)) {
591 if (isUInt<5>(AVL)) {
594 XLenVT, VLImm, VTypeIOp));
605 MVT VT = Node->getSimpleValueType(0);
606 unsigned Opcode = Node->getOpcode();
608 "Unexpected opcode");
613 SDValue N0 = Node->getOperand(0);
614 SDValue N1 = Node->getOperand(1);
631 bool SignExt =
false;
649 uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
650 if (Opcode !=
ISD::AND && (Val & RemovedBitsMask) != 0)
653 int64_t ShiftedVal = Val >> ShAmt;
654 if (!isInt<12>(ShiftedVal))
658 if (SignExt && ShAmt >= 32)
665 case ISD::AND: BinOpc = RISCV::ANDI;
break;
666 case ISD::OR: BinOpc = RISCV::ORI;
break;
667 case ISD::XOR: BinOpc = RISCV::XORI;
break;
670 unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
684 if (!Subtarget->hasVendorXTHeadBb())
687 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
691 SDValue N0 = Node->getOperand(0);
695 auto BitfieldExtract = [&](
SDValue N0,
unsigned Msb,
unsigned Lsb,
SDLoc DL,
703 MVT VT = Node->getSimpleValueType(0);
704 const unsigned RightShAmt = N1C->getZExtValue();
709 auto *N01C = dyn_cast<ConstantSDNode>(N0->
getOperand(1));
713 const unsigned LeftShAmt = N01C->getZExtValue();
716 if (LeftShAmt > RightShAmt)
720 const unsigned Msb = MsbPlusOne - 1;
721 const unsigned Lsb = RightShAmt - LeftShAmt;
723 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb,
DL, VT);
732 cast<VTSDNode>(N0.
getOperand(1))->getVT().getSizeInBits();
738 const unsigned Msb = ExtSize - 1;
739 const unsigned Lsb = RightShAmt;
741 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb,
DL, VT);
751 if (!Subtarget->hasVendorXTHeadMemIdx())
765 "Unexpected addressing mode");
768 int64_t
Offset =
C->getSExtValue();
773 for (Shift = 0; Shift < 4; Shift++)
774 if (isInt<5>(
Offset >> Shift) && ((
Offset % (1LL << Shift)) == 0))
783 if (LoadVT == MVT::i8 && IsPre)
784 Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
785 else if (LoadVT == MVT::i8 && IsPost)
786 Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
787 else if (LoadVT == MVT::i16 && IsPre)
788 Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
789 else if (LoadVT == MVT::i16 && IsPost)
790 Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
791 else if (LoadVT == MVT::i32 && IsPre)
792 Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
793 else if (LoadVT == MVT::i32 && IsPost)
794 Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
795 else if (LoadVT == MVT::i64 && IsPre)
796 Opcode = RISCV::TH_LDIB;
797 else if (LoadVT == MVT::i64 && IsPost)
798 Opcode = RISCV::TH_LDIA;
825 unsigned IntNo = Node->getConstantOperandVal(1);
827 assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
828 IntNo == Intrinsic::riscv_sf_vc_i_se) &&
829 "Unexpected vsetvli intrinsic");
832 unsigned Log2SEW =
Log2_32(Node->getConstantOperandVal(6));
836 Node->getOperand(4), Node->getOperand(5),
837 Node->getOperand(8), SEWOp,
838 Node->getOperand(0)};
841 auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
842 switch (LMulSDNode->getSExtValue()) {
844 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
845 : RISCV::PseudoVC_I_SE_MF8;
848 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
849 : RISCV::PseudoVC_I_SE_MF4;
852 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
853 : RISCV::PseudoVC_I_SE_MF2;
856 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
857 : RISCV::PseudoVC_I_SE_M1;
860 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
861 : RISCV::PseudoVC_I_SE_M2;
864 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
865 : RISCV::PseudoVC_I_SE_M4;
868 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
869 : RISCV::PseudoVC_I_SE_M8;
874 Opcode,
DL, Node->getSimpleValueType(0),
Operands));
879 if (Node->isMachineOpcode()) {
887 unsigned Opcode = Node->getOpcode();
890 MVT VT = Node->getSimpleValueType(0);
892 bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
896 assert((VT == Subtarget->
getXLenVT() || VT == MVT::i32) &&
"Unexpected VT");
897 auto *ConstNode = cast<ConstantSDNode>(Node);
898 if (ConstNode->isZero()) {
904 int64_t Imm = ConstNode->getSExtValue();
908 if (isUInt<8>(Imm) && isInt<6>(SignExtend64<8>(Imm)) &&
hasAllBUsers(Node))
909 Imm = SignExtend64<8>(Imm);
912 if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
914 Imm = SignExtend64<16>(Imm);
917 if (!isInt<32>(Imm) && isUInt<32>(Imm) &&
hasAllWUsers(Node))
918 Imm = SignExtend64<32>(Imm);
924 const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
925 auto [FPImm, NeedsFNeg] =
936 FNegOpc = RISCV::FSGNJN_H;
940 FNegOpc = RISCV::FSGNJN_S;
944 FNegOpc = RISCV::FSGNJN_D;
957 bool NegZeroF64 = APF.
isNegZero() && VT == MVT::f64;
967 bool HasZdinx = Subtarget->hasStdExtZdinx();
968 bool Is64Bit = Subtarget->
is64Bit();
974 assert(Subtarget->hasStdExtZfbfmin());
975 Opc = RISCV::FMV_H_X;
978 Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
981 Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
988 Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
990 Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
995 if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
1004 Opc = RISCV::FSGNJN_D;
1006 Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
1015 if (!Subtarget->hasStdExtZdinx())
1022 Node->getOperand(0),
1024 Node->getOperand(1),
1033 if (Subtarget->hasStdExtZdinx()) {
1036 if (!
SDValue(Node, 0).use_empty()) {
1038 Node->getOperand(0));
1042 if (!
SDValue(Node, 1).use_empty()) {
1044 Node->getOperand(0));
1052 if (!Subtarget->hasStdExtZfa())
1055 "Unexpected subtarget");
1058 if (!
SDValue(Node, 0).use_empty()) {
1060 Node->getOperand(0));
1063 if (!
SDValue(Node, 1).use_empty()) {
1065 Node->getOperand(0));
1073 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1076 SDValue N0 = Node->getOperand(0);
1080 unsigned ShAmt = N1C->getZExtValue();
1086 unsigned XLen = Subtarget->
getXLen();
1089 if (TrailingZeros > 0 && LeadingZeros == 32) {
1103 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1106 SDValue N0 = Node->getOperand(0);
1109 unsigned ShAmt = N1C->getZExtValue();
1115 unsigned XLen = Subtarget->
getXLen();
1118 if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
1137 Mask |= maskTrailingOnes<uint64_t>(ShAmt);
1141 if (ShAmt >= TrailingOnes)
1144 if (TrailingOnes == 32) {
1146 Subtarget->
is64Bit() ? RISCV::SRLIW : RISCV::SRLI,
DL, VT,
1157 if (HasBitTest && ShAmt + 1 == TrailingOnes) {
1159 Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST,
DL, VT,
1165 unsigned LShAmt = Subtarget->
getXLen() - TrailingOnes;
1187 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1190 SDValue N0 = Node->getOperand(0);
1193 unsigned ShAmt = N1C->getZExtValue();
1195 cast<VTSDNode>(N0.
getOperand(1))->getVT().getSizeInBits();
1197 if (ExtSize >= 32 || ShAmt >= ExtSize)
1199 unsigned LShAmt = Subtarget->
getXLen() - ExtSize;
1216 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1221 const bool isC1ANDI = isInt<12>(C1);
1223 SDValue N0 = Node->getOperand(0);
1228 if (!Subtarget->hasVendorXTHeadBb())
1240 auto *
C = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
1243 unsigned C2 =
C->getZExtValue();
1244 unsigned XLen = Subtarget->
getXLen();
1245 assert((C2 > 0 && C2 < XLen) &&
"Unexpected shift amount!");
1253 bool IsCANDI = isInt<6>(N1C->getSExtValue());
1257 C1 &= maskTrailingZeros<uint64_t>(C2);
1259 C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
1263 bool OneUseOrZExtW = N0.
hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
1269 if (!LeftShift && isC1Mask) {
1273 if (C2 + 32 == Leading) {
1285 if (C2 >= 32 && (Leading - C2) == 1 && N0.
hasOneUse() &&
1287 cast<VTSDNode>(
X.getOperand(1))->getVT() == MVT::i32) {
1292 RISCV::SRLIW,
DL, VT,
SDValue(SRAIW, 0),
1306 const unsigned Lsb = C2;
1307 if (tryUnsignedBitfieldExtract(Node,
DL, VT,
X, Msb, Lsb))
1312 bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
1314 cast<VTSDNode>(
X.getOperand(1))->getVT() == MVT::i32;
1316 Skip |= HasBitTest && Leading == XLen - 1;
1317 if (OneUseOrZExtW && !Skip) {
1319 RISCV::SLLI,
DL, VT,
X,
1335 if (C2 + Leading < XLen &&
1336 C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
1338 if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
1347 if (OneUseOrZExtW && !IsCANDI) {
1349 RISCV::SLLI,
DL, VT,
X,
1365 if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
1367 unsigned SrliOpc = RISCV::SRLI;
1370 isa<ConstantSDNode>(
X.getOperand(1)) &&
1371 X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
1372 SrliOpc = RISCV::SRLIW;
1373 X =
X.getOperand(0);
1385 if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
1386 OneUseOrZExtW && !IsCANDI) {
1388 RISCV::SRLIW,
DL, VT,
X,
1403 if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
1405 RISCV::SRLI,
DL, VT,
X,
1414 if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1416 RISCV::SRLIW,
DL, VT,
X,
1426 if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
1427 Subtarget->hasStdExtZba()) {
1429 RISCV::SRLI,
DL, VT,
X,
1432 RISCV::SLLI_UW,
DL, VT,
SDValue(SRLI, 0),
1445 if (isC1Mask && !isC1ANDI) {
1447 if (tryUnsignedBitfieldExtract(Node,
DL, VT, N0, Msb, 0))
1464 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1465 if (!N1C || !N1C->hasOneUse())
1469 SDValue N0 = Node->getOperand(0);
1486 (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
1488 IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
1489 if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.
hasOneUse()))
1493 bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
1495 IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
1496 if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.
hasOneUse()))
1502 unsigned XLen = Subtarget->
getXLen();
1508 unsigned ConstantShift = XLen - LeadingZeros;
1512 uint64_t ShiftedC1 = C1 << ConstantShift;
1515 ShiftedC1 = SignExtend64<32>(ShiftedC1);
1531 if (Subtarget->hasVendorXCVmem()) {
1537 SDValue Chain = Node->getOperand(0);
1541 bool Simm12 =
false;
1542 bool SignExtend = Load->getExtensionType() ==
ISD::SEXTLOAD;
1544 if (
auto ConstantOffset = dyn_cast<ConstantSDNode>(
Offset)) {
1545 int ConstantVal = ConstantOffset->getSExtValue();
1546 Simm12 = isInt<12>(ConstantVal);
1552 unsigned Opcode = 0;
1553 switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
1555 if (Simm12 && SignExtend)
1556 Opcode = RISCV::CV_LB_ri_inc;
1557 else if (Simm12 && !SignExtend)
1558 Opcode = RISCV::CV_LBU_ri_inc;
1559 else if (!Simm12 && SignExtend)
1560 Opcode = RISCV::CV_LB_rr_inc;
1562 Opcode = RISCV::CV_LBU_rr_inc;
1565 if (Simm12 && SignExtend)
1566 Opcode = RISCV::CV_LH_ri_inc;
1567 else if (Simm12 && !SignExtend)
1568 Opcode = RISCV::CV_LHU_ri_inc;
1569 else if (!Simm12 && SignExtend)
1570 Opcode = RISCV::CV_LH_rr_inc;
1572 Opcode = RISCV::CV_LHU_rr_inc;
1576 Opcode = RISCV::CV_LW_ri_inc;
1578 Opcode = RISCV::CV_LW_rr_inc;
1594 unsigned IntNo = Node->getConstantOperandVal(0);
1599 case Intrinsic::riscv_vmsgeu:
1600 case Intrinsic::riscv_vmsge: {
1601 SDValue Src1 = Node->getOperand(1);
1602 SDValue Src2 = Node->getOperand(2);
1603 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
1604 bool IsCmpUnsignedZero =
false;
1609 if (
auto *
C = dyn_cast<ConstantSDNode>(Src2)) {
1610 int64_t CVal =
C->getSExtValue();
1611 if (CVal >= -15 && CVal <= 16) {
1612 if (!IsUnsigned || CVal != 0)
1614 IsCmpUnsignedZero =
true;
1618 unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
1622#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
1623 case RISCVII::VLMUL::lmulenum: \
1624 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1625 : RISCV::PseudoVMSLT_VX_##suffix; \
1626 VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
1627 VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
1636#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
1644 if (IsCmpUnsignedZero) {
1655 {Cmp, Cmp, VL, SEW}));
1658 case Intrinsic::riscv_vmsgeu_mask:
1659 case Intrinsic::riscv_vmsge_mask: {
1660 SDValue Src1 = Node->getOperand(2);
1661 SDValue Src2 = Node->getOperand(3);
1662 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
1663 bool IsCmpUnsignedZero =
false;
1668 if (
auto *
C = dyn_cast<ConstantSDNode>(Src2)) {
1669 int64_t CVal =
C->getSExtValue();
1670 if (CVal >= -15 && CVal <= 16) {
1671 if (!IsUnsigned || CVal != 0)
1673 IsCmpUnsignedZero =
true;
1677 unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
1682#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
1683 case RISCVII::VLMUL::lmulenum: \
1684 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1685 : RISCV::PseudoVMSLT_VX_##suffix; \
1686 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
1687 : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
1696#undef CASE_VMSLT_OPCODES
1702#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
1703 case RISCVII::VLMUL::lmulenum: \
1704 VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
1705 VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
1706 VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
1715#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
1722 SDValue MaskedOff = Node->getOperand(1);
1723 SDValue Mask = Node->getOperand(4);
1726 if (IsCmpUnsignedZero) {
1729 if (Mask == MaskedOff) {
1735 {Mask, MaskedOff, VL, MaskSEW}));
1742 if (Mask == MaskedOff) {
1747 {Mask, Cmp, VL, MaskSEW}));
1764 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
1768 {Cmp, Mask, VL, MaskSEW}));
1771 case Intrinsic::riscv_vsetvli:
1772 case Intrinsic::riscv_vsetvlimax:
1778 unsigned IntNo = Node->getConstantOperandVal(1);
1783 case Intrinsic::riscv_vlseg2:
1784 case Intrinsic::riscv_vlseg3:
1785 case Intrinsic::riscv_vlseg4:
1786 case Intrinsic::riscv_vlseg5:
1787 case Intrinsic::riscv_vlseg6:
1788 case Intrinsic::riscv_vlseg7:
1789 case Intrinsic::riscv_vlseg8: {
1793 case Intrinsic::riscv_vlseg2_mask:
1794 case Intrinsic::riscv_vlseg3_mask:
1795 case Intrinsic::riscv_vlseg4_mask:
1796 case Intrinsic::riscv_vlseg5_mask:
1797 case Intrinsic::riscv_vlseg6_mask:
1798 case Intrinsic::riscv_vlseg7_mask:
1799 case Intrinsic::riscv_vlseg8_mask: {
1803 case Intrinsic::riscv_vlsseg2:
1804 case Intrinsic::riscv_vlsseg3:
1805 case Intrinsic::riscv_vlsseg4:
1806 case Intrinsic::riscv_vlsseg5:
1807 case Intrinsic::riscv_vlsseg6:
1808 case Intrinsic::riscv_vlsseg7:
1809 case Intrinsic::riscv_vlsseg8: {
1813 case Intrinsic::riscv_vlsseg2_mask:
1814 case Intrinsic::riscv_vlsseg3_mask:
1815 case Intrinsic::riscv_vlsseg4_mask:
1816 case Intrinsic::riscv_vlsseg5_mask:
1817 case Intrinsic::riscv_vlsseg6_mask:
1818 case Intrinsic::riscv_vlsseg7_mask:
1819 case Intrinsic::riscv_vlsseg8_mask: {
1823 case Intrinsic::riscv_vloxseg2:
1824 case Intrinsic::riscv_vloxseg3:
1825 case Intrinsic::riscv_vloxseg4:
1826 case Intrinsic::riscv_vloxseg5:
1827 case Intrinsic::riscv_vloxseg6:
1828 case Intrinsic::riscv_vloxseg7:
1829 case Intrinsic::riscv_vloxseg8:
1832 case Intrinsic::riscv_vluxseg2:
1833 case Intrinsic::riscv_vluxseg3:
1834 case Intrinsic::riscv_vluxseg4:
1835 case Intrinsic::riscv_vluxseg5:
1836 case Intrinsic::riscv_vluxseg6:
1837 case Intrinsic::riscv_vluxseg7:
1838 case Intrinsic::riscv_vluxseg8:
1841 case Intrinsic::riscv_vloxseg2_mask:
1842 case Intrinsic::riscv_vloxseg3_mask:
1843 case Intrinsic::riscv_vloxseg4_mask:
1844 case Intrinsic::riscv_vloxseg5_mask:
1845 case Intrinsic::riscv_vloxseg6_mask:
1846 case Intrinsic::riscv_vloxseg7_mask:
1847 case Intrinsic::riscv_vloxseg8_mask:
1850 case Intrinsic::riscv_vluxseg2_mask:
1851 case Intrinsic::riscv_vluxseg3_mask:
1852 case Intrinsic::riscv_vluxseg4_mask:
1853 case Intrinsic::riscv_vluxseg5_mask:
1854 case Intrinsic::riscv_vluxseg6_mask:
1855 case Intrinsic::riscv_vluxseg7_mask:
1856 case Intrinsic::riscv_vluxseg8_mask:
1859 case Intrinsic::riscv_vlseg8ff:
1860 case Intrinsic::riscv_vlseg7ff:
1861 case Intrinsic::riscv_vlseg6ff:
1862 case Intrinsic::riscv_vlseg5ff:
1863 case Intrinsic::riscv_vlseg4ff:
1864 case Intrinsic::riscv_vlseg3ff:
1865 case Intrinsic::riscv_vlseg2ff: {
1869 case Intrinsic::riscv_vlseg8ff_mask:
1870 case Intrinsic::riscv_vlseg7ff_mask:
1871 case Intrinsic::riscv_vlseg6ff_mask:
1872 case Intrinsic::riscv_vlseg5ff_mask:
1873 case Intrinsic::riscv_vlseg4ff_mask:
1874 case Intrinsic::riscv_vlseg3ff_mask:
1875 case Intrinsic::riscv_vlseg2ff_mask: {
1879 case Intrinsic::riscv_vloxei:
1880 case Intrinsic::riscv_vloxei_mask:
1881 case Intrinsic::riscv_vluxei:
1882 case Intrinsic::riscv_vluxei_mask: {
1883 bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1884 IntNo == Intrinsic::riscv_vluxei_mask;
1885 bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1886 IntNo == Intrinsic::riscv_vloxei_mask;
1888 MVT VT = Node->getSimpleValueType(0);
1893 Operands.push_back(Node->getOperand(CurOp++));
1901 "Element count mismatch");
1906 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
1908 "values when XLEN=32");
1911 IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
1912 static_cast<unsigned>(IndexLMUL));
1916 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1922 case Intrinsic::riscv_vlm:
1923 case Intrinsic::riscv_vle:
1924 case Intrinsic::riscv_vle_mask:
1925 case Intrinsic::riscv_vlse:
1926 case Intrinsic::riscv_vlse_mask: {
1927 bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1928 IntNo == Intrinsic::riscv_vlse_mask;
1930 IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1932 MVT VT = Node->getSimpleValueType(0);
1941 bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
1944 if (HasPassthruOperand)
1945 Operands.push_back(Node->getOperand(CurOp++));
1958 RISCV::getVLEPseudo(IsMasked, IsStrided,
false, Log2SEW,
1959 static_cast<unsigned>(LMUL));
1963 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1969 case Intrinsic::riscv_vleff:
1970 case Intrinsic::riscv_vleff_mask: {
1971 bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1973 MVT VT = Node->getSimpleValueType(0);
1978 Operands.push_back(Node->getOperand(CurOp++));
1985 RISCV::getVLEPseudo(IsMasked,
false,
true,
1986 Log2SEW,
static_cast<unsigned>(LMUL));
1989 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1999 unsigned IntNo = Node->getConstantOperandVal(1);
2001 case Intrinsic::riscv_vsseg2:
2002 case Intrinsic::riscv_vsseg3:
2003 case Intrinsic::riscv_vsseg4:
2004 case Intrinsic::riscv_vsseg5:
2005 case Intrinsic::riscv_vsseg6:
2006 case Intrinsic::riscv_vsseg7:
2007 case Intrinsic::riscv_vsseg8: {
2011 case Intrinsic::riscv_vsseg2_mask:
2012 case Intrinsic::riscv_vsseg3_mask:
2013 case Intrinsic::riscv_vsseg4_mask:
2014 case Intrinsic::riscv_vsseg5_mask:
2015 case Intrinsic::riscv_vsseg6_mask:
2016 case Intrinsic::riscv_vsseg7_mask:
2017 case Intrinsic::riscv_vsseg8_mask: {
2021 case Intrinsic::riscv_vssseg2:
2022 case Intrinsic::riscv_vssseg3:
2023 case Intrinsic::riscv_vssseg4:
2024 case Intrinsic::riscv_vssseg5:
2025 case Intrinsic::riscv_vssseg6:
2026 case Intrinsic::riscv_vssseg7:
2027 case Intrinsic::riscv_vssseg8: {
2031 case Intrinsic::riscv_vssseg2_mask:
2032 case Intrinsic::riscv_vssseg3_mask:
2033 case Intrinsic::riscv_vssseg4_mask:
2034 case Intrinsic::riscv_vssseg5_mask:
2035 case Intrinsic::riscv_vssseg6_mask:
2036 case Intrinsic::riscv_vssseg7_mask:
2037 case Intrinsic::riscv_vssseg8_mask: {
2041 case Intrinsic::riscv_vsoxseg2:
2042 case Intrinsic::riscv_vsoxseg3:
2043 case Intrinsic::riscv_vsoxseg4:
2044 case Intrinsic::riscv_vsoxseg5:
2045 case Intrinsic::riscv_vsoxseg6:
2046 case Intrinsic::riscv_vsoxseg7:
2047 case Intrinsic::riscv_vsoxseg8:
2050 case Intrinsic::riscv_vsuxseg2:
2051 case Intrinsic::riscv_vsuxseg3:
2052 case Intrinsic::riscv_vsuxseg4:
2053 case Intrinsic::riscv_vsuxseg5:
2054 case Intrinsic::riscv_vsuxseg6:
2055 case Intrinsic::riscv_vsuxseg7:
2056 case Intrinsic::riscv_vsuxseg8:
2059 case Intrinsic::riscv_vsoxseg2_mask:
2060 case Intrinsic::riscv_vsoxseg3_mask:
2061 case Intrinsic::riscv_vsoxseg4_mask:
2062 case Intrinsic::riscv_vsoxseg5_mask:
2063 case Intrinsic::riscv_vsoxseg6_mask:
2064 case Intrinsic::riscv_vsoxseg7_mask:
2065 case Intrinsic::riscv_vsoxseg8_mask:
2068 case Intrinsic::riscv_vsuxseg2_mask:
2069 case Intrinsic::riscv_vsuxseg3_mask:
2070 case Intrinsic::riscv_vsuxseg4_mask:
2071 case Intrinsic::riscv_vsuxseg5_mask:
2072 case Intrinsic::riscv_vsuxseg6_mask:
2073 case Intrinsic::riscv_vsuxseg7_mask:
2074 case Intrinsic::riscv_vsuxseg8_mask:
2077 case Intrinsic::riscv_vsoxei:
2078 case Intrinsic::riscv_vsoxei_mask:
2079 case Intrinsic::riscv_vsuxei:
2080 case Intrinsic::riscv_vsuxei_mask: {
2081 bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
2082 IntNo == Intrinsic::riscv_vsuxei_mask;
2083 bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
2084 IntNo == Intrinsic::riscv_vsoxei_mask;
2086 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2091 Operands.push_back(Node->getOperand(CurOp++));
2099 "Element count mismatch");
2104 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
2106 "values when XLEN=32");
2109 IsMasked, IsOrdered, IndexLog2EEW,
2110 static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
2114 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
2120 case Intrinsic::riscv_vsm:
2121 case Intrinsic::riscv_vse:
2122 case Intrinsic::riscv_vse_mask:
2123 case Intrinsic::riscv_vsse:
2124 case Intrinsic::riscv_vsse_mask: {
2125 bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
2126 IntNo == Intrinsic::riscv_vsse_mask;
2128 IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
2130 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2135 Operands.push_back(Node->getOperand(CurOp++));
2142 IsMasked, IsStrided, Log2SEW,
static_cast<unsigned>(LMUL));
2145 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
2151 case Intrinsic::riscv_sf_vc_x_se:
2152 case Intrinsic::riscv_sf_vc_i_se:
2159 MVT SrcVT = Node->getOperand(0).getSimpleValueType();
2171 SDValue V = Node->getOperand(0);
2172 SDValue SubV = Node->getOperand(1);
2174 auto Idx = Node->getConstantOperandVal(2);
2178 MVT SubVecContainerVT = SubVecVT;
2181 SubVecContainerVT =
TLI.getContainerForFixedLengthVector(SubVecVT);
2183 [[maybe_unused]]
bool ExactlyVecRegSized =
2185 .isKnownMultipleOf(Subtarget->
expandVScale(VecRegSize));
2187 .getKnownMinValue()));
2188 assert(
Idx == 0 && (ExactlyVecRegSized || V.isUndef()));
2190 MVT ContainerVT = VT;
2192 ContainerVT =
TLI.getContainerForFixedLengthVector(VT);
2196 std::tie(SubRegIdx,
Idx) =
2198 ContainerVT, SubVecContainerVT,
Idx,
TRI);
2207 [[maybe_unused]]
bool IsSubVecPartReg =
2211 assert((!IsSubVecPartReg || V.isUndef()) &&
2212 "Expecting lowering to have created legal INSERT_SUBVECTORs when "
2213 "the subvector is smaller than a full-sized register");
2217 if (SubRegIdx == RISCV::NoSubRegister) {
2218 unsigned InRegClassID =
2222 "Unexpected subvector extraction");
2235 SDValue V = Node->getOperand(0);
2236 auto Idx = Node->getConstantOperandVal(1);
2237 MVT InVT = V.getSimpleValueType();
2241 MVT SubVecContainerVT = VT;
2245 SubVecContainerVT =
TLI.getContainerForFixedLengthVector(VT);
2248 InVT =
TLI.getContainerForFixedLengthVector(InVT);
2252 std::tie(SubRegIdx,
Idx) =
2254 InVT, SubVecContainerVT,
Idx,
TRI);
2264 if (SubRegIdx == RISCV::NoSubRegister) {
2268 "Unexpected subvector extraction");
2287 if (!Node->getOperand(0).isUndef())
2289 SDValue Src = Node->getOperand(1);
2290 auto *Ld = dyn_cast<LoadSDNode>(Src);
2293 if (!Ld || Ld->isIndexed())
2295 EVT MemVT = Ld->getMemoryVT();
2321 if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
2331 Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
2335 false, IsStrided,
false,
2336 Log2SEW,
static_cast<unsigned>(LMUL));
2348 unsigned Locality = Node->getConstantOperandVal(3);
2352 if (
auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
2356 int NontemporalLevel = 0;
2359 NontemporalLevel = 3;
2362 NontemporalLevel = 1;
2365 NontemporalLevel = 0;
2371 if (NontemporalLevel & 0b1)
2373 if (NontemporalLevel & 0b10)
2385 std::vector<SDValue> &OutOps) {
2388 switch (ConstraintID) {
2393 assert(Found &&
"SelectAddrRegImm should always succeed");
2394 OutOps.push_back(Op0);
2395 OutOps.push_back(Op1);
2399 OutOps.push_back(
Op);
2413 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Addr)) {
2431 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Addr.getOperand(0))) {
2432 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2433 if (isInt<12>(CVal)) {
2449 bool IsPrefetch =
false) {
2450 if (!isa<ConstantSDNode>(
Addr))
2453 int64_t CVal = cast<ConstantSDNode>(
Addr)->getSExtValue();
2458 int64_t Lo12 = SignExtend64<12>(CVal);
2460 if (!Subtarget->
is64Bit() || isInt<32>(
Hi)) {
2461 if (IsPrefetch && (Lo12 & 0b11111) != 0)
2465 int64_t Hi20 = (
Hi >> 12) & 0xfffff;
2482 if (Seq.
back().getOpcode() != RISCV::ADDI)
2484 Lo12 = Seq.
back().getImm();
2485 if (IsPrefetch && (Lo12 & 0b11111) != 0)
2490 assert(!Seq.
empty() &&
"Expected more instructions in sequence");
2500 for (
auto *
Use :
Add->uses()) {
2505 EVT VT = cast<MemSDNode>(
Use)->getMemoryVT();
2511 cast<StoreSDNode>(
Use)->getValue() ==
Add)
2514 cast<AtomicSDNode>(
Use)->getVal() ==
Add)
2522 unsigned MaxShiftAmount,
2525 EVT VT =
Addr.getSimpleValueType();
2531 if (
N.getOpcode() ==
ISD::SHL && isa<ConstantSDNode>(
N.getOperand(1))) {
2533 if (
N.getConstantOperandVal(1) <= MaxShiftAmount) {
2535 ShiftAmt =
N.getConstantOperandVal(1);
2540 return ShiftAmt != 0;
2544 if (
auto *C1 = dyn_cast<ConstantSDNode>(
Addr.getOperand(1))) {
2549 isInt<12>(C1->getSExtValue())) {
2558 }
else if (UnwrapShl(
Addr.getOperand(0),
Index, Scale)) {
2562 UnwrapShl(
Addr.getOperand(1),
Index, Scale);
2566 }
else if (UnwrapShl(
Addr,
Index, Scale)) {
2581 MVT VT =
Addr.getSimpleValueType();
2589 int64_t RV32ZdinxRange = IsINX ? 4 : 0;
2591 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2592 if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
2596 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
2604 GA->getGlobal()->getPointerAlignment(
DL), GA->getOffset());
2605 if (CVal == 0 || Alignment > CVal) {
2606 int64_t CombinedOffset = CVal + GA->getOffset();
2610 CombinedOffset, GA->getTargetFlags());
2616 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Base))
2624 if (
Addr.getOpcode() ==
ISD::ADD && isa<ConstantSDNode>(
Addr.getOperand(1))) {
2625 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2626 assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
2627 "simm12 not already handled?");
2632 if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
2633 int64_t Adj = CVal < 0 ? -2048 : 2047;
2675 MVT VT =
Addr.getSimpleValueType();
2678 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2679 if (isInt<12>(CVal)) {
2683 if ((CVal & 0b11111) != 0) {
2689 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Base))
2697 if (
Addr.getOpcode() ==
ISD::ADD && isa<ConstantSDNode>(
Addr.getOperand(1))) {
2698 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2699 assert(!(isInt<12>(CVal) && isInt<12>(CVal)) &&
2700 "simm12 not already handled?");
2704 if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
2705 int64_t Adj = CVal < 0 ? -2048 : 2016;
2706 int64_t AdjustedOffset = CVal - Adj;
2708 RISCV::ADDI,
DL, VT,
Addr.getOperand(0),
2738 if (isa<ConstantSDNode>(
Addr.getOperand(1)))
2783 if (Imm != 0 && Imm % ShiftWidth == 0) {
2792 if (Imm != 0 && Imm % ShiftWidth == 0) {
2796 unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
2804 if (Imm % ShiftWidth == ShiftWidth - 1) {
2826 "Unexpected condition code!");
2833 ISD::CondCode CCVal = cast<CondCodeSDNode>(
N->getOperand(2))->get();
2834 if (CCVal != ExpectedCCVal)
2840 if (!
LHS.getValueType().isScalarInteger())
2851 if (
auto *
C = dyn_cast<ConstantSDNode>(
RHS)) {
2852 int64_t CVal =
C->getSExtValue();
2855 if (CVal == -2048) {
2858 RISCV::XORI,
DL,
N->getValueType(0),
LHS,
2865 if (isInt<12>(CVal) || CVal == 2048) {
2868 RISCV::ADDI,
DL,
N->getValueType(0),
LHS,
2884 cast<VTSDNode>(
N.getOperand(1))->getVT().getSizeInBits() == Bits) {
2885 Val =
N.getOperand(0);
2889 auto UnwrapShlSra = [](
SDValue N,
unsigned ShiftAmt) {
2890 if (
N.getOpcode() !=
ISD::SRA || !isa<ConstantSDNode>(
N.getOperand(1)))
2895 N.getConstantOperandVal(1) == ShiftAmt &&
2902 MVT VT =
N.getSimpleValueType();
2913 auto *
C = dyn_cast<ConstantSDNode>(
N.getOperand(1));
2914 if (
C &&
C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
2915 Val =
N.getOperand(0);
2919 MVT VT =
N.getSimpleValueType();
2934 if (
N.getOpcode() ==
ISD::AND && isa<ConstantSDNode>(
N.getOperand(1))) {
2940 uint64_t Mask =
N.getConstantOperandVal(1);
2943 unsigned XLen = Subtarget->
getXLen();
2945 Mask &= maskTrailingZeros<uint64_t>(C2);
2947 Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
2955 if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
2957 EVT VT =
N.getValueType();
2967 if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
2969 EVT VT =
N.getValueType();
2981 bool LeftShift =
N.getOpcode() ==
ISD::SHL;
2982 if ((LeftShift ||
N.getOpcode() ==
ISD::SRL) &&
2983 isa<ConstantSDNode>(
N.getOperand(1))) {
2989 unsigned C1 =
N.getConstantOperandVal(1);
2990 unsigned XLen = Subtarget->
getXLen();
2995 if (LeftShift && Leading == 32 && Trailing > 0 &&
2996 (Trailing + C1) == ShAmt) {
2998 EVT VT =
N.getValueType();
3007 if (!LeftShift && Leading == 32 && Trailing > C1 &&
3008 (Trailing - C1) == ShAmt) {
3010 EVT VT =
N.getValueType();
3029 if (
N.getOpcode() ==
ISD::AND && isa<ConstantSDNode>(
N.getOperand(1)) &&
3034 uint64_t Mask =
N.getConstantOperandVal(1);
3037 Mask &= maskTrailingZeros<uint64_t>(C2);
3045 if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
3047 EVT VT =
N.getValueType();
3075 bool HasGlueOp =
User->getGluedNode() !=
nullptr;
3077 bool HasChainOp =
User->
getOperand(ChainOpIdx).getValueType() == MVT::Other;
3081 const unsigned Log2SEW =
User->getConstantOperandVal(VLIdx + 1);
3083 if (UserOpNo == VLIdx)
3086 auto NumDemandedBits =
3088 return NumDemandedBits && Bits >= *NumDemandedBits;
3101 const unsigned Depth)
const {
3107 isa<ConstantSDNode>(Node) ||
Depth != 0) &&
3108 "Unexpected opcode");
3115 if (
Depth == 0 && !Node->getValueType(0).isScalarInteger())
3118 for (
auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
3121 if (!
User->isMachineOpcode())
3125 switch (
User->getMachineOpcode()) {
3150 case RISCV::SLLI_UW:
3151 case RISCV::FMV_W_X:
3152 case RISCV::FCVT_H_W:
3153 case RISCV::FCVT_H_WU:
3154 case RISCV::FCVT_S_W:
3155 case RISCV::FCVT_S_WU:
3156 case RISCV::FCVT_D_W:
3157 case RISCV::FCVT_D_WU:
3158 case RISCV::TH_REVW:
3159 case RISCV::TH_SRRIW:
3172 if (UI.getOperandNo() != 1 || Bits <
Log2_32(Subtarget->
getXLen()))
3177 if (Bits < Subtarget->getXLen() -
User->getConstantOperandVal(1))
3186 if (Bits >= (
unsigned)llvm::bit_width<uint64_t>(~Imm))
3205 unsigned ShAmt =
User->getConstantOperandVal(1);
3219 case RISCV::FMV_H_X:
3220 case RISCV::ZEXT_H_RV32:
3221 case RISCV::ZEXT_H_RV64:
3227 if (Bits < (Subtarget->
getXLen() / 2))
3231 case RISCV::SH1ADD_UW:
3232 case RISCV::SH2ADD_UW:
3233 case RISCV::SH3ADD_UW:
3236 if (UI.getOperandNo() != 0 || Bits < 32)
3240 if (UI.getOperandNo() != 0 || Bits < 8)
3244 if (UI.getOperandNo() != 0 || Bits < 16)
3248 if (UI.getOperandNo() != 0 || Bits < 32)
3260 if (
auto *
C = dyn_cast<ConstantSDNode>(
N)) {
3261 int64_t
Offset =
C->getSExtValue();
3263 for (Shift = 0; Shift < 4; Shift++)
3264 if (isInt<5>(
Offset >> Shift) && ((
Offset % (1LL << Shift)) == 0))
3271 EVT Ty =
N->getValueType(0);
3283 auto *
C = dyn_cast<ConstantSDNode>(
N);
3284 if (
C && isUInt<5>(
C->getZExtValue())) {
3286 N->getValueType(0));
3287 }
else if (
C &&
C->isAllOnes()) {
3290 N->getValueType(0));
3291 }
else if (isa<RegisterSDNode>(
N) &&
3292 cast<RegisterSDNode>(
N)->
getReg() == RISCV::X0) {
3298 N->getValueType(0));
3308 if (!
N.getOperand(0).isUndef())
3310 N =
N.getOperand(1);
3315 !
Splat.getOperand(0).isUndef())
3317 assert(
Splat.getNumOperands() == 3 &&
"Unexpected number of operands");
3326 SplatVal =
Splat.getOperand(1);
3333 std::function<
bool(int64_t)> ValidateImm) {
3335 if (!
Splat || !isa<ConstantSDNode>(
Splat.getOperand(1)))
3338 const unsigned SplatEltSize =
Splat.getScalarValueSizeInBits();
3340 "Unexpected splat operand type");
3349 APInt SplatConst =
Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
3353 if (!ValidateImm(SplatImm))
3362 [](int64_t Imm) {
return isInt<5>(Imm); });
3367 N, SplatVal, *
CurDAG, *Subtarget,
3368 [](int64_t Imm) {
return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
3374 N, SplatVal, *
CurDAG, *Subtarget, [](int64_t Imm) {
3375 return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
3382 N, SplatVal, *
CurDAG, *Subtarget,
3383 [Bits](int64_t Imm) {
return isUIntN(Bits, Imm); });
3387 auto IsExtOrTrunc = [](
SDValue N) {
3388 switch (
N->getOpcode()) {
3403 while (IsExtOrTrunc(
N)) {
3404 if (!
N.hasOneUse() ||
N.getScalarValueSizeInBits() < 8)
3406 N =
N->getOperand(0);
3427 ->getLegalZfaFPImm(APF, VT)
3432 if (VT == MVT::f64 && !Subtarget->
is64Bit()) {
3444 if (
auto *
C = dyn_cast<ConstantSDNode>(
N)) {
3447 if (!isInt<5>(ImmVal))
3459bool RISCVDAGToDAGISel::doPeepholeSExtW(
SDNode *
N) {
3461 if (
N->getMachineOpcode() != RISCV::ADDIW ||
3483 case RISCV::ADD: Opc = RISCV::ADDW;
break;
3484 case RISCV::ADDI: Opc = RISCV::ADDIW;
break;
3485 case RISCV::SUB: Opc = RISCV::SUBW;
break;
3486 case RISCV::MUL: Opc = RISCV::MULW;
break;
3487 case RISCV::SLLI: Opc = RISCV::SLLIW;
break;
3495 !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
3510 case RISCV::TH_MULAW:
3511 case RISCV::TH_MULAH:
3512 case RISCV::TH_MULSW:
3513 case RISCV::TH_MULSH:
3528 if (!isa<RegisterSDNode>(MaskOp) ||
3529 cast<RegisterSDNode>(MaskOp)->
getReg() != RISCV::V0)
3533 const auto *Glued = GlueOp.
getNode();
3539 if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
3540 cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
3552 const auto IsVMSet = [](
unsigned Opc) {
3553 return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
3554 Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
3555 Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
3556 Opc == RISCV::PseudoVMSET_M_B8;
3569 N->getOperand(
N->getNumOperands() - 1));
3573 if (!V.isMachineOpcode())
3575 if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
3576 for (
unsigned I = 1;
I < V.getNumOperands();
I += 2)
3581 return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
3590 RISCV::getMaskedPseudoInfo(
N->getMachineOpcode());
3594 unsigned MaskOpIdx =
I->MaskOpIdx;
3600 const unsigned Opc =
I->UnmaskedPseudo;
3607 "Masked and unmasked pseudos are inconsistent");
3609 assert(UseTUPseudo == HasTiedDest &&
"Unexpected pseudo structure");
3614 for (
unsigned I = !UseTUPseudo, E =
N->getNumOperands();
I != E;
I++) {
3617 if (
I == MaskOpIdx ||
Op.getValueType() == MVT::Glue)
3623 const auto *Glued =
N->getGluedNode();
3624 if (
auto *TGlued = Glued->getGluedNode())
3630 if (!
N->memoperands_empty())
3633 Result->setFlags(
N->getFlags());
3650 return RISCV::PseudoVMSET_M_B1;
3652 return RISCV::PseudoVMSET_M_B2;
3654 return RISCV::PseudoVMSET_M_B4;
3656 return RISCV::PseudoVMSET_M_B8;
3658 return RISCV::PseudoVMSET_M_B16;
3660 return RISCV::PseudoVMSET_M_B32;
3662 return RISCV::PseudoVMSET_M_B64;
3690bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(
SDNode *
N) {
3694 Merge =
N->getOperand(0);
3695 False =
N->getOperand(0);
3696 True =
N->getOperand(1);
3697 VL =
N->getOperand(2);
3702 Merge =
N->getOperand(0);
3703 False =
N->getOperand(1);
3704 True =
N->getOperand(2);
3705 Mask =
N->getOperand(3);
3706 VL =
N->getOperand(4);
3708 Glue =
N->getOperand(
N->getNumOperands() - 1);
3710 assert(!Mask || cast<RegisterSDNode>(Mask)->
getReg() == RISCV::V0);
3719 "Expect True is the first output of an instruction.");
3733 bool IsMasked =
false;
3735 RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
3736 if (!Info && HasTiedDest) {
3737 Info = RISCV::getMaskedPseudoInfo(TrueOpc);
3758 if (False != MergeOpTrue)
3765 assert(HasTiedDest &&
"Expected tied dest");
3805 unsigned TrueVLIndex =
3806 True.
getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
3817 auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
3818 auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
3821 return CLHS->getZExtValue() <= CRHS->getZExtValue() ?
LHS :
RHS;
3827 VL = GetMinVL(TrueVL, VL);
3834 if (TrueVL != VL || !IsMasked)
3859 RISCV::V0, AllOnesMask,
SDValue());
3864 unsigned MaskedOpc =
Info->MaskedPseudo;
3868 "Expected instructions with mask have policy operand.");
3871 "Expected instructions with mask have a tied dest.");
3881 bool MergeVLShrunk = VL != OrigVL;
3893 const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
3894 assert(!IsMasked || NormalOpsEnd ==
Info->MaskOpIdx);
3903 if (HasRoundingMode)
3906 Ops.
append({VL, SEW, PolicyOp});
3919 if (!cast<MachineSDNode>(True)->memoperands_empty())
3932bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
3933 bool MadeChange =
false;
3938 if (
N->use_empty() || !
N->isMachineOpcode())
3942 MadeChange |= performCombineVMergeAndVOps(
N);
3952bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
3953 bool MadeChange =
false;
3958 if (
N->use_empty() || !
N->isMachineOpcode())
3961 const unsigned Opc =
N->getMachineOpcode();
3962 if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
3969 for (
unsigned I = 1, E =
N->getNumOperands();
I != E;
I++) {
3976 Result->setFlags(
N->getFlags());
static Register createTuple(ArrayRef< Register > Regs, const unsigned RegClassIDs[], const unsigned SubRegs[], MachineIRBuilder &MIB)
Create a REG_SEQUENCE instruction using the registers in Regs.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
mir Rename Register Operands
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)
static bool isWorthFoldingAdd(SDValue Add)
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
static bool isImplicitDef(SDValue V)
static unsigned GetVMSetForLMul(RISCVII::VLMUL LMUL)
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool usesAllOnesMask(SDValue MaskOp, SDValue GlueOp)
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, unsigned Bits, const TargetInstrInfo *TII)
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset, bool IsPrefetch=false)
static bool IsVMv(SDNode *N)
static cl::opt< bool > UsePseudoMovImm("riscv-use-rematerializable-movimm", cl::Hidden, cl::desc("Use a rematerializable pseudoinstruction for 2 instruction " "constant materialization"), cl::init(false))
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)
static SDValue findVSplat(SDValue N)
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, std::function< bool(int64_t)> ValidateImm)
static bool IsVMerge(SDNode *N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
static StringRef getMemConstraintName(ConstraintCode C)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by other flags.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
uint64_t getScalarSizeInBits() const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
void setFlags(Flags f)
Bitwise OR the current flags with the given flags.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine, CodeGenOptLevel OptLevel)
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the least significant 5 bits of Offset shoule be all zeros.
bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset)
bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
void selectVLSEGFF(SDNode *Node, bool IsMasked)
bool selectFPImm(SDValue N, SDValue &Imm)
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
void selectSF_VC_X_SE(SDNode *Node)
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal)
bool hasAllHUsers(SDNode *Node) const
bool SelectInlineAsmMemoryOperand(const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
bool selectVSplat(SDValue N, SDValue &SplatVal)
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
bool hasAllBUsers(SDNode *Node) const
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool tryShrinkShlLogicImm(SDNode *Node)
void selectVSETVLI(SDNode *Node)
bool selectVLOp(SDValue N, SDValue &VL)
bool trySignedBitfieldExtract(SDNode *Node)
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset, bool IsINX=false)
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
bool tryIndexedLoad(SDNode *Node)
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal)
Quantity expandVScale(Quantity X) const
If the ElementCount or TypeSize X is scalable and VScale (VLEN) is exactly known, returns X converted...
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVII::VLMUL getLMUL(MVT VT)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
op_iterator op_begin() const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
unsigned getNumOperands() const
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOptLevel OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
bool mayRaiseFPException(SDNode *Node) const
Return whether the node may raise an FP exception.
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
static constexpr unsigned MaxRecursionDepth
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
allnodes_const_iterator allnodes_end() const
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getRegister(unsigned Reg, EVT VT)
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
ilist< SDNode >::iterator allnodes_iterator
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ SIGN_EXTEND
Conversion operators.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
static bool hasRoundModeOp(uint64_t TSFlags)
static VLMUL getLMul(uint64_t TSFlags)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
@ SPLAT_VECTOR_SPLIT_I64_VL
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
static unsigned decodeVSEW(unsigned VSEW)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
static constexpr unsigned RVVBitsPerBlock
static constexpr int64_t VLMaxSentinel
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
static const MachineMemOperand::Flags MONontemporalBit1
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version.)
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version.)
CodeGenOptLevel
Code generation optimization level.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOptLevel OptLevel)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
bool hasNoFPExcept() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.