#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"

static cl::opt<bool> UsePseudoMovImm(
    "riscv-use-rematerializable-movimm", cl::Hidden,
    cl::desc("Use a rematerializable pseudoinstruction for 2 instruction "
             "constant materialization"),
    cl::init(false));

#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
bool MadeChange = false;
switch (N->getOpcode()) {
MVT VT = N->getSimpleValueType(0);
assert(N->getNumOperands() == 4 && "Unexpected number of operands");
MVT VT = N->getSimpleValueType(0);
Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();
MVT::i64, MPI, Align(8),
LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
bool MadeChange = false;
if (N->use_empty() || !N->isMachineOpcode())
MadeChange |= doPeepholeSExtW(N);
MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));
MadeChange |= doPeepholeMergeVVMFold();
MadeChange |= doPeepholeNoRegPassThru();
switch (Inst.getOpndKind()) {
if (Seq.size() > 3) {
unsigned ShiftAmt, AddOpc;
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm1_0;
RegClassID = M1TupleRegClassIDs[NF - 2];
static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm2_0;
RegClassID = M2TupleRegClassIDs[NF - 2];
static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm4_0;
RegClassID = RISCV::VRN2M4RegClassID;
for (unsigned I = 0; I < Regs.size(); ++I) {
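// Editor's note (illustrative): createTuple stitches NF vector registers into
// a single tuple-register-class value via REG_SEQUENCE, e.g. for NF=2 at
// LMUL=1 (operand registers chosen for illustration only):
//
//   REG_SEQUENCE VRN2M1RegClassID, v8, sub_vrm1_0, v9, sub_vrm1_1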
SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
bool IsLoad, MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);
Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
if (IsStridedOrIndexed) {
  Operands.push_back(Node->getOperand(CurOp++)); // Stride or index.
  *IndexVT = Operands.back()->getSimpleValueType(0);
SDValue Mask = Node->getOperand(CurOp++);
Policy = Node->getConstantOperandVal(CurOp++);
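// Editor's note (illustrative): the operand order this helper produces for
// the vector load/store pseudos is, schematically (parenthesized entries are
// optional depending on the pseudo):
//   { (passthru,) base, (stride|index,) (mask,) VL, SEW(, policy), chain }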
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
Node->op_begin() + CurOp + NF);
RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                      static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumValues() - 2;
MVT VT = Node->getSimpleValueType(0);
Node->op_begin() + CurOp + NF);
RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                      Log2SEW, static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
Node->op_begin() + CurOp + NF);
"Element count mismatch");
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
"values when XLEN=32");
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumOperands() - 4;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned CurOp = 2 + NF;
NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned NF = Node->getNumOperands() - 5;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned CurOp = 2 + NF;
"Element count mismatch");
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
"values when XLEN=32");
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = Node->getConstantOperandVal(0);
assert((IntNo == Intrinsic::riscv_vsetvli ||
        IntNo == Intrinsic::riscv_vsetvlimax) &&
       "Unexpected vsetvli intrinsic");
bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
unsigned Offset = (VLMax ? 1 : 2);
"Unexpected number of operands");
Node->getConstantOperandVal(Offset + 1) & 0x7);
unsigned Opcode = RISCV::PseudoVSETVLI;
if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
Opcode = RISCV::PseudoVSETVLIX0;
VLOperand = Node->getOperand(1);
if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
if (isUInt<5>(AVL)) {
XLenVT, VLImm, VTypeIOp));
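// Editor's note (illustrative): three forms can be selected here, roughly:
//   vsetvli  rd, rs1, vtypei   -> PseudoVSETVLI    (non-constant AVL)
//   vsetvli  rd, x0,  vtypei   -> PseudoVSETVLIX0  (AVL = VLMAX)
//   vsetivli rd, uimm5, vtypei -> chosen when the AVL is a constant that
//                                 fits in 5 bits (the isUInt<5>(AVL) path).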
MVT VT = Node->getSimpleValueType(0);
unsigned Opcode = Node->getOpcode();
"Unexpected opcode");
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
bool SignExt = false;
uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
int64_t ShiftedVal = Val >> ShAmt;
if (!isInt<12>(ShiftedVal))
if (SignExt && ShAmt >= 32)
case ISD::AND: BinOpc = RISCV::ANDI; break;
case ISD::OR:  BinOpc = RISCV::ORI;  break;
case ISD::XOR: BinOpc = RISCV::XORI; break;
unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
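// Editor's worked example (illustrative): with Val = 0x1230 and ShAmt = 4,
// 0x1230 >> 4 = 0x123 fits in a signed 12-bit immediate, so
//   (xor (shl X, 4), 0x1230)  ->  (slli (xori X, 0x123), 4)
// turning a constant that would otherwise need LUI+ADDI into a single XORI.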
if (!Subtarget->hasVendorXTHeadBb())
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
MVT VT = Node->getSimpleValueType(0);
const unsigned RightShAmt = N1C->getZExtValue();
auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
const unsigned LeftShAmt = N01C->getZExtValue();
if (LeftShAmt > RightShAmt)
const unsigned Msb = MsbPlusOne - 1;
const unsigned Lsb = RightShAmt - LeftShAmt;
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
const unsigned Msb = ExtSize - 1;
const unsigned Lsb = RightShAmt;
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
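// Editor's worked example (illustrative, XLEN=64): for
// (sra (shl X, 32), 48), LeftShAmt=32 <= RightShAmt=48, giving
// Msb = 64 - 32 - 1 = 31 and Lsb = 48 - 32 = 16, i.e. the selection
//   th.ext rd, rs1, 31, 16
// a sign-extending extract of bits [31:16] in one instruction.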
if (!Subtarget->hasVendorXTHeadMemIdx())
"Unexpected addressing mode");
int64_t Offset = C->getSExtValue();
for (Shift = 0; Shift < 4; Shift++)
  if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
if (LoadVT == MVT::i8 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
else if (LoadVT == MVT::i8 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
else if (LoadVT == MVT::i16 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
else if (LoadVT == MVT::i16 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
else if (LoadVT == MVT::i32 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
else if (LoadVT == MVT::i32 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
else if (LoadVT == MVT::i64 && IsPre)
  Opcode = RISCV::TH_LDIB;
else if (LoadVT == MVT::i64 && IsPost)
  Opcode = RISCV::TH_LDIA;
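// Editor's note (illustrative): the increment is encoded as a 5-bit signed
// immediate scaled by a shift of 0-3, found by the loop above. For example,
// an offset of 64 encodes as imm=8 with shift=3, since isInt<5>(64 >> 3)
// holds and 64 % (1 << 3) == 0; an offset of 17 has no valid encoding.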
unsigned IntNo = Node->getConstantOperandVal(1);
assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
        IntNo == Intrinsic::riscv_sf_vc_i_se) &&
       "Unexpected SiFive VCIX intrinsic");
unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));
Node->getOperand(4), Node->getOperand(5),
Node->getOperand(8), SEWOp,
Node->getOperand(0)};
auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
switch (LMulSDNode->getSExtValue()) {
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
                                              : RISCV::PseudoVC_I_SE_MF8;
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
                                              : RISCV::PseudoVC_I_SE_MF4;
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
                                              : RISCV::PseudoVC_I_SE_MF2;
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
                                              : RISCV::PseudoVC_I_SE_M1;
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
                                              : RISCV::PseudoVC_I_SE_M2;
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
                                              : RISCV::PseudoVC_I_SE_M4;
Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
                                              : RISCV::PseudoVC_I_SE_M8;
Opcode, DL, Node->getSimpleValueType(0), Operands));
if (Node->isMachineOpcode()) {
unsigned Opcode = Node->getOpcode();
MVT VT = Node->getSimpleValueType(0);
bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
auto *ConstNode = cast<ConstantSDNode>(Node);
if (ConstNode->isZero()) {
int64_t Imm = ConstNode->getSExtValue();
if (isUInt<8>(Imm) && isInt<6>(SignExtend64<8>(Imm)) && hasAllBUsers(Node))
  Imm = SignExtend64<8>(Imm);
if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
    hasAllHUsers(Node))
  Imm = SignExtend64<16>(Imm);
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
  Imm = SignExtend64<32>(Imm);
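// Editor's worked example (illustrative): Imm = 0xFFFFFFFF is not a valid
// sign-extended 32-bit value, but if every user only reads the low 32 bits
// (hasAllWUsers), it is canonicalized to SignExtend64<32>(Imm) = -1, which
// materializes as a single "addi rd, x0, -1" instead of a multi-instruction
// sequence.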
const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
auto [FPImm, NeedsFNeg] =
FNegOpc = RISCV::FSGNJN_H;
FNegOpc = RISCV::FSGNJN_S;
FNegOpc = RISCV::FSGNJN_D;
bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
bool HasZdinx = Subtarget->hasStdExtZdinx();
bool Is64Bit = Subtarget->is64Bit();
assert(Subtarget->hasStdExtZfbfmin());
Opc = RISCV::FMV_H_X;
Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
Opc = RISCV::FSGNJN_D;
Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
if (!Subtarget->hasStdExtZdinx())
Node->getOperand(0),
Node->getOperand(1),
if (Subtarget->hasStdExtZdinx()) {
if (!SDValue(Node, 0).use_empty()) {
Node->getOperand(0));
if (!SDValue(Node, 1).use_empty()) {
Node->getOperand(0));
if (!Subtarget->hasStdExtZfa())
"Unexpected subtarget");
if (!SDValue(Node, 0).use_empty()) {
Node->getOperand(0));
if (!SDValue(Node, 1).use_empty()) {
Node->getOperand(0));
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
if (TrailingZeros > 0 && LeadingZeros == 32) {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
if (ShAmt >= TrailingOnes)
if (TrailingOnes == 32) {
Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
if (HasBitTest && ShAmt + 1 == TrailingOnes) {
Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
if (ExtSize >= 32 || ShAmt >= ExtSize)
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
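// Editor's worked example (illustrative, XLEN=64): for
// (sra (sext_inreg X, i16), 4), ExtSize = 16 and ShAmt = 4, so
// LShAmt = 64 - 16 = 48 and the pair
//   slli rd, rs1, 48 ; srai rd, rd, 52
// performs the sign extension and the arithmetic shift in one go.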
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
if (!Subtarget->hasVendorXTHeadBb())
auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
unsigned C2 = C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
bool IsCANDI = isInt<6>(N1C->getSExtValue());
C1 &= maskTrailingZeros<uint64_t>(C2);
C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
if (C2 + 32 == Leading) {
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
const unsigned Lsb = C2;
if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
Skip |= HasBitTest && Leading == XLen - 1;
if (OneUseOrZExtW && !Skip) {
RISCV::SLLI, DL, VT, X,
if (C2 + Leading < XLen &&
    C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
if (OneUseOrZExtW && !IsCANDI) {
RISCV::SLLI, DL, VT, X,
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
unsigned SrliOpc = RISCV::SRLI;
isa<ConstantSDNode>(X.getOperand(1)) &&
X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
SrliOpc = RISCV::SRLIW;
X = X.getOperand(0);
if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
    OneUseOrZExtW && !IsCANDI) {
RISCV::SRLIW, DL, VT, X,
if (Trailing > 0 && Leading + Trailing == 32 && C2 + Trailing < XLen &&
    OneUseOrZExtW && Subtarget->hasStdExtZba()) {
RISCV::SRLI, DL, VT, X,
RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
RISCV::SRLI, DL, VT, X,
if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
RISCV::SRLIW, DL, VT, X,
if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
    Subtarget->hasStdExtZba()) {
RISCV::SRLI, DL, VT, X,
RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
const uint64_t C1 = N1C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
bool Skip = C2 > 32 && isInt<12>(N1C->getSExtValue()) &&
    isa<ConstantSDNode>(X.getOperand(1)) &&
    X.getConstantOperandVal(1) == 32;
RISCV::SRAI, DL, VT, X,
if (isMask_64(C1) && !isInt<12>(N1C->getSExtValue())) {
if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
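// Editor's worked example (illustrative): C1 = 0x3FFFF is a mask but not a
// simm12, so tryUnsignedBitfieldExtract can select a single zero-extracting
// instruction where available (e.g. th.extu rd, rs1, 17, 0 on XTHeadBb)
// instead of the generic slli/srli pair.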
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())
SDValue N0 = Node->getOperand(0);
(C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
unsigned XLen = Subtarget->getXLen();
unsigned ConstantShift = XLen - LeadingZeros;
uint64_t ShiftedC1 = C1 << ConstantShift;
ShiftedC1 = SignExtend64<32>(ShiftedC1);
if (Subtarget->hasVendorXCVmem() && !Subtarget->is64Bit()) {
SDValue Chain = Node->getOperand(0);
bool Simm12 = false;
bool SignExtend = Load->getExtensionType() == ISD::SEXTLOAD;
if (auto ConstantOffset = dyn_cast<ConstantSDNode>(Offset)) {
  int ConstantVal = ConstantOffset->getSExtValue();
  Simm12 = isInt<12>(ConstantVal);
unsigned Opcode = 0;
switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
if (Simm12 && SignExtend)
  Opcode = RISCV::CV_LB_ri_inc;
else if (Simm12 && !SignExtend)
  Opcode = RISCV::CV_LBU_ri_inc;
else if (!Simm12 && SignExtend)
  Opcode = RISCV::CV_LB_rr_inc;
else
  Opcode = RISCV::CV_LBU_rr_inc;
if (Simm12 && SignExtend)
  Opcode = RISCV::CV_LH_ri_inc;
else if (Simm12 && !SignExtend)
  Opcode = RISCV::CV_LHU_ri_inc;
else if (!Simm12 && SignExtend)
  Opcode = RISCV::CV_LH_rr_inc;
else
  Opcode = RISCV::CV_LHU_rr_inc;
if (Simm12)
  Opcode = RISCV::CV_LW_ri_inc;
else
  Opcode = RISCV::CV_LW_rr_inc;
unsigned IntNo = Node->getConstantOperandVal(0);
case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
SDValue Src1 = Node->getOperand(1);
SDValue Src2 = Node->getOperand(2);
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
bool IsCmpUnsignedZero = false;
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
  int64_t CVal = C->getSExtValue();
  if (CVal >= -15 && CVal <= 16) {
    if (!IsUnsigned || CVal != 0)
      break;
    IsCmpUnsignedZero = true;
unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
if (IsCmpUnsignedZero) {
{Cmp, Cmp, VL, SEW}));
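// Editor's note (illustrative): there is no native vmsge(u).vx, so the
// comparison is selected as its complement, roughly:
//   vmsge.vx v0, v8, a0   =>   vmslt.vx v0, v8, a0
//                              vmnand.mm v0, v0, v0
// and vmsgeu.vx with immediate 0 is always true, so it becomes vmset.m.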
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
SDValue Src1 = Node->getOperand(2);
SDValue Src2 = Node->getOperand(3);
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
bool IsCmpUnsignedZero = false;
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
  int64_t CVal = C->getSExtValue();
  if (CVal >= -15 && CVal <= 16) {
    if (!IsUnsigned || CVal != 0)
      break;
    IsCmpUnsignedZero = true;
unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
#undef CASE_VMSLT_OPCODES
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
if (IsCmpUnsignedZero) {
if (Mask == MaskedOff) {
{Mask, MaskedOff, VL, MaskSEW}));
if (Mask == MaskedOff) {
{Mask, Cmp, VL, MaskSEW}));
{MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
{Cmp, Mask, VL, MaskSEW}));
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
unsigned IntNo = Node->getConstantOperandVal(1);
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {
case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                IntNo == Intrinsic::riscv_vluxei_mask;
bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                 IntNo == Intrinsic::riscv_vloxei_mask;
MVT VT = Node->getSimpleValueType(0);
Operands.push_back(Node->getOperand(CurOp++));
"Element count mismatch");
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
"values when XLEN=32");
IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                IntNo == Intrinsic::riscv_vlse_mask;
bool IsStrided =
    IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
MVT VT = Node->getSimpleValueType(0);
bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
if (HasPassthruOperand)
  Operands.push_back(Node->getOperand(CurOp++));
RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                    static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
MVT VT = Node->getSimpleValueType(0);
Operands.push_back(Node->getOperand(CurOp++));
RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
                    Log2SEW, static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = Node->getConstantOperandVal(1);
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                IntNo == Intrinsic::riscv_vsuxei_mask;
bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                 IntNo == Intrinsic::riscv_vsoxei_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
Operands.push_back(Node->getOperand(CurOp++));
"Element count mismatch");
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
"values when XLEN=32");
IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                IntNo == Intrinsic::riscv_vsse_mask;
bool IsStrided =
    IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
Operands.push_back(Node->getOperand(CurOp++));
IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_sf_vc_x_se:
case Intrinsic::riscv_sf_vc_i_se:
MVT SrcVT = Node->getOperand(0).getSimpleValueType();
SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);
auto Idx = Node->getConstantOperandVal(2);
MVT SubVecContainerVT = SubVecVT;
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
[[maybe_unused]] bool ExactlyVecRegSized =
    .isKnownMultipleOf(Subtarget->expandVScale(VecRegSize));
    .getKnownMinValue()));
assert(Idx == 0 && (ExactlyVecRegSized || V.isUndef()));
MVT ContainerVT = VT;
ContainerVT = TLI.getContainerForFixedLengthVector(VT);
std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        ContainerVT, SubVecContainerVT, Idx, TRI);
[[maybe_unused]] bool IsSubVecPartReg =
assert((!IsSubVecPartReg || V.isUndef()) &&
       "Expecting lowering to have created legal INSERT_SUBVECTORs when "
       "the subvector is smaller than a full-sized register");
if (SubRegIdx == RISCV::NoSubRegister) {
unsigned InRegClassID =
"Unexpected subvector extraction");
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);
MVT InVT = V.getSimpleValueType();
MVT SubVecContainerVT = VT;
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
InVT = TLI.getContainerForFixedLengthVector(InVT);
std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        InVT, SubVecContainerVT, Idx, TRI);
if (SubRegIdx == RISCV::NoSubRegister) {
"Unexpected subvector extraction");
if (!Node->getOperand(0).isUndef())
SDValue Src = Node->getOperand(1);
auto *Ld = dyn_cast<LoadSDNode>(Src);
if (!Ld || Ld->isIndexed())
EVT MemVT = Ld->getMemoryVT();
if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
/*IsMasked*/ false, IsStrided, /*FF*/ false,
Log2SEW, static_cast<unsigned>(LMUL));
unsigned Locality = Node->getConstantOperandVal(3);
if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
int NontemporalLevel = 0;
NontemporalLevel = 3;
NontemporalLevel = 1;
NontemporalLevel = 0;
if (NontemporalLevel & 0b1)
if (NontemporalLevel & 0b10)
std::vector<SDValue> &OutOps) {
switch (ConstraintID) {
assert(Found && "SelectAddrRegImm should always succeed");
OutOps.push_back(Op0);
OutOps.push_back(Op1);
OutOps.push_back(Op);
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isInt<12>(CVal)) {
bool IsPrefetch = false) {
if (!isa<ConstantSDNode>(Addr))
int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
int64_t Lo12 = SignExtend64<12>(CVal);
if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
  if (IsPrefetch && (Lo12 & 0b11111) != 0)
  int64_t Hi20 = (Hi >> 12) & 0xfffff;
if (Seq.back().getOpcode() != RISCV::ADDI)
Lo12 = Seq.back().getImm();
if (IsPrefetch && (Lo12 & 0b11111) != 0)
assert(!Seq.empty() && "Expected more instructions in sequence");
for (auto *Use : Add->uses()) {
EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
cast<StoreSDNode>(Use)->getValue() == Add)
cast<AtomicSDNode>(Use)->getVal() == Add)
unsigned MaxShiftAmount,
EVT VT = Addr.getSimpleValueType();
if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
  if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
    ShiftAmt = N.getConstantOperandVal(1);
return ShiftAmt != 0;
if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
  isInt<12>(C1->getSExtValue())) {
} else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
  UnwrapShl(Addr.getOperand(1), Index, Scale);
} else if (UnwrapShl(Addr, Index, Scale)) {
MVT VT = Addr.getSimpleValueType();
int64_t RV32ZdinxRange = IsINX ? 4 : 0;
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
  GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
  if (CVal == 0 || Alignment > CVal) {
    int64_t CombinedOffset = CVal + GA->getOffset();
    CombinedOffset, GA->getTargetFlags());
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
         "simm12 not already handled?");
  if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
    int64_t Adj = CVal < 0 ? -2048 : 2047;
MVT VT = Addr.getSimpleValueType();
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal)) {
  if ((CVal & 0b11111) != 0) {
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!isInt<12>(CVal) && "simm12 not already handled?");
  if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
    int64_t Adj = CVal < 0 ? -2048 : 2016;
    int64_t AdjustedOffset = CVal - Adj;
    RISCV::ADDI, DL, VT, Addr.getOperand(0),
if (isa<ConstantSDNode>(Addr.getOperand(1)))
if (Imm != 0 && Imm % ShiftWidth == 0) {
if (Imm != 0 && Imm % ShiftWidth == 0) {
unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
if (Imm % ShiftWidth == ShiftWidth - 1) {
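// Editor's note (illustrative): shifts only read the low log2(ShiftWidth)
// bits of the amount, so a redundant masking AND such as
//   (srl X, (and Y, 63))   with ShiftWidth = 64
// can feed Y directly as the shift amount, and an add/sub of a multiple of
// ShiftWidth (the Imm % ShiftWidth == 0 checks above) can likewise be
// simplified away or replaced by a cheaper negate.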
2870 "Unexpected condition code!");
2877 ISD::CondCode CCVal = cast<CondCodeSDNode>(
N->getOperand(2))->get();
2878 if (CCVal != ExpectedCCVal)
2884 if (!
LHS.getValueType().isScalarInteger())
2895 if (
auto *
C = dyn_cast<ConstantSDNode>(
RHS)) {
2896 int64_t CVal =
C->getSExtValue();
2899 if (CVal == -2048) {
2902 RISCV::XORI,
DL,
N->getValueType(0),
LHS,
2909 if (isInt<12>(CVal) || CVal == 2048) {
2912 RISCV::ADDI,
DL,
N->getValueType(0),
LHS,
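// Editor's worked example (illustrative): for an equality test against a
// small constant, e.g. (setcc X, 5, seteq), the selector emits
// "addi t0, X, -5" and lets the caller compare t0 against zero (seqz/snez).
// The -2048 case uses XORI instead, because its negation, 2048, does not
// fit in a simm12.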
cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
Val = N.getOperand(0);
auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
  if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
  N.getConstantOperandVal(1) == ShiftAmt &&
MVT VT = N.getSimpleValueType();
auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
  Val = N.getOperand(0);
MVT VT = N.getSimpleValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
uint64_t Mask = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
Mask &= maskTrailingZeros<uint64_t>(C2);
Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
  EVT VT = N.getValueType();
if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
  EVT VT = N.getValueType();
} else if (bool LeftShift = N.getOpcode() == ISD::SHL;
           (LeftShift || N.getOpcode() == ISD::SRL) &&
           isa<ConstantSDNode>(N.getOperand(1))) {
unsigned C1 = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
if (LeftShift && Leading == 32 && Trailing > 0 &&
    (Trailing + C1) == ShAmt) {
  EVT VT = N.getValueType();
if (!LeftShift && Leading == 32 && Trailing > C1 &&
    (Trailing - C1) == ShAmt) {
  EVT VT = N.getValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
uint64_t Mask = N.getConstantOperandVal(1);
Mask &= maskTrailingZeros<uint64_t>(C2);
if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
  EVT VT = N.getValueType();
bool HasGlueOp = User->getGluedNode() != nullptr;
bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
if (UserOpNo == VLIdx)
auto NumDemandedBits =
return NumDemandedBits && Bits >= *NumDemandedBits;
const unsigned Depth) const {
isa<ConstantSDNode>(Node) || Depth != 0) &&
"Unexpected opcode");
if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
if (!User->isMachineOpcode())
switch (User->getMachineOpcode()) {
case RISCV::SLLI_UW:
case RISCV::FMV_W_X:
case RISCV::FCVT_H_W:
case RISCV::FCVT_H_W_INX:
case RISCV::FCVT_H_WU:
case RISCV::FCVT_H_WU_INX:
case RISCV::FCVT_S_W:
case RISCV::FCVT_S_W_INX:
case RISCV::FCVT_S_WU:
case RISCV::FCVT_S_WU_INX:
case RISCV::FCVT_D_W:
case RISCV::FCVT_D_W_INX:
case RISCV::FCVT_D_WU:
case RISCV::FCVT_D_WU_INX:
case RISCV::TH_REVW:
case RISCV::TH_SRRIW:
if (UI.getOperandNo() == 1 && Bits >= Log2_32(Subtarget->getXLen()))
if (Bits >= Subtarget->getXLen() - User->getConstantOperandVal(1))
if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
unsigned ShAmt = User->getConstantOperandVal(1);
case RISCV::FMV_H_X:
case RISCV::ZEXT_H_RV32:
case RISCV::ZEXT_H_RV64:
if (Bits >= (Subtarget->getXLen() / 2))
case RISCV::SH1ADD_UW:
case RISCV::SH2ADD_UW:
case RISCV::SH3ADD_UW:
if (UI.getOperandNo() == 0 && Bits >= 32)
if (UI.getOperandNo() == 0 && Bits >= 8)
if (UI.getOperandNo() == 0 && Bits >= 16)
if (UI.getOperandNo() == 0 && Bits >= 32)
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  int64_t Offset = C->getSExtValue();
  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
EVT Ty = N->getValueType(0);
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
  N->getValueType(0));
} else if (C && C->isAllOnes()) {
  N->getValueType(0));
} else if (isa<RegisterSDNode>(N) &&
           cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
  N->getValueType(0));
if (!N.getOperand(0).isUndef())
N = N.getOperand(1);
!Splat.getOperand(0).isUndef())
assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");
SplatVal = Splat.getOperand(1);
std::function<bool(int64_t)> ValidateImm) {
if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
"Unexpected splat operand type");
APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
if (!ValidateImm(SplatImm))
[](int64_t Imm) { return isInt<5>(Imm); });
N, SplatVal, *CurDAG, *Subtarget,
[](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
N, SplatVal, *CurDAG, *Subtarget,
[Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
auto IsExtOrTrunc = [](SDValue N) {
  switch (N->getOpcode()) {
while (IsExtOrTrunc(N)) {
  if (!N.hasOneUse() || N.getScalarValueSizeInBits() < 8)
  N = N->getOperand(0);
->getLegalZfaFPImm(APF, VT)
if (VT == MVT::f64 && !Subtarget->is64Bit()) {
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  if (!isInt<5>(ImmVal))
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
if (N->getMachineOpcode() != RISCV::ADDIW ||
case RISCV::ADD:  Opc = RISCV::ADDW;  break;
case RISCV::ADDI: Opc = RISCV::ADDIW; break;
case RISCV::SUB:  Opc = RISCV::SUBW;  break;
case RISCV::MUL:  Opc = RISCV::MULW;  break;
case RISCV::SLLI: Opc = RISCV::SLLIW; break;
!isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
case RISCV::TH_MULAW:
case RISCV::TH_MULAH:
case RISCV::TH_MULSW:
case RISCV::TH_MULSH:
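// Editor's worked example (illustrative): "addiw rd, rs, 0" is sext.w, so
// when it is fed by a 32-bit-safe arithmetic op the peephole rewrites, e.g.:
//   addiw rd, (add rs1, rs2), 0   ->   addw rd, rs1, rs2
// folding the sign extension into the W-form instruction.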
if (!isa<RegisterSDNode>(MaskOp) ||
    cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
const auto *Glued = GlueOp.getNode();
if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
    cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
const auto IsVMSet = [](unsigned Opc) {
  return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
         Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
         Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
         Opc == RISCV::PseudoVMSET_M_B8;
N->getOperand(N->getNumOperands() - 1));
if (!V.isMachineOpcode())
if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
  for (unsigned I = 1; I < V.getNumOperands(); I += 2)
return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
unsigned MaskOpIdx = I->MaskOpIdx;
const unsigned Opc = I->UnmaskedPseudo;
"Masked and unmasked pseudos are inconsistent");
assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");
for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
  if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
const auto *Glued = N->getGluedNode();
if (auto *TGlued = Glued->getGluedNode())
if (!N->memoperands_empty())
Result->setFlags(N->getFlags());
return RISCV::PseudoVMSET_M_B1;
return RISCV::PseudoVMSET_M_B2;
return RISCV::PseudoVMSET_M_B4;
return RISCV::PseudoVMSET_M_B8;
return RISCV::PseudoVMSET_M_B16;
return RISCV::PseudoVMSET_M_B32;
return RISCV::PseudoVMSET_M_B64;
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
Passthru = N->getOperand(0);
False = N->getOperand(0);
True = N->getOperand(1);
VL = N->getOperand(2);
Passthru = N->getOperand(0);
False = N->getOperand(1);
True = N->getOperand(2);
Mask = N->getOperand(3);
VL = N->getOperand(4);
Glue = N->getOperand(N->getNumOperands() - 1);
assert(!Mask || cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
"Expect True is the first output of an instruction.");
bool IsMasked = false;
RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
if (!Info && HasTiedDest) {
  Info = RISCV::getMaskedPseudoInfo(TrueOpc);
assert(!(IsMasked && !HasTiedDest) && "Expected tied dest");
if (False != PassthruOpTrue)
if (IsMasked && Mask) {
unsigned TrueVLIndex =
    True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;
VL = GetMinVL(TrueVL, VL);
if (TrueVL != VL || !IsMasked)
RISCV::V0, AllOnesMask, SDValue());
unsigned MaskedOpc = Info->MaskedPseudo;
"Expected instructions with mask have policy operand.");
"Expected instructions with mask have a tied dest.");
bool MergeVLShrunk = VL != OrigVL;
const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
assert(!IsMasked || NormalOpsEnd == Info->MaskOpIdx);
if (HasRoundingMode)
Ops.append({VL, SEW, PolicyOp});
if (!cast<MachineSDNode>(True)->memoperands_empty())
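// Editor's note (illustrative): this combine folds a vmerge into the
// operation producing its "true" operand, roughly:
//   (vmerge mask, false, (vadd.vv x, y), vl)
//     -> (vadd.vv.mask false, x, y, mask, min(vl_true, vl))
// i.e. the masked form of the true-side pseudo is used directly, with the
// merge's false operand becoming the passthru.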
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
bool MadeChange = false;
if (N->use_empty() || !N->isMachineOpcode())
MadeChange |= performCombineVMergeAndVOps(N);
bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
bool MadeChange = false;
if (N->use_empty() || !N->isMachineOpcode())
const unsigned Opc = N->getMachineOpcode();
if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
Result->setFlags(N->getFlags());