//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

namespace llvm {
namespace RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm

void RISCVDAGToDAGISel::PreprocessISelDAG() {
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    SDValue Result;
    switch (N->getOpcode()) {
    case ISD::SPLAT_VECTOR: {
      // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
      // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
                               N->getOperand(0), VL);
      break;
    }
    case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
      // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
      // load. Done after lowering and combining so that we have a chance to
      // optimize this to VMV_V_X_VL when the upper bits aren't needed.
      assert(N->getNumOperands() == 4 && "Unexpected number of operands");
      MVT VT = N->getSimpleValueType(0);
      SDValue Passthru = N->getOperand(0);
      SDValue Lo = N->getOperand(1);
      SDValue Hi = N->getOperand(2);
      SDValue VL = N->getOperand(3);
      assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
             Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
             "Unexpected VTs!");
      MachineFunction &MF = CurDAG->getMachineFunction();
      RISCVMachineFunctionInfo *FuncInfo =
          MF.getInfo<RISCVMachineFunctionInfo>();
      SDLoc DL(N);

      // We use the same frame index we use for moving two i32s into 64-bit FPR.
      // This is an analogous operation.
      int FI = FuncInfo->getMoveF64FrameIndex(MF);
      MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
      const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
      SDValue StackSlot =
          CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

      SDValue Chain = CurDAG->getEntryNode();
      Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

      SDValue OffsetSlot =
          CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
      Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                            Align(8));

      Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

      SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
      SDValue IntID =
          CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
      SDValue Ops[] = {Chain,
                       IntID,
                       Passthru,
                       StackSlot,
                       CurDAG->getRegister(RISCV::X0, MVT::i64),
                       VL};

      Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
                                           MVT::i64, MPI, Align(8),
                                           MachineMemOperand::MOLoad);
      break;
    }
    }

    if (Result) {
      LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld: ");
      LLVM_DEBUG(N->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\nNew: ");
      LLVM_DEBUG(Result->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\n");

      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      MadeChange = true;
    }
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}
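
// Illustrative sketch (not emitted verbatim by this function): on RV32, a
// splat of a 64-bit value Hi:Lo that survives to here becomes roughly
//   sw   lo, 0(slot)          # spill both halves to the shared stack slot
//   sw   hi, 4(slot)
//   vlse64.v v8, (slot), x0   # stride-0 load broadcasts the 64-bit element
// where the register choices are hypothetical.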

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  HandleSDNode Dummy(CurDAG->getRoot());
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeMaskedRVV(N);
  }

  CurDAG->setRoot(Dummy.getValue());

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}
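
// The two peepholes used above are defined elsewhere in this file; as a
// sketch of intent, doPeepholeSExtW folds a sign-extend-from-32 (ADDIW
// rd, rs, 0) into a producer that can switch to its W-form, and
// doPeepholeMaskedRVV swaps a masked RVV pseudo whose mask is all ones for
// the cheaper unmasked variant.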

static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                            RISCVMatInt::InstSeq &Seq) {
  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, VT);
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SDImm);
      break;
    case RISCVMatInt::RegX0:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, VT));
      break;
    case RISCVMatInt::RegReg:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SrcReg);
      break;
    case RISCVMatInt::RegImm:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SDImm);
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}

static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                         int64_t Imm, const RISCVSubtarget &Subtarget) {
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());

  return selectImmSeq(CurDAG, DL, VT, Seq);
}
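
// For example, materializing 0x12345678 on RV64 yields a two-entry
// RISCVMatInt sequence, roughly:
//   lui   a0, 0x12345       # a0 = 0x12345000
//   addiw a0, a0, 0x678     # a0 = 0x12345678
// (register choice hypothetical); selectImmSeq chains each instruction's
// result into the next, starting from X0.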
194 
196  unsigned NF, RISCVII::VLMUL LMUL) {
197  static const unsigned M1TupleRegClassIDs[] = {
198  RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
199  RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
200  RISCV::VRN8M1RegClassID};
201  static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
202  RISCV::VRN3M2RegClassID,
203  RISCV::VRN4M2RegClassID};
204 
205  assert(Regs.size() >= 2 && Regs.size() <= 8);
206 
207  unsigned RegClassID;
208  unsigned SubReg0;
209  switch (LMUL) {
210  default:
211  llvm_unreachable("Invalid LMUL.");
216  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
217  "Unexpected subreg numbering");
218  SubReg0 = RISCV::sub_vrm1_0;
219  RegClassID = M1TupleRegClassIDs[NF - 2];
220  break;
222  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
223  "Unexpected subreg numbering");
224  SubReg0 = RISCV::sub_vrm2_0;
225  RegClassID = M2TupleRegClassIDs[NF - 2];
226  break;
228  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
229  "Unexpected subreg numbering");
230  SubReg0 = RISCV::sub_vrm4_0;
231  RegClassID = RISCV::VRN2M4RegClassID;
232  break;
233  }
234 
235  SDLoc DL(Regs[0]);
237 
238  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
239 
240  for (unsigned I = 0; I < Regs.size(); ++I) {
241  Ops.push_back(Regs[I]);
242  Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
243  }
244  SDNode *N =
245  CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
246  return SDValue(N, 0);
247 }
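
// For instance, packing two LMUL=1 values (NF=2) yields a REG_SEQUENCE in
// the VRN2M1 register class with the inputs at sub_vrm1_0 and sub_vrm1_1;
// segment load/store pseudos then consume the tuple as a single operand.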

void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // Masked load has the tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
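
// The operand order this helper produces, shared by the RVV load/store
// pseudos, is:
//   base, [stride or index], [mask (in V0)], VL, SEW, [policy], chain, [glue]
// where bracketed entries appear only for the variants that need them.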

static bool isAllUndef(ArrayRef<SDValue> Values) {
  return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
}

void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(Merge);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
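
// E.g. a vlseg2e32 intrinsic becomes a single pseudo returning an untyped
// two-register tuple; the loop above then peels each field off with an
// extract_subreg, rewiring the node's NF results plus the chain.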

void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               XLenVT, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/false, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
  if (!Subtarget->hasVInstructions())
    return;

  assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
         "Unexpected opcode");

  SDLoc DL(Node);
  MVT XLenVT = Subtarget->getXLenVT();

  bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNoOffset = HasChain ? 1 : 0;
  unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax ||
          IntNo == Intrinsic::riscv_vsetvli_opt ||
          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
         "Unexpected vsetvli intrinsic");

  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
               IntNo == Intrinsic::riscv_vsetvlimax_opt;
  unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);

  assert(Node->getNumOperands() == Offset + 2 &&
         "Unexpected number of operands");

  unsigned SEW =
      RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
      Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                            /*MaskAgnostic*/ false);
  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

  SmallVector<EVT, 2> VTs = {XLenVT};
  if (HasChain)
    VTs.push_back(MVT::Other);

  SDValue VLOperand;
  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (VLMax) {
    VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
    Opcode = RISCV::PseudoVSETVLIX0;
  } else {
    VLOperand = Node->getOperand(IntNoOffset + 1);

    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
      uint64_t AVL = C->getZExtValue();
      if (isUInt<5>(AVL)) {
        SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
        SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
        if (HasChain)
          Ops.push_back(Node->getOperand(0));
        ReplaceNode(
            Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
        return;
      }
    }
  }

  SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
  if (HasChain)
    Ops.push_back(Node->getOperand(0));

  ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
}
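
// For example, a vsetvli intrinsic with constant AVL 4 at e32/m1 selects
// PseudoVSETIVLI, printing as something like
//   vsetivli a0, 4, e32, m1, ta, mu
// while a vsetvlimax request instead uses PseudoVSETVLIX0 with x0 as the AVL
// operand (destination register hypothetical).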

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);

  switch (Opcode) {
  case ISD::Constant: {
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isZero()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    int64_t Imm = ConstNode->getSExtValue();
    // If the upper XLen-16 bits are not used, try to convert this to a simm12
    // by sign extending bit 15.
    if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
        hasAllHUsers(Node))
      Imm = SignExtend64<16>(Imm);
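    // For example, 0xF800 would normally need LUI+ADDI, but when every user
    // only reads the low 16 bits it becomes the simm12 value -2048 and is
    // materialized with a single ADDI.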
    // If the upper 32 bits are not used, try to convert this into a simm32 by
    // sign extending bit 31.
    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64<32>(Imm);

    ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
    return;
  }
  case ISD::SHL: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
        !isa<ConstantSDNode>(N0.getOperand(1)))
      break;
    unsigned ShAmt = N1C->getZExtValue();
    uint64_t Mask = N0.getConstantOperandVal(1);

    // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
    // 32 leading zeros and C3 trailing zeros.
    if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
      unsigned XLen = Subtarget->getXLen();
      unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
      unsigned TrailingZeros = countTrailingZeros(Mask);
      if (TrailingZeros > 0 && LeadingZeros == 32) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, N0->getOperand(0),
            CurDAG->getTargetConstant(TrailingZeros, DL, VT));
        SDNode *SLLI = CurDAG->getMachineNode(
            RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
            CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }
    break;
  }
  case ISD::SRL: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
        !isa<ConstantSDNode>(N0.getOperand(1)))
      break;
    unsigned ShAmt = N1C->getZExtValue();
    uint64_t Mask = N0.getConstantOperandVal(1);

    // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
    // 32 leading zeros and C3 trailing zeros.
    if (isShiftedMask_64(Mask)) {
      unsigned XLen = Subtarget->getXLen();
      unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
      unsigned TrailingZeros = countTrailingZeros(Mask);
      if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, N0->getOperand(0),
            CurDAG->getTargetConstant(TrailingZeros, DL, VT));
        SDNode *SLLI = CurDAG->getMachineNode(
            RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
            CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Optimize (srl (and X, C2), C) ->
    //          (srli (slli X, (XLen-C3)), (XLen-C3) + C)
    // Where C2 is a mask with C3 trailing ones.
    // This takes into account that C2 may have had lower bits unset by
    // SimplifyDemandedBits, and avoids materializing the C2 immediate.
    // This pattern occurs when type legalizing right shifts for types with
    // less than XLen bits.
    Mask |= maskTrailingOnes<uint64_t>(ShAmt);
    if (!isMask_64(Mask))
      break;
    unsigned TrailingOnes = countTrailingOnes(Mask);
    // 32 trailing ones should use srliw via tablegen pattern.
    if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
      break;
    unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRLI = CurDAG->getMachineNode(
        RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRLI);
    return;
  }
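  // Worked example of the transform above, with XLen=64:
  //   (srl (and x, 0xffff), 4) -> (srli (slli x, 48), 52)
  // The slli/srli pair isolates bits 4..15 of x without materializing 0xffff.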
  case ISD::SRA: {
    // Optimize (sra (sext_inreg X, i16), C) ->
    //          (srai (slli X, (XLen-16)), (XLen-16) + C)
    // And (sra (sext_inreg X, i8), C) ->
    //     (srai (slli X, (XLen-8)), (XLen-8) + C)
    // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
    // This transform matches the code we get without Zbb. The shifts are more
    // compressible, and this can help expose CSE opportunities in the sdiv by
    // constant optimization.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
      break;
    unsigned ShAmt = N1C->getZExtValue();
    unsigned ExtSize =
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    // ExtSize of 32 should use sraiw via tablegen pattern.
    if (ExtSize >= 32 || ShAmt >= ExtSize)
      break;
    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRAI = CurDAG->getMachineNode(
        RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRAI);
    return;
  }
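  // For instance, with XLen=64, (sra (sext_inreg x, i8), 2) becomes
  //   slli x, 56; srai x, 58
  // which sign-extends the byte and performs the shift in the same pair.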
  case ISD::AND: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (!LeftShift && N0.getOpcode() != ISD::SRL)
      break;

    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C)
      break;
    unsigned C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

    uint64_t C1 = N1C->getZExtValue();

    // Keep track of whether this is a c.andi. If we can't use c.andi, the
    // shift pair might offer more compression opportunities.
    // TODO: We could check for C extension here, but we don't have many lit
    // tests with the C extension enabled so not checking gets better coverage.
    // TODO: What if ANDI is faster than the shift?
    bool IsCANDI = isInt<6>(N1C->getSExtValue());

    // Clear irrelevant bits in the mask.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32).
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2), c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
    if (!LeftShift && isMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));
      if (C2 < Leading) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
        if (C2 + 32 == Leading) {
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
            X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
                                     CurDAG->getTargetConstant(31, DL, VT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(Leading - 32, DL, VT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        // Skip if we could use (zext.w (sraiw X, C2)).
        bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
                    X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
        // Also Skip if we can use bexti.
        Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
        if (OneUseOrZExtW && !Skip) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, VT, X,
              CurDAG->getTargetConstant(Leading - C2, DL, VT));
          SDNode *SRLI = CurDAG->getMachineNode(
              RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
              CurDAG->getTargetConstant(Leading, DL, VT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));

      if (C2 + Leading < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLI_UW = CurDAG->getMachineNode(
              RISCV::SLLI_UW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
          ReplaceNode(Node, SLLI_UW);
          return;
        }

        // (srli (slli x, c2+c3), c3)
        if (OneUseOrZExtW && !IsCANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, VT, X,
              CurDAG->getTargetConstant(C2 + Leading, DL, VT));
          SDNode *SRLI = CurDAG->getMachineNode(
              RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
              CurDAG->getTargetConstant(Leading, DL, VT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
    // shifted mask with c2 leading zeros and c3 trailing zeros.
    if (!LeftShift && isShiftedMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));
      unsigned Trailing = countTrailingZeros(C1);
      if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW && !IsCANDI) {
        unsigned SrliOpc = RISCV::SRLI;
        // If the input is zexti32 we should use SRLIW.
        if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
          SrliOpc = RISCV::SRLIW;
          X = X.getOperand(0);
        }
        SDNode *SRLI = CurDAG->getMachineNode(
            SrliOpc, DL, VT, X,
            CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
      if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
          OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, X,
            CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
    // shifted mask with no leading zeros and c3 trailing zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));
      unsigned Trailing = countTrailingZeros(C1);
      if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, VT, X,
            CurDAG->getTargetConstant(Trailing - C2, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
      if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, X,
            CurDAG->getTargetConstant(Trailing - C2, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    break;
  }
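  // Worked example of the shr form, with XLen=64:
  //   (and (srl x, 52), 0xff0) -> (slli (srli x, 56), 4)
  // 0xff0 is a shifted mask with 52 leading and 4 trailing zeros, so the
  // srli/slli pair moves bits 56..63 of x into positions 4..11 without
  // materializing the mask.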
  case ISD::MUL: {
    // Special case for calculating (mul (and X, C2), C1) where the full product
    // fits in XLen bits. We can shift X left by the number of leading zeros in
    // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
    // product has XLen trailing zeros, putting it in the output of MULHU. This
    // can avoid materializing a constant in a register for C2.

    // RHS should be a constant.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    // LHS should be an AND with constant.
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;

    uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();

    // Constant should be a mask.
    if (!isMask_64(C2))
      break;

    // If this can be an ANDI, ZEXT.H or ZEXT.W, don't do this if the ANDI/ZEXT
    // has multiple users or the constant is a simm12. This prevents inserting
    // a shift and still have uses of the AND/ZEXT. Shifting a simm12 will
    // likely make it more costly to materialize. Otherwise, using a SLLI
    // might allow it to be compressed.
    bool IsANDIOrZExt =
        isInt<12>(C2) ||
        (C2 == UINT64_C(0xFFFF) &&
         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
        (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
    if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
      break;

    // We need to shift left the AND input and C1 by a total of XLen bits.

    // How far left do we need to shift the AND input?
    unsigned XLen = Subtarget->getXLen();
    unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));

    // The constant gets shifted by the remaining amount unless that would
    // shift bits out.
    uint64_t C1 = N1C->getZExtValue();
    unsigned ConstantShift = XLen - LeadingZeros;
    if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
      break;

    uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this is RV32, we need to sign extend the constant.
    if (XLen == 32)
      ShiftedC1 = SignExtend64<32>(ShiftedC1);

    // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
    SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                               CurDAG->getTargetConstant(LeadingZeros, DL, VT));
    SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
                                           SDValue(SLLI, 0), SDValue(Imm, 0));
    ReplaceNode(Node, MULHU);
    return;
  }
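  // Why this is sound, sketched for XLen=64 with C2 = 0x3fffffff (34 leading
  // zeros) and C1 = 9:
  //   mulhu((x & C2) << 34, 9 << 30) = ((x & C2) * 9 * 2^64) >> 64
  //                                  = (x & C2) * 9
  // so the high half of the widened multiply reproduces the full product.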
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
    // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
    break;
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // If vmsgeu with 0 immediate, expand it to vmset.
      if (IsCmpUnsignedZero) {
        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
        return;
      }

      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
          VMOROpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    break;
        CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_OPCODES
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
      if (IsCmpUnsignedZero) {
        // We don't need vmor if the MaskedOff and the Mask are the same
        // value.
        if (Mask == MaskedOff) {
          ReplaceUses(Node, Mask.getNode());
          return;
        }
        ReplaceNode(Node,
                    CurDAG->getMachineNode(VMOROpcode, DL, VT,
                                           {Mask, MaskedOff, VL, MaskSEW}));
        return;
      }

      // If the MaskedOff value and the Mask are the same value use
      // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      // The result is mask undisturbed.
      // We use the same instructions to emulate mask agnostic behavior, because
      // the agnostic result can be either undisturbed or all 1.
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      // vmxor.mm vd, vd, v0 is used to update active value.
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      return selectVSETVLI(Node);
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
      return selectVSETVLI(Node);
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {
      selectVLSEGFF(Node, /*IsMasked*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {
      selectVLSEGFF(Node, /*IsMasked*/ true);
      return;
    }
    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo.
        CurOp++;

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/true, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
          IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vlm:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // The riscv_vlm intrinsic is always tail agnostic and has no passthru
      // operand.
      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = HasPassthruOperand &&
                  (IsMasked || !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else if (HasPassthruOperand)
        // Skip the undef passthru operand for nomask TA version pseudo.
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands, /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                              static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo.
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ false, Operands,
                                 /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                              Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Load = CurDAG->getMachineNode(
          P->Pseudo, DL, Node->getVTList(), Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    case Intrinsic::riscv_vsseg2:
    case Intrinsic::riscv_vsseg3:
    case Intrinsic::riscv_vsseg4:
    case Intrinsic::riscv_vsseg5:
    case Intrinsic::riscv_vsseg6:
    case Intrinsic::riscv_vsseg7:
    case Intrinsic::riscv_vsseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vsseg2_mask:
    case Intrinsic::riscv_vsseg3_mask:
    case Intrinsic::riscv_vsseg4_mask:
    case Intrinsic::riscv_vsseg5_mask:
    case Intrinsic::riscv_vsseg6_mask:
    case Intrinsic::riscv_vsseg7_mask:
    case Intrinsic::riscv_vsseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vssseg2:
    case Intrinsic::riscv_vssseg3:
    case Intrinsic::riscv_vssseg4:
    case Intrinsic::riscv_vssseg5:
    case Intrinsic::riscv_vssseg6:
    case Intrinsic::riscv_vssseg7:
    case Intrinsic::riscv_vssseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vssseg2_mask:
    case Intrinsic::riscv_vssseg3_mask:
    case Intrinsic::riscv_vssseg4_mask:
    case Intrinsic::riscv_vssseg5_mask:
    case Intrinsic::riscv_vssseg6_mask:
    case Intrinsic::riscv_vssseg7_mask:
    case Intrinsic::riscv_vssseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vsoxseg2:
    case Intrinsic::riscv_vsoxseg3:
    case Intrinsic::riscv_vsoxseg4:
    case Intrinsic::riscv_vsoxseg5:
    case Intrinsic::riscv_vsoxseg6:
    case Intrinsic::riscv_vsoxseg7:
    case Intrinsic::riscv_vsoxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2:
    case Intrinsic::riscv_vsuxseg3:
    case Intrinsic::riscv_vsuxseg4:
    case Intrinsic::riscv_vsuxseg5:
    case Intrinsic::riscv_vsuxseg6:
    case Intrinsic::riscv_vsuxseg7:
    case Intrinsic::riscv_vsuxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxseg2_mask:
    case Intrinsic::riscv_vsoxseg3_mask:
    case Intrinsic::riscv_vsoxseg4_mask:
    case Intrinsic::riscv_vsoxseg5_mask:
    case Intrinsic::riscv_vsoxseg6_mask:
    case Intrinsic::riscv_vsoxseg7_mask:
    case Intrinsic::riscv_vsoxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2_mask:
    case Intrinsic::riscv_vsuxseg3_mask:
    case Intrinsic::riscv_vsuxseg4_mask:
    case Intrinsic::riscv_vsuxseg5_mask:
    case Intrinsic::riscv_vsuxseg6_mask:
    case Intrinsic::riscv_vsuxseg7_mask:
    case Intrinsic::riscv_vsuxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxei:
    case Intrinsic::riscv_vsoxei_mask:
    case Intrinsic::riscv_vsuxei:
    case Intrinsic::riscv_vsuxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                      IntNo == Intrinsic::riscv_vsuxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                       IntNo == Intrinsic::riscv_vsoxei_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/false, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
          IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
          static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    case Intrinsic::riscv_vsm:
    case Intrinsic::riscv_vse:
    case Intrinsic::riscv_vse_mask:
    case Intrinsic::riscv_vsse:
    case Intrinsic::riscv_vsse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                      IntNo == Intrinsic::riscv_vsse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    }
    break;
  }
  case ISD::BITCAST: {
    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
    // Just drop bitcasts between vectors if both are fixed or both are
    // scalable.
    if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
        (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
      ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);
    SDLoc DL(SubV);
    auto Idx = Node->getConstantOperandVal(2);
    MVT SubVecVT = SubV.getSimpleValueType();

    const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
    MVT SubVecContainerVT = SubVecVT;
    // Establish the correct scalable-vector types for any fixed-length type.
    if (SubVecVT.isFixedLengthVector())
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
    if (VT.isFixedLengthVector())
      VT = TLI.getContainerForFixedLengthVector(VT);

    const auto *TRI = Subtarget->getRegisterInfo();
    unsigned SubRegIdx;
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            VT, SubVecContainerVT, Idx, TRI);

    // If the Idx hasn't been completely eliminated then this is a subvector
    // insert which doesn't naturally align to a vector register. These must
    // be handled using instructions to manipulate the vector registers.
    if (Idx != 0)
      break;

    RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
    bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
                           SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
                           SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
    (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
    assert((!IsSubVecPartReg || V.isUndef()) &&
           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
           "the subvector is smaller than a full-sized register");

    // If we haven't set a SubRegIdx, then we must be going between
    // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
    if (SubRegIdx == RISCV::NoSubRegister) {
      unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
      assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
                 InRegClassID &&
             "Unexpected subvector extraction");
      SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
      SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                               DL, VT, SubV, RC);
      ReplaceNode(Node, NewNode);
      return;
    }

    SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
    ReplaceNode(Node, Insert.getNode());
    return;
  }
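  // E.g. inserting an LMUL=1 subvector into an LMUL=2 vector at a register
  // aligned index becomes a plain insert_subreg, while an equally-sized
  // insert degenerates to COPY_TO_REGCLASS; neither needs vector
  // instructions.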
1621  case ISD::EXTRACT_SUBVECTOR: {
1622  SDValue V = Node->getOperand(0);
1623  auto Idx = Node->getConstantOperandVal(1);
1624  MVT InVT = V.getSimpleValueType();
1625  SDLoc DL(V);
1626 
1627  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1628  MVT SubVecContainerVT = VT;
1629  // Establish the correct scalable-vector types for any fixed-length type.
1630  if (VT.isFixedLengthVector())
1631  SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1632  if (InVT.isFixedLengthVector())
1633  InVT = TLI.getContainerForFixedLengthVector(InVT);
1634 
1635  const auto *TRI = Subtarget->getRegisterInfo();
1636  unsigned SubRegIdx;
1637  std::tie(SubRegIdx, Idx) =
1638  RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1639  InVT, SubVecContainerVT, Idx, TRI);
1640 
1641  // If the Idx hasn't been completely eliminated then this is a subvector
1642  // extract which doesn't naturally align to a vector register. These must
1643  // be handled using instructions to manipulate the vector registers.
1644  if (Idx != 0)
1645  break;
1646 
1647  // If we haven't set a SubRegIdx, then we must be going between
1648  // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1649  if (SubRegIdx == RISCV::NoSubRegister) {
1650  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1651  assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1652  InRegClassID &&
1653  "Unexpected subvector extraction");
1654  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1655  SDNode *NewNode =
1656  CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1657  ReplaceNode(Node, NewNode);
1658  return;
1659  }
1660 
1661  SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1662  ReplaceNode(Node, Extract.getNode());
1663  return;
1664  }
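// Example (illustrative; the subregister name is assumed from the usual
// RISC-V register definitions): extracting the low half of an LMUL=2 group,
//   (extract_subvector nxv4i32:$v, 0),
// decomposes to the subregister index sub_vrm1_0 with a residual index of 0,
// so it is selected as a plain EXTRACT_SUBREG and costs no instructions.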
1665  case ISD::SPLAT_VECTOR:
1666  case RISCVISD::VMV_S_X_VL:
1667  case RISCVISD::VFMV_S_F_VL:
1668  case RISCVISD::VMV_V_X_VL:
1669  case RISCVISD::VFMV_V_F_VL: {
1670  // Try to match splat of a scalar load to a strided load with stride of x0.
1671  bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
1672  Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1673  bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
1674  if (HasPassthruOperand && !Node->getOperand(0).isUndef())
1675  break;
1676  SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
1677  auto *Ld = dyn_cast<LoadSDNode>(Src);
1678  if (!Ld)
1679  break;
1680  EVT MemVT = Ld->getMemoryVT();
1681  // The memory VT should be the same size as the element type.
1682  if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1683  break;
1684  if (!IsProfitableToFold(Src, Node, Node) ||
1685  !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1686  break;
1687 
1688  SDValue VL;
1689  if (Node->getOpcode() == ISD::SPLAT_VECTOR)
1690  VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1691  else if (IsScalarMove) {
1692  // We could deal with more VL if we update the VSETVLI insert pass to
1693  // avoid introducing more VSETVLI.
1694  if (!isOneConstant(Node->getOperand(2)))
1695  break;
1696  selectVLOp(Node->getOperand(2), VL);
1697  } else
1698  selectVLOp(Node->getOperand(2), VL);
1699 
1700  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1701  SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1702 
1703  SDValue Operands[] = {Ld->getBasePtr(),
1704  CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1705  Ld->getChain()};
1706 
1707  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1708  const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1709  /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
1710  Log2SEW, static_cast<unsigned>(LMUL));
1711  MachineSDNode *Load =
1712  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1713 
1714  CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1715 
1716  ReplaceNode(Node, Load);
1717  return;
1718  }
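// Example (illustrative, not from the original source): a splat of a loaded
// scalar,
//   (splat_vector (load $addr)),
// becomes a zero-strided vector load that broadcasts the element itself,
//   vlse32.v v8, ($addr), zero
// saving the scalar load plus the vmv.v.x that would otherwise be needed.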
1719  }
1720 
1721  // Select the default instruction.
1722  SelectCode(Node);
1723 }
1724 
1725 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1726  const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1727  switch (ConstraintID) {
1728  case InlineAsm::Constraint_m:
1729  // We just support simple memory operands that have a single address
1730  // operand and need no special handling.
1731  OutOps.push_back(Op);
1732  return false;
1733  case InlineAsm::Constraint_A:
1734  OutOps.push_back(Op);
1735  return false;
1736  default:
1737  break;
1738  }
1739 
1740  return true;
1741 }
1742 
1743 bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
1744  SDValue &Offset) {
1745  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1746  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1747  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
1748  return true;
1749  }
1750 
1751  return false;
1752 }
1753 
1754 // Select a frame index and an optional immediate offset from an ADD or OR.
1755 bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
1756  SDValue &Offset) {
1757  if (SelectAddrFrameIndex(Addr, Base, Offset))
1758  return true;
1759 
1760  if (!CurDAG->isBaseWithConstantOffset(Addr))
1761  return false;
1762 
1763  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
1764  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1765  if (isInt<12>(CVal)) {
1766  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
1767  Subtarget->getXLenVT());
1768  Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
1769  Subtarget->getXLenVT());
1770  return true;
1771  }
1772  }
1773 
1774  return false;
1775 }
1776 
1777 // Fold constant addresses.
1778 static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
1779  const MVT VT, const RISCVSubtarget *Subtarget,
1780  SDValue Addr, SDValue &Base, SDValue &Offset) {
1781  if (!isa<ConstantSDNode>(Addr))
1782  return false;
1783 
1784  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
1785 
1786  // If the constant is a simm12, we can fold the whole constant and use X0 as
1787  // the base. If the constant can be materialized with LUI+simm12, use LUI as
1788  // the base. We can't use generateInstSeq because it favors LUI+ADDIW.
1789  int64_t Lo12 = SignExtend64<12>(CVal);
1790  int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
1791  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
1792  if (Hi) {
1793  int64_t Hi20 = (Hi >> 12) & 0xfffff;
1794  Base = SDValue(
1795  CurDAG->getMachineNode(RISCV::LUI, DL, VT,
1796  CurDAG->getTargetConstant(Hi20, DL, VT)),
1797  0);
1798  } else {
1799  Base = CurDAG->getRegister(RISCV::X0, VT);
1800  }
1801  Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
1802  return true;
1803  }
1804 
1805  // Ask how constant materialization would handle this constant.
1806  RISCVMatInt::InstSeq Seq =
1807  RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());
1808 
1809  // If the last instruction would be an ADDI, we can fold its immediate and
1810  // emit the rest of the sequence as the base.
1811  if (Seq.back().Opc != RISCV::ADDI)
1812  return false;
1813  Lo12 = Seq.back().Imm;
1814 
1815  // Drop the last instruction.
1816  Seq.pop_back();
1817  assert(!Seq.empty() && "Expected more instructions in sequence");
1818 
1819  Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
1820  Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
1821  return true;
1822 }
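// Worked example (illustrative, not from the original source): for the
// constant address 0x12345678, Lo12 = 0x678 and Hi = 0x12345000, so the base
// becomes "LUI 0x12345" with 0x678 folded as the offset; a simm12 address
// such as -42 folds entirely, using X0 as the base.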
1823 
1824 // Is this ADD instruction only used as the base pointer of scalar loads and
1825 // stores?
1826 static bool isWorthFoldingAdd(SDValue Add) {
1827  for (auto Use : Add->uses()) {
1828  if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
1829  Use->getOpcode() != ISD::ATOMIC_LOAD &&
1830  Use->getOpcode() != ISD::ATOMIC_STORE)
1831  return false;
1832  EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
1833  if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
1834  VT != MVT::f64)
1835  return false;
1836  // Don't allow stores of the value. It must be used as the address.
1837  if (Use->getOpcode() == ISD::STORE &&
1838  cast<StoreSDNode>(Use)->getValue() == Add)
1839  return false;
1840  if (Use->getOpcode() == ISD::ATOMIC_STORE &&
1841  cast<AtomicSDNode>(Use)->getVal() == Add)
1842  return false;
1843  }
1844 
1845  return true;
1846 }
1847 
1848 bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
1849  SDValue &Offset) {
1850  if (SelectAddrFrameIndex(Addr, Base, Offset))
1851  return true;
1852 
1853  SDLoc DL(Addr);
1854  MVT VT = Addr.getSimpleValueType();
1855 
1856  if (Addr.getOpcode() == RISCVISD::ADD_LO) {
1857  Base = Addr.getOperand(0);
1858  Offset = Addr.getOperand(1);
1859  return true;
1860  }
1861 
1862  if (CurDAG->isBaseWithConstantOffset(Addr)) {
1863  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1864  if (isInt<12>(CVal)) {
1865  Base = Addr.getOperand(0);
1866  if (Base.getOpcode() == RISCVISD::ADD_LO) {
1867  SDValue LoOperand = Base.getOperand(1);
1868  if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
1869  // If the Lo in (ADD_LO hi, lo) is a global variable's address
1870  // (its low part, really), then we can rely on the alignment of that
1871  // variable to provide a margin of safety before the low part can overflow
1872  // the 12 bits of the load/store offset. Check if CVal falls within
1873  // that margin; if so (low part + CVal) can't overflow.
1874  const DataLayout &DL = CurDAG->getDataLayout();
1875  Align Alignment = commonAlignment(
1876  GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
1877  if (CVal == 0 || Alignment > CVal) {
1878  int64_t CombinedOffset = CVal + GA->getOffset();
1879  Base = Base.getOperand(0);
1880  Offset = CurDAG->getTargetGlobalAddress(
1881  GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
1882  CombinedOffset, GA->getTargetFlags());
1883  return true;
1884  }
1885  }
1886  }
1887 
1888  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
1889  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
1890  Offset = CurDAG->getTargetConstant(CVal, DL, VT);
1891  return true;
1892  }
1893  }
1894 
1895  // Handle ADD with large immediates.
1896  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
1897  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1898  assert(!isInt<12>(CVal) && "simm12 not already handled?");
1899 
1900  // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use
1901  // an ADDI for part of the offset and fold the rest into the load/store.
1902  // This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
1903  if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
1904  int64_t Adj = CVal < 0 ? -2048 : 2047;
1905  Base = SDValue(
1906  CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
1907  CurDAG->getTargetConstant(Adj, DL, VT)),
1908  0);
1909  Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
1910  return true;
1911  }
1912 
1913  // For larger immediates, we might be able to save one instruction from
1914  // constant materialization by folding the Lo12 bits of the immediate into
1915  // the address. We should only do this if the ADD is only used by loads and
1916  // stores that can fold the lo12 bits. Otherwise, the ADD will get iseled
1917  // separately with the fully materialized immediate, creating extra
1918  // instructions.
1919  if (isWorthFoldingAdd(Addr) &&
1920  selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
1921  Offset)) {
1922  // Insert an ADD instruction with the materialized Hi52 bits.
1923  Base = SDValue(
1924  CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
1925  0);
1926  return true;
1927  }
1928  }
1929 
1930  if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
1931  return true;
1932 
1933  Base = Addr;
1934  Offset = CurDAG->getTargetConstant(0, DL, VT);
1935  return true;
1936 }
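// Worked example (illustrative, not from the original source): for
//   (load (add $x, 3000)),
// 3000 is not a simm12, but both halves 1500 and 1500 are, so the code above
// emits "ADDI $t, $x, 2047" and folds the remaining 953 into the access,
// e.g. "lw $r, 953($t)" for a word load.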
1937 
1938 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1939  SDValue &ShAmt) {
1940  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1941  // amount. If there is an AND on the shift amount, we can bypass it if it
1942  // doesn't affect any of those bits.
1943  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1944  const APInt &AndMask = N->getConstantOperandAPInt(1);
1945 
1946  // Since the max shift amount is a power of 2 we can subtract 1 to make a
1947  // mask that covers the bits needed to represent all shift amounts.
1948  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1949  APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1950 
1951  if (ShMask.isSubsetOf(AndMask)) {
1952  ShAmt = N.getOperand(0);
1953  return true;
1954  }
1955 
1956  // SimplifyDemandedBits may have optimized the mask so try restoring any
1957  // bits that are known zero.
1958  KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1959  if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1960  ShAmt = N.getOperand(0);
1961  return true;
1962  }
1963  } else if (N.getOpcode() == ISD::SUB &&
1964  isa<ConstantSDNode>(N.getOperand(0))) {
1965  uint64_t Imm = N.getConstantOperandVal(0);
1966  // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
1967  // generate a NEG instead of a SUB of a constant.
1968  if (Imm != 0 && Imm % ShiftWidth == 0) {
1969  SDLoc DL(N);
1970  EVT VT = N.getValueType();
1971  SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
1972  unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
1973  MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
1974  N.getOperand(1));
1975  ShAmt = SDValue(Neg, 0);
1976  return true;
1977  }
1978  }
1979 
1980  ShAmt = N;
1981  return true;
1982 }
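// Example (illustrative, not from the original source): on RV64, where
// ShiftWidth is 64, the mask in
//   (srl $x, (and $y, 63))
// covers every bit the shifter reads, so the AND is bypassed and $y is used
// directly; a (sub 64, $y) amount instead becomes "subw $t, x0, $y" (negw),
// since 64 == 0 mod 64.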
1983 
1984 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1985  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1986  cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1987  Val = N.getOperand(0);
1988  return true;
1989  }
1990  MVT VT = N.getSimpleValueType();
1991  if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1992  Val = N;
1993  return true;
1994  }
1995 
1996  return false;
1997 }
1998 
1999 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
2000  if (N.getOpcode() == ISD::AND) {
2001  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2002  if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
2003  Val = N.getOperand(0);
2004  return true;
2005  }
2006  }
2007  MVT VT = N.getSimpleValueType();
2008  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
2009  if (CurDAG->MaskedValueIsZero(N, Mask)) {
2010  Val = N;
2011  return true;
2012  }
2013 
2014  return false;
2015 }
2016 
2017 /// Look for various patterns that can be done with a SHL that can be folded
2018 /// into a SHXADD. \p ShAmt contains 1, 2, or 3 and is set based on which
2019 /// SHXADD we are trying to match.
2020 bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
2021  SDValue &Val) {
2022  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
2023  SDValue N0 = N.getOperand(0);
2024 
2025  bool LeftShift = N0.getOpcode() == ISD::SHL;
2026  if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
2027  isa<ConstantSDNode>(N0.getOperand(1))) {
2028  uint64_t Mask = N.getConstantOperandVal(1);
2029  unsigned C2 = N0.getConstantOperandVal(1);
2030 
2031  unsigned XLen = Subtarget->getXLen();
2032  if (LeftShift)
2033  Mask &= maskTrailingZeros<uint64_t>(C2);
2034  else
2035  Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
2036 
2037  // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
2038  // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
2039  // followed by a SHXADD with c3 for the X amount.
2040  if (isShiftedMask_64(Mask)) {
2041  unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
2042  unsigned Trailing = countTrailingZeros(Mask);
2043  if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
2044  SDLoc DL(N);
2045  EVT VT = N.getValueType();
2046  Val = SDValue(CurDAG->getMachineNode(
2047  RISCV::SRLI, DL, VT, N0.getOperand(0),
2048  CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
2049  0);
2050  return true;
2051  }
2052  // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
2053  // leading zeros and c3 trailing zeros. We can use an SRLI by C3
2054  // followed by a SHXADD using c3 for the X amount.
2055  if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
2056  SDLoc DL(N);
2057  EVT VT = N.getValueType();
2058  Val = SDValue(
2059  CurDAG->getMachineNode(
2060  RISCV::SRLI, DL, VT, N0.getOperand(0),
2061  CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
2062  0);
2063  return true;
2064  }
2065  }
2066  }
2067  }
2068 
2069  bool LeftShift = N.getOpcode() == ISD::SHL;
2070  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
2071  isa<ConstantSDNode>(N.getOperand(1))) {
2072  SDValue N0 = N.getOperand(0);
2073  if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
2074  isa<ConstantSDNode>(N0.getOperand(1))) {
2075  uint64_t Mask = N0.getConstantOperandVal(1);
2076  if (isShiftedMask_64(Mask)) {
2077  unsigned C1 = N.getConstantOperandVal(1);
2078  unsigned XLen = Subtarget->getXLen();
2079  unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
2080  unsigned Trailing = countTrailingZeros(Mask);
2081  // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
2082  // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
2083  if (LeftShift && Leading == 32 && Trailing > 0 &&
2084  (Trailing + C1) == ShAmt) {
2085  SDLoc DL(N);
2086  EVT VT = N.getValueType();
2087  Val = SDValue(CurDAG->getMachineNode(
2088  RISCV::SRLIW, DL, VT, N0.getOperand(0),
2089  CurDAG->getTargetConstant(Trailing, DL, VT)),
2090  0);
2091  return true;
2092  }
2093  // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
2094  // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
2095  if (!LeftShift && Leading == 32 && Trailing > C1 &&
2096  (Trailing - C1) == ShAmt) {
2097  SDLoc DL(N);
2098  EVT VT = N.getValueType();
2099  Val = SDValue(CurDAG->getMachineNode(
2100  RISCV::SRLIW, DL, VT, N0.getOperand(0),
2101  CurDAG->getTargetConstant(Trailing, DL, VT)),
2102  0);
2103  return true;
2104  }
2105  }
2106  }
2107  }
2108 
2109  return false;
2110 }
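// Worked example (illustrative, not from the original source): when matching
// the shifted operand of SH3ADD (ShAmt == 3) on RV64,
//   (and (shl $y, 2), 0xFFFFFFFFFFFFFFF8)
// is a shifted mask with no leading zeros and three trailing zeros, so it is
// rewritten to "srli $t, $y, 1" and the SH3ADD contributes the final shift
// by 3.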
2111 
2112 // Return true if all users of this SDNode* only consume the lower \p Bits.
2113 // This can be used to form W instructions for add/sub/mul/shl even when the
2114 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
2115 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
2116 // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
2117 // the add/sub/mul/shl to become non-W instructions. By checking the users we
2118 // may be able to use a W instruction and CSE with the other instruction if
2119 // this has happened. We could try to detect that the CSE opportunity exists
2120 // before doing this, but that would be more complicated.
2121 // TODO: Does this need to look through AND/OR/XOR to their users to find more
2122  // opportunities?
2123 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
2124  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
2125  Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
2126  Node->getOpcode() == ISD::SRL ||
2127  Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
2128  Node->getOpcode() == RISCVISD::GREV ||
2129  Node->getOpcode() == RISCVISD::GORC ||
2130  isa<ConstantSDNode>(Node)) &&
2131  "Unexpected opcode");
2132 
2133  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
2134  SDNode *User = *UI;
2135  // Users of this node should have already been instruction selected
2136  if (!User->isMachineOpcode())
2137  return false;
2138 
2139  // TODO: Add more opcodes?
2140  switch (User->getMachineOpcode()) {
2141  default:
2142  return false;
2143  case RISCV::ADDW:
2144  case RISCV::ADDIW:
2145  case RISCV::SUBW:
2146  case RISCV::MULW:
2147  case RISCV::SLLW:
2148  case RISCV::SLLIW:
2149  case RISCV::SRAW:
2150  case RISCV::SRAIW:
2151  case RISCV::SRLW:
2152  case RISCV::SRLIW:
2153  case RISCV::DIVW:
2154  case RISCV::DIVUW:
2155  case RISCV::REMW:
2156  case RISCV::REMUW:
2157  case RISCV::ROLW:
2158  case RISCV::RORW:
2159  case RISCV::RORIW:
2160  case RISCV::CLZW:
2161  case RISCV::CTZW:
2162  case RISCV::CPOPW:
2163  case RISCV::SLLI_UW:
2164  case RISCV::FMV_W_X:
2165  case RISCV::FCVT_H_W:
2166  case RISCV::FCVT_H_WU:
2167  case RISCV::FCVT_S_W:
2168  case RISCV::FCVT_S_WU:
2169  case RISCV::FCVT_D_W:
2170  case RISCV::FCVT_D_WU:
2171  if (Bits < 32)
2172  return false;
2173  break;
2174  case RISCV::SLLI:
2175  // SLLI only uses the lower (XLen - ShAmt) bits.
2176  if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
2177  return false;
2178  break;
2179  case RISCV::ANDI:
2180  if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
2181  return false;
2182  break;
2183  case RISCV::SEXT_B:
2184  if (Bits < 8)
2185  return false;
2186  break;
2187  case RISCV::SEXT_H:
2188  case RISCV::FMV_H_X:
2189  case RISCV::ZEXT_H_RV32:
2190  case RISCV::ZEXT_H_RV64:
2191  if (Bits < 16)
2192  return false;
2193  break;
2194  case RISCV::ADD_UW:
2195  case RISCV::SH1ADD_UW:
2196  case RISCV::SH2ADD_UW:
2197  case RISCV::SH3ADD_UW:
2198  // The first operand to add.uw/shXadd.uw is implicitly zero extended from
2199  // 32 bits.
2200  if (UI.getOperandNo() != 0 || Bits < 32)
2201  return false;
2202  break;
2203  case RISCV::SB:
2204  if (UI.getOperandNo() != 0 || Bits < 8)
2205  return false;
2206  break;
2207  case RISCV::SH:
2208  if (UI.getOperandNo() != 0 || Bits < 16)
2209  return false;
2210  break;
2211  case RISCV::SW:
2212  if (UI.getOperandNo() != 0 || Bits < 32)
2213  return false;
2214  break;
2215  }
2216  }
2217 
2218  return true;
2219 }
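// Example (illustrative, not from the original source): if an i64 ADD is
// consumed only by an SW (as the stored value) and an ADDIW, every user reads
// at most the low 32 bits, so hasAllNBitUsers(N, 32) holds and the ADD may be
// selected as ADDW, which can then CSE with an ADDW created for a sext_inreg
// user elsewhere.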
2220 
2221 // Select VL as a 5 bit immediate or a value that will become a register. This
2222  // allows us to choose between VSETIVLI or VSETVLI later.
2223 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
2224  auto *C = dyn_cast<ConstantSDNode>(N);
2225  if (C && isUInt<5>(C->getZExtValue())) {
2226  VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
2227  N->getValueType(0));
2228  } else if (C && C->isAllOnesValue()) {
2229  // Treat all ones as VLMax.
2230  VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2231  N->getValueType(0));
2232  } else if (isa<RegisterSDNode>(N) &&
2233  cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
2234  // All our VL operands use an operand that allows GPRNoX0 or an immediate
2235  // as the register class. Convert X0 to a special immediate to pass the
2236  // MachineVerifier. This is recognized specially by the vsetvli insertion
2237  // pass.
2238  VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2239  N->getValueType(0));
2240  } else {
2241  VL = N;
2242  }
2243 
2244  return true;
2245 }
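// Example (illustrative, not from the original source): a constant VL of 4
// stays a 5-bit immediate (eligible for VSETIVLI later), an all-ones
// constant or an X0 register operand becomes the VLMaxSentinel immediate,
// and anything else is passed through as a register for VSETVLI.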
2246 
2247 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
2248  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
2249  return false;
2250  SplatVal = N.getOperand(1);
2251  return true;
2252 }
2253 
2254 using ValidateFn = bool (*)(int64_t);
2255 
2256 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
2257  SelectionDAG &DAG,
2258  const RISCVSubtarget &Subtarget,
2259  ValidateFn ValidateImm) {
2260  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2261  !isa<ConstantSDNode>(N.getOperand(1)))
2262  return false;
2263 
2264  int64_t SplatImm =
2265  cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2266 
2267  // The semantics of RISCVISD::VMV_V_X_VL are that when the operand
2268  // type is wider than the resulting vector element type, an implicit
2269  // truncation first takes place. Therefore, perform a manual
2270  // truncation/sign-extension in order to ignore any truncated bits and catch
2271  // any zero-extended immediate.
2272  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
2273  // sign-extending to (XLenVT -1).
2274  MVT XLenVT = Subtarget.getXLenVT();
2275  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
2276  "Unexpected splat operand type");
2277  MVT EltVT = N.getSimpleValueType().getVectorElementType();
2278  if (EltVT.bitsLT(XLenVT))
2279  SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
2280 
2281  if (!ValidateImm(SplatImm))
2282  return false;
2283 
2284  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
2285  return true;
2286 }
2287 
2288 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
2289  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
2290  [](int64_t Imm) { return isInt<5>(Imm); });
2291 }
2292 
2293 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
2294  return selectVSplatSimmHelper(
2295  N, SplatVal, *CurDAG, *Subtarget,
2296  [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
2297 }
2298 
2299 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
2300  SDValue &SplatVal) {
2301  return selectVSplatSimmHelper(
2302  N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
2303  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
2304  });
2305 }
2306 
2307 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
2308  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2309  !isa<ConstantSDNode>(N.getOperand(1)))
2310  return false;
2311 
2312  int64_t SplatImm =
2313  cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2314 
2315  if (!isUInt<5>(SplatImm))
2316  return false;
2317 
2318  SplatVal =
2319  CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2320 
2321  return true;
2322 }
2323 
2324 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2325  SDValue &Imm) {
2326  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2327  int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2328 
2329  if (!isInt<5>(ImmVal))
2330  return false;
2331 
2332  Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2333  return true;
2334  }
2335 
2336  return false;
2337 }
2338 
2339 // Try to remove sext.w if the input is a W instruction or can be made into
2340 // a W instruction cheaply.
2341 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2342  // Look for the sext.w pattern, addiw rd, rs1, 0.
2343  if (N->getMachineOpcode() != RISCV::ADDIW ||
2344  !isNullConstant(N->getOperand(1)))
2345  return false;
2346 
2347  SDValue N0 = N->getOperand(0);
2348  if (!N0.isMachineOpcode())
2349  return false;
2350 
2351  switch (N0.getMachineOpcode()) {
2352  default:
2353  break;
2354  case RISCV::ADD:
2355  case RISCV::ADDI:
2356  case RISCV::SUB:
2357  case RISCV::MUL:
2358  case RISCV::SLLI: {
2359  // Convert sext.w+add/sub/mul to their W instructions. This will create
2360  // a new independent instruction. This improves latency.
2361  unsigned Opc;
2362  switch (N0.getMachineOpcode()) {
2363  default:
2364  llvm_unreachable("Unexpected opcode!");
2365  case RISCV::ADD: Opc = RISCV::ADDW; break;
2366  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2367  case RISCV::SUB: Opc = RISCV::SUBW; break;
2368  case RISCV::MUL: Opc = RISCV::MULW; break;
2369  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2370  }
2371 
2372  SDValue N00 = N0.getOperand(0);
2373  SDValue N01 = N0.getOperand(1);
2374 
2375  // Shift amount needs to be uimm5.
2376  if (N0.getMachineOpcode() == RISCV::SLLI &&
2377  !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2378  break;
2379 
2380  SDNode *Result =
2381  CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2382  N00, N01);
2383  ReplaceUses(N, Result);
2384  return true;
2385  }
2386  case RISCV::ADDW:
2387  case RISCV::ADDIW:
2388  case RISCV::SUBW:
2389  case RISCV::MULW:
2390  case RISCV::SLLIW:
2391  case RISCV::GREVIW:
2392  case RISCV::GORCIW:
2393  // Result is already sign extended just remove the sext.w.
2394  // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2395  ReplaceUses(N, N0.getNode());
2396  return true;
2397  }
2398 
2399  return false;
2400 }
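// Example (illustrative, not from the original source): the pair
//   add   a0, a1, a2
//   addiw a0, a0, 0    ; sext.w
// is replaced by a single "addw a0, a1, a2", while an ADDIW fed by an
// already sign-extending W instruction (e.g. MULW) simply loses the
// redundant sext.w.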
2401 
2402 // Optimize masked RVV pseudo instructions with a known all-ones mask to their
2403 // corresponding "unmasked" pseudo versions. The mask we're interested in will
2404 // take the form of a V0 physical register operand, with a glued
2405 // register-setting instruction.
2406 bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
2407  const RISCV::RISCVMaskedPseudoInfo *I =
2408  RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
2409  if (!I)
2410  return false;
2411 
2412  unsigned MaskOpIdx = I->MaskOpIdx;
2413 
2414  // Check that we're using V0 as a mask register.
2415  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
2416  cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
2417  return false;
2418 
2419  // The glued user defines V0.
2420  const auto *Glued = N->getGluedNode();
2421 
2422  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
2423  return false;
2424 
2425  // Check that we're defining V0 as a mask register.
2426  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
2427  cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
2428  return false;
2429 
2430  // Check the instruction defining V0; it needs to be a VMSET pseudo.
2431  SDValue MaskSetter = Glued->getOperand(2);
2432 
2433  const auto IsVMSet = [](unsigned Opc) {
2434  return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
2435  Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
2436  Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
2437  Opc == RISCV::PseudoVMSET_M_B8;
2438  };
2439 
2440  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
2441  // undefined behaviour if it's the wrong bitwidth, so we could choose to
2442  // assume that it's all-ones? Same applies to its VL.
2443  if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
2444  return false;
2445 
2446  // Retrieve the tail policy operand index, if any.
2447  Optional<unsigned> TailPolicyOpIdx;
2448  const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
2449  const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
2450 
2451  bool IsTA = true;
2452  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
2453  // The last operand of the pseudo is the policy op, but we might have a
2454  // Glue operand last. We might also have a chain.
2455  TailPolicyOpIdx = N->getNumOperands() - 1;
2456  if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
2457  (*TailPolicyOpIdx)--;
2458  if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
2459  (*TailPolicyOpIdx)--;
2460 
2461  if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
2462  RISCVII::TAIL_AGNOSTIC)) {
2463  // Keep the true-masked instruction when there is no unmasked TU
2464  // instruction
2465  if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
2466  return false;
2467  // We can't use TA if the tie-operand is not IMPLICIT_DEF
2468  if (!N->getOperand(0).isUndef())
2469  IsTA = false;
2470  }
2471  }
2472 
2473  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
2474 
2475  // Check that we're dropping the mask operand and any policy operand
2476  // when we transform to this unmasked pseudo. Additionally, if this instruction
2477  // is tail agnostic, the unmasked instruction should not have a merge op.
2478  uint64_t TSFlags = TII.get(Opc).TSFlags;
2479  assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
2480  RISCVII::hasDummyMaskOp(TSFlags) &&
2481  !RISCVII::hasVecPolicyOp(TSFlags) &&
2482  "Unexpected pseudo to transform to");
2483  (void)TSFlags;
2484 
2485  SmallVector<SDValue, 8> Ops;
2486  // Skip the merge operand at index 0 if IsTA
2487  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
2488  // Skip the mask, the policy, and the Glue.
2489  SDValue Op = N->getOperand(I);
2490  if (I == MaskOpIdx || I == TailPolicyOpIdx ||
2491  Op.getValueType() == MVT::Glue)
2492  continue;
2493  Ops.push_back(Op);
2494  }
2495 
2496  // Transitively apply any node glued to our new node.
2497  if (auto *TGlued = Glued->getGluedNode())
2498  Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
2499 
2500  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
2501  ReplaceUses(N, Result);
2502 
2503  return true;
2504 }
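// Example (a sketch; the pseudo names are assumed from the usual RVV pseudo
// naming): a masked pseudo whose V0 operand is defined by a VMSET, e.g.
//   PseudoVADD_VV_M1_MASK ..., $v0, ...   ; $v0 <- PseudoVMSET_M_B8
// has a known all-ones mask and is rewritten to the unmasked
// PseudoVADD_VV_M1, dropping the mask and, when tail agnostic, the merge and
// policy operands.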
2505 
2506 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
2507 // for instruction scheduling.
2508 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
2509  CodeGenOpt::Level OptLevel) {
2510  return new RISCVDAGToDAGISel(TM, OptLevel);
2511 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:109
llvm::TargetMachine::getOptLevel
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Definition: TargetMachine.cpp:182
llvm::RISCVISD::VFMV_S_F_VL
@ VFMV_S_F_VL
Definition: RISCVISelLowering.h:162
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:28
llvm::MVT::getVectorElementType
MVT getVectorElementType() const
Definition: MachineValueType.h:528
B1
llvm::MVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: MachineValueType.h:1101
llvm::ISD::INTRINSIC_VOID
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:199
llvm::RISCVDAGToDAGISel::selectVLXSEG
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:384
MathExtras.h
Merge
R600 Clause Merge
Definition: R600ClauseMergePass.cpp:70
llvm::SelectionDAGISel::TLI
const TargetLowering * TLI
Definition: SelectionDAGISel.h:54
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::RISCVISD::SLLW
@ SLLW
Definition: RISCVISelLowering.h:63
llvm::SelectionDAGISel::TM
TargetMachine & TM
Definition: SelectionDAGISel.h:42
llvm::EVT::isScalarInteger
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:149
llvm::RISCV::VLSEGPseudo
Definition: RISCVISelDAGToDAG.h:136
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1090
llvm::MVT::isFixedLengthVector
bool isFixedLengthVector() const
Definition: MachineValueType.h:386
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2293
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::MVT::isInteger
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: MachineValueType.h:358
llvm::RISCVDAGToDAGISel::PreprocessISelDAG
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
Definition: RISCVISelDAGToDAG.cpp:45
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:886
Insert
Vector Rotate Left Mask Mask Insert
Definition: README_P9.txt:112
llvm::RISCVSubtarget::getTargetLowering
const RISCVTargetLowering * getTargetLowering() const override
Definition: RISCVSubtarget.h:137
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:750
llvm::RISCV::VLXSEGPseudo
Definition: RISCVISelDAGToDAG.h:147
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:151
llvm::RISCVDAGToDAGISel::selectZExti32
bool selectZExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1999
llvm::isOneConstant
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Definition: SelectionDAG.cpp:10591
llvm::SelectionDAG::allnodes_end
allnodes_const_iterator allnodes_end() const
Definition: SelectionDAG.h:509
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::RISCVISD::FMV_H_X
@ FMV_H_X
Definition: RISCVISelLowering.h:99
llvm::SDNode::isUndef
bool isUndef() const
Return true if the type of the node type undefined.
Definition: SelectionDAGNodes.h:655
llvm::KnownBits::Zero
APInt Zero
Definition: KnownBits.h:24
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::RISCVISD::DIVUW
@ DIVUW
Definition: RISCVISelLowering.h:70
llvm::MVT::bitsLT
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
Definition: MachineValueType.h:1163
llvm::SelectionDAG::getFrameIndex
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
Definition: SelectionDAG.cpp:1679
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::RISCVDAGToDAGISel::selectVSETVLI
void selectVSETVLI(SDNode *Node)
Definition: RISCVISelDAGToDAG.cpp:512
llvm::SelectionDAG::getVTList
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
Definition: SelectionDAG.cpp:9176
llvm::MachineSDNode
An SDNode that represents everything that will be needed to construct a MachineInstr.
Definition: SelectionDAGNodes.h:2871
llvm::RISCVSubtarget::hasVInstructions
bool hasVInstructions() const
Definition: RISCVSubtarget.h:229
llvm::RISCVDAGToDAGISel::SelectFrameAddrRegImm
bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition: RISCVISelDAGToDAG.cpp:1755
llvm::SelectionDAG::allnodes_begin
allnodes_const_iterator allnodes_begin() const
Definition: SelectionDAG.h:508
llvm::SelectionDAG::getRoot
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:528
llvm::HandleSDNode
This class is used to form a handle around another node that is persistent and is updated across invo...
Definition: SelectionDAGNodes.h:1217
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:177
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:454
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::RISCVDAGToDAGISel::selectVSplat
bool selectVSplat(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2247
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:112
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:270
llvm::MemOp
Definition: TargetLowering.h:111
llvm::RISCVDAGToDAGISel
Definition: RISCVISelDAGToDAG.h:23
llvm::SelectionDAG::getMemBasePlusOffset
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
Definition: SelectionDAG.cpp:6645
llvm::RISCVSubtarget::hasStdExtZbs
bool hasStdExtZbs() const
Definition: RISCVSubtarget.h:166
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1411
llvm::tgtok::Bits
@ Bits
Definition: TGLexer.h:50
llvm::SelectionDAG::getStore
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
Definition: SelectionDAG.cpp:7850
llvm::InlineAsm::Constraint_m
@ Constraint_m
Definition: InlineAsm.h:255
llvm::SelectionDAG::isBaseWithConstantOffset
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
Definition: SelectionDAG.cpp:4557
llvm::RISCV::VLX_VSXPseudo
Definition: RISCVISelDAGToDAG.h:195
llvm::Optional< unsigned >
llvm::SelectionDAG::RemoveDeadNodes
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
Definition: SelectionDAG.cpp:900
llvm::RISCVTargetLowering::getRegClassIDForVecVT
static unsigned getRegClassIDForVecVT(MVT VT)
Definition: RISCVISelLowering.cpp:1467
llvm::RISCV::VLMaxSentinel
static constexpr int64_t VLMaxSentinel
Definition: RISCVInstrInfo.h:209
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::RISCVDAGToDAGISel::selectVSSEG
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:438
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:159
RISCVMatInt.h
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::commonAlignment
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:213
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2299
KnownBits.h
llvm::MVT::isScalableVector
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
Definition: MachineValueType.h:381
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:2061
llvm::RISCVDAGToDAGISel::SelectAddrFrameIndex
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition: RISCVISelDAGToDAG.cpp:1743
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:189
llvm::RISCV::VSSEGPseudo
Definition: RISCVISelDAGToDAG.h:158
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:111
llvm::RISCV::RISCVMaskedPseudoInfo
Definition: RISCVISelDAGToDAG.h:205
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1617
llvm::EVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:362
llvm::MCInstrDesc::TSFlags
uint64_t TSFlags
Definition: MCInstrDesc.h:205
llvm::RISCVDAGToDAGISel::selectShiftMask
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
Definition: RISCVISelDAGToDAG.cpp:1938
llvm::SelectionDAG::getTargetFrameIndex
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:703
llvm::SDValue::getValueType
EVT getValueType() const
Return the ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:1125
llvm::SelectionDAG
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:220
llvm::ISD::Constant
@ Constant
Definition: ISDOpcodes.h:76
getReg
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Definition: MipsDisassembler.cpp:517
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:754
llvm::User
Definition: User.h:44
llvm::SelectionDAG::getUNDEF
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:971
llvm::ISD::CopyToReg
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition: ISDOpcodes.h:203
llvm::ISD::SIGN_EXTEND_INREG
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:781
createTuple
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF, RISCVII::VLMUL LMUL)
Definition: RISCVISelDAGToDAG.cpp:195
llvm::SelectionDAG::getTargetLoweringInfo
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:458
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:34
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MVT::f64
@ f64
Definition: MachineValueType.h:58
llvm::isShiftedMask_64
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:485
llvm::TargetLowering
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Definition: TargetLowering.h:3415
llvm::MVT::getScalarSizeInBits
uint64_t getScalarSizeInBits() const
Definition: MachineValueType.h:1091
llvm::SelectionDAG::MaskedValueIsZero
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
Definition: SelectionDAG.cpp:2514
llvm::RISCVTargetLowering::getSubregIndexByMVT
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
Definition: RISCVISelLowering.cpp:1444
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::ISD::SRA
@ SRA
Definition: ISDOpcodes.h:692
llvm::RISCVSubtarget::getXLenVT
MVT getXLenVT() const
Definition: RISCVSubtarget.h:197
RISCVISelDAGToDAG.h
llvm::SelectionDAGISel::ReplaceNode
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
Definition: SelectionDAGISel.h:229
llvm::TypeSize::Fixed
static TypeSize Fixed(ScalarTy MinVal)
Definition: TypeSize.h:441
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:623
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::SelectionDAG::setRoot
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
Definition: SelectionDAG.h:537
llvm::RISCVVType::decodeVSEW
static unsigned decodeVSEW(unsigned VSEW)
Definition: RISCVBaseInfo.h:413
RISCVMCTargetDesc.h
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
llvm::ISD::ATOMIC_STORE
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1163
llvm::RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
Definition: RISCVISelDAGToDAG.cpp:1725
llvm::SelectionDAG::getMemIntrinsicNode
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
Definition: SelectionDAG.cpp:7575
llvm::RISCVSubtarget::hasStdExtZbb
bool hasStdExtZbb() const
Definition: RISCVSubtarget.h:159
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:666
llvm::RISCVSubtarget::getInstrInfo
const RISCVInstrInfo * getInstrInfo() const override
Definition: RISCVSubtarget.h:133
CASE_VMSLT_OPCODES
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)
Align
uint64_t Align
Definition: ELFObjHandler.cpp:81
llvm::ISD::SPLAT_VECTOR
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:613
llvm::RISCVSubtarget::hasStdExtZbp
bool hasStdExtZbp() const
Definition: RISCVSubtarget.h:164
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::SPII::Store
@ Store
Definition: SparcInstrInfo.h:33
llvm::SDValue::getConstantOperandVal
uint64_t getConstantOperandVal(unsigned i) const
Definition: SelectionDAGNodes.h:1137
isWorthFoldingAdd
static bool isWorthFoldingAdd(SDValue Add)
Definition: RISCVISelDAGToDAG.cpp:1826
llvm::RISCVISD::DIVW
@ DIVW
Definition: RISCVISelLowering.h:69
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::SelectionDAG::getTargetGlobalAddress
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:698
llvm::RISCVISD::CLZW
@ CLZW
Definition: RISCVISelLowering.h:78
llvm::RISCVDAGToDAGISel::SelectAddrRegImm
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition: RISCVISelDAGToDAG.cpp:1848
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:74
llvm::APInt::isSubsetOf
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1207
llvm::createRISCVISelDag
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOpt::Level OptLevel)
Definition: RISCVISelDAGToDAG.cpp:2508
llvm::RISCVISD::GREV
@ GREV
Definition: RISCVISelLowering.h:124
llvm::RISCVISD::GORC
@ GORC
Definition: RISCVISelLowering.h:126
llvm::RISCVISD::VMV_S_X_VL
@ VMV_S_X_VL
Definition: RISCVISelLowering.h:160
llvm::SelectionDAG::RemoveDeadNode
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:954
llvm::RISCV::VSEPseudo
Definition: RISCVISelDAGToDAG.h:187
llvm::RISCVDAGToDAGISel::selectVLOp
bool selectVLOp(SDValue N, SDValue &VL)
Definition: RISCVISelDAGToDAG.cpp:2223
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::RISCVDAGToDAGISel::selectVSXSEG
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:470
llvm::SelectionDAGISel::IsProfitableToFold
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
Definition: SelectionDAGISel.cpp:2057
llvm::isUInt< 16 >
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:408
selectVSplatSimmHelper
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, ValidateFn ValidateImm)
Definition: RISCVISelDAGToDAG.cpp:2256
uint64_t
llvm::RISCVDAGToDAGISel::selectVSplatUimm5
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2307
llvm::SelectionDAGISel::TII
const TargetInstrInfo * TII
Definition: SelectionDAGISel.h:53
llvm::ISD::LOAD
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:966
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:78
llvm::SelectionDAGISel::FuncInfo
std::unique_ptr< FunctionLoweringInfo > FuncInfo
Definition: SelectionDAGISel.h:44
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:39
llvm::SelectionDAG::getCopyFromReg
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:776
llvm::SelectionDAGISel::IsLegalToFold
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
Definition: SelectionDAGISel.cpp:2065
llvm::SDNode::getOperand
const SDValue & getOperand(unsigned Num) const
Definition: SelectionDAGNodes.h:908
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::SelectionDAG::getNode
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
Definition: SelectionDAG.cpp:8884
llvm::countTrailingOnes
unsigned countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
Definition: MathExtras.h:525
llvm::RISCVISD::ROLW
@ ROLW
Definition: RISCVISelLowering.h:74
llvm::RISCVMachineFunctionInfo
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
Definition: RISCVMachineFunctionInfo.h:47
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::isUInt< 32 >
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:411
llvm::SDValue::getValue
SDValue getValue(unsigned R) const
Definition: SelectionDAGNodes.h:171
llvm::RISCVDAGToDAGISel::selectVLSEG
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:296
llvm::RISCVISD::ADD_LO
@ ADD_LO
Definition: RISCVISelLowering.h:46
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MVT::Other
@ Other
Definition: MachineValueType.h:42
llvm::MVT::getSizeInBits
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Definition: MachineValueType.h:883
llvm::RISCVMatInt::RegX0
@ RegX0
Definition: RISCVMatInt.h:25
llvm::SelectionDAGISel::CurDAG
SelectionDAG * CurDAG
Definition: SelectionDAGISel.h:48
llvm::RISCVDAGToDAGISel::hasAllWUsers
bool hasAllWUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:76
llvm::SelectionDAG::getMachineNode
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
Definition: SelectionDAG.cpp:9614
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::RISCVISD::SRAW
@ SRAW
Definition: RISCVISelLowering.h:64
llvm::RISCVDAGToDAGISel::selectVSplatSimm5
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2288
llvm::SelectionDAG::setNodeMemRefs
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Definition: SelectionDAG.cpp:9382
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:158
llvm::MachinePointerInfo::getWithOffset
MachinePointerInfo getWithOffset(int64_t O) const
Definition: MachineMemOperand.h:79
llvm::SDNode::isMachineOpcode
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
Definition: SelectionDAGNodes.h:687
llvm::RISCVII::hasMergeOp
static bool hasMergeOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:147
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::MachineFunction
Definition: MachineFunction.h:257
llvm::RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
Definition: RISCVISelLowering.cpp:1479
llvm::RISCVISD::REMUW
@ REMUW
Definition: RISCVISelLowering.h:71
llvm::NVPTXISD::Dummy
@ Dummy
Definition: NVPTXISelLowering.h:60
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::RISCVInstrInfo
Definition: RISCVInstrInfo.h:44
llvm::MVT::i64
@ i64
Definition: MachineValueType.h:49
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:156
llvm::SelectionDAG::getTargetInsertSubreg
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
Definition: SelectionDAG.cpp:9742
llvm::RISCVISD::VMV_V_X_VL
@ VMV_V_X_VL
Definition: RISCVISelLowering.h:151
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:134
llvm::SDValue::getMachineOpcode
unsigned getMachineOpcode() const
Definition: SelectionDAGNodes.h:1157
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:110
llvm::SelectionDAG::ReplaceAllUsesOfValueWith
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
Definition: SelectionDAG.cpp:10202
llvm::SDValue::getOperand
const SDValue & getOperand(unsigned i) const
Definition: SelectionDAGNodes.h:1133
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::InlineAsm::Constraint_A
@ Constraint_A
Definition: InlineAsm.h:258
llvm::SDValue::hasOneUse
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
Definition: SelectionDAGNodes.h:1169
llvm::SDValue::getSimpleValueType
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:182
llvm::CodeGenOpt::Level
Level
Definition: CodeGen.h:52
llvm::SDVTList
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Definition: SelectionDAGNodes.h:78
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:811
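A minimal compile-time sketch; the 12-bit width is chosen to mirror RISC-V's I-type immediates:
#include "llvm/Support/MathExtras.h"
// 0xFFF read as a 12-bit value is all ones, i.e. -1 after sign extension.
static_assert(llvm::SignExtend64<12>(0xFFF) == -1, "all-ones 12-bit value");
// With the sign bit clear the value is unchanged.
static_assert(llvm::SignExtend64<12>(0x7FF) == 0x7FF, "positive value");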
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
llvm::ISD::INTRINSIC_WO_CHAIN
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
Definition: ISDOpcodes.h:184
llvm::MVT::getVectorElementCount
ElementCount getVectorElementCount() const
Definition: MachineValueType.h:865
llvm::RISCVISD::RORW
@ RORW
Definition: RISCVISelLowering.h:75
llvm::ISD::INSERT_SUBVECTOR
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition: ISDOpcodes.h:558
llvm::SelectionDAGISel::MF
MachineFunction * MF
Definition: SelectionDAGISel.h:46
CASE_VMSLT_VMNAND_VMSET_OPCODES
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)
Alignment.h
selectImm
static SDNode * selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
Definition: RISCVISelDAGToDAG.cpp:187
selectImmSeq
static SDNode * selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
Definition: RISCVISelDAGToDAG.cpp:158
llvm::SelectionDAG::computeKnownBits
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
Definition: SelectionDAG.cpp:2898
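A sketch of a typical query, assuming an existing SelectionDAG; the helper name and the 12-bit threshold are illustrative:
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/KnownBits.h"
// Hypothetical query: are the low 12 bits of Op provably zero?
static bool lowTwelveBitsKnownZero(const llvm::SelectionDAG &DAG,
                                   llvm::SDValue Op) {
  llvm::KnownBits Known = DAG.computeKnownBits(Op);
  return Known.countMinTrailingZeros() >= 12;
}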
llvm::KnownBits
Definition: KnownBits.h:23
llvm::RISCVISD::SRLW
@ SRLW
Definition: RISCVISelLowering.h:65
llvm::ISD::EXTRACT_SUBVECTOR
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:572
llvm::isNullConstant
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
Definition: SelectionDAG.cpp:10576
llvm::RISCV::VSXSEGPseudo
Definition: RISCVISelDAGToDAG.h:167
RISCVISelLowering.h
llvm::RISCVDAGToDAGISel::PostprocessISelDAG
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
Definition: RISCVISelDAGToDAG.cpp:137
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
MachineFrameInfo.h
llvm::SelectionDAG::getEntryNode
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:531
llvm::RISCVVType::encodeVTYPE
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
Definition: RISCVBaseInfo.cpp:130
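A sketch of one encoding, assuming the target-internal MCTargetDesc/RISCVBaseInfo.h is on the include path; the helper name is hypothetical:
#include "MCTargetDesc/RISCVBaseInfo.h"
// Hypothetical helper: the vtype immediate for SEW=64, LMUL=1, tail- and
// mask-agnostic, as used in a vsetvli.
static unsigned encodeE64M1TaMa() {
  return llvm::RISCVVType::encodeVTYPE(llvm::RISCVII::LMUL_1, /*SEW=*/64,
                                       /*TailAgnostic=*/true,
                                       /*MaskAgnostic=*/true);
}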
llvm::SelectionDAG::getDataLayout
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:452
llvm::RISCVDAGToDAGISel::selectVLSEGFF
void selectVLSEGFF(SDNode *Node, bool IsMasked)
Definition: RISCVISelDAGToDAG.cpp:339
llvm::SelectionDAG::getTargetExtractSubreg
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
Definition: SelectionDAG.cpp:9732
llvm::SelectionDAGISel::ReplaceUses
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
Definition: SelectionDAGISel.h:208
selectConstantAddr
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset)
Definition: RISCVISelDAGToDAG.cpp:1778
llvm::MVT::i32
@ i32
Definition: MachineValueType.h:48
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:198
llvm::SDValue
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
Definition: SelectionDAGNodes.h:137
llvm::RISCVTargetLowering
Definition: RISCVISelLowering.h:347
llvm::RISCVMatInt::RegReg
@ RegReg
Definition: RISCVMatInt.h:24
llvm::countLeadingZeros
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count the number of 0's from the most significant bit to the least significant, stopping at the first 1.
Definition: MathExtras.h:225
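A minimal standalone sketch, mirroring the countTrailingZeros example above:
#include "llvm/Support/MathExtras.h"
#include <cassert>
int main() {
  // For a 32-bit value with only bit 0 set, 31 zeros precede it.
  assert(llvm::countLeadingZeros(1u) == 31);
  // The top bit is set: no leading zeros.
  assert(llvm::countLeadingZeros(0x80000000u) == 0);
}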
llvm::ISD::STORE
@ STORE
Definition: ISDOpcodes.h:967
llvm::RISCVTargetLowering::getLMUL
static RISCVII::VLMUL getLMUL(MVT VT)
Definition: RISCVISelLowering.cpp:1400
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::RISCVISD::VFMV_V_F_VL
@ VFMV_V_F_VL
Definition: RISCVISelLowering.h:155
llvm::SDValue::isUndef
bool isUndef() const
Definition: SelectionDAGNodes.h:1161
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
llvm::RISCVII::LMUL_F8
@ LMUL_F8
Definition: RISCVBaseInfo.h:114
llvm::ISD::SHL
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:691
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1006
llvm::RISCVMatInt::RegImm
@ RegImm
Definition: RISCVMatInt.h:22
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::MVT::f16
@ f16
Definition: MachineValueType.h:56
llvm::ISD::SRL
@ SRL
Definition: ISDOpcodes.h:693
RISCVMachineFunctionInfo.h
llvm::RISCVDAGToDAGISel::selectRVVSimm5
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
Definition: RISCVISelDAGToDAG.cpp:2324
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::RISCVII::LMUL_F4
@ LMUL_F4
Definition: RISCVBaseInfo.h:115
llvm::RISCVDAGToDAGISel::Select
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
Definition: RISCVISelDAGToDAG.cpp:582
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:108
llvm::MVT::Untyped
@ Untyped
Definition: MachineValueType.h:274
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: APFloat.h:42
llvm::ISD::MULHU
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:637
llvm::SDValue::getOpcode
unsigned getOpcode() const
Definition: SelectionDAGNodes.h:1121
llvm::SelectionDAG::getTargetConstant
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:652
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
llvm::ISD::INTRINSIC_W_CHAIN
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
Definition: ISDOpcodes.h:192
CASE_VMXOR_VMANDN_VMOR_OPCODES
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
llvm::SelectionDAG::getMachineFunction
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:449
llvm::SelectionDAG::ComputeNumSignBits
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: SelectionDAG.cpp:3890
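A sketch of the kind of check this enables, assuming a 64-bit Op; the helper name is illustrative:
#include "llvm/CodeGen/SelectionDAG.h"
// Hypothetical check: more than 32 replicated sign bits means the i64 value
// is already sign-extended from i32, the invariant W-instructions rely on.
static bool isSignExtendedWord(const llvm::SelectionDAG &DAG,
                               llvm::SDValue Op) {
  return DAG.ComputeNumSignBits(Op) > 32;
}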
llvm::RISCVII::hasDummyMaskOp
static bool hasDummyMaskOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:139
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
llvm::isMask_64
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version).
Definition: MathExtras.h:473
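A minimal compile-time sketch of what does and does not count as a mask:
#include "llvm/Support/MathExtras.h"
// Contiguous ones from bit 0 qualify; a gap below the ones does not.
static_assert(llvm::isMask_64(0xFFFull), "twelve contiguous low ones");
static_assert(!llvm::isMask_64(0xFF0ull), "low zeros disqualify");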
llvm::RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL
@ SPLAT_VECTOR_SPLIT_I64_VL
Definition: RISCVISelLowering.h:166
raw_ostream.h
llvm::SDValue::isMachineOpcode
bool isMachineOpcode() const
Definition: SelectionDAGNodes.h:1153
llvm::RISCVDAGToDAGISel::selectSHXADDOp
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
Definition: RISCVISelDAGToDAG.cpp:2020
llvm::RISCV::VLEPseudo
Definition: RISCVISelDAGToDAG.h:177
llvm::MVT::f32
@ f32
Definition: MachineValueType.h:57
llvm::RISCVDAGToDAGISel::hasAllHUsers
bool hasAllHUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:75
ValidateFn
bool(*)(int64_t) ValidateFn
Definition: RISCVISelDAGToDAG.cpp:2254
llvm::RISCVISD::CTZW
@ CTZW
Definition: RISCVISelLowering.h:79
llvm::RISCVDAGToDAGISel::hasAllNBitUsers
bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const
Definition: RISCVISelDAGToDAG.cpp:2123
Debug.h
llvm::ISD::ATOMIC_LOAD
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1159
llvm::RISCVDAGToDAGISel::selectSExti32
bool selectSExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1984
llvm::TargetLoweringBase::getPointerTy
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
Definition: TargetLowering.h:354
llvm::ISD::TokenFactor
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
llvm::RISCVII::LMUL_F2
@ LMUL_F2
Definition: RISCVBaseInfo.h:116
llvm::RISCVII::TAIL_AGNOSTIC
@ TAIL_AGNOSTIC
Definition: RISCVBaseInfo.h:120
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
llvm::RISCVDAGToDAGISel::addVectorLoadStoreOperands
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
Definition: RISCVISelDAGToDAG.cpp:249
isAllUndef
static bool isAllUndef(ArrayRef< SDValue > Values)
Definition: RISCVISelDAGToDAG.cpp:292