// RISCVISelDAGToDAG.cpp — source listing from LLVM 14.0.0git (doxygen export).
//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//
12 
#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #include "RISCVGenSearchableTables.inc"
41 } // namespace RISCV
42 } // namespace llvm
43 
46  E = CurDAG->allnodes_end();
47  I != E;) {
48  SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
49 
50  // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
51  // load. Done after lowering and combining so that we have a chance to
52  // optimize this to VMV_V_X_VL when the upper bits aren't needed.
53  if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
54  continue;
55 
56  assert(N->getNumOperands() == 3 && "Unexpected number of operands");
57  MVT VT = N->getSimpleValueType(0);
58  SDValue Lo = N->getOperand(0);
59  SDValue Hi = N->getOperand(1);
60  SDValue VL = N->getOperand(2);
62  Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
63  "Unexpected VTs!");
66  SDLoc DL(N);
67 
68  // We use the same frame index we use for moving two i32s into 64-bit FPR.
69  // This is an analogous operation.
70  int FI = FuncInfo->getMoveF64FrameIndex(MF);
73  SDValue StackSlot =
75 
76  SDValue Chain = CurDAG->getEntryNode();
77  Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
78 
79  SDValue OffsetSlot =
81  Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
82  Align(8));
83 
85 
86  SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
87  SDValue IntID =
88  CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
89  SDValue Ops[] = {Chain, IntID, StackSlot,
90  CurDAG->getRegister(RISCV::X0, MVT::i64), VL};
91 
93  ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
95 
96  // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
97  // vlse we created. This will cause general havok on the dag because
98  // anything below the conversion could be folded into other existing nodes.
99  // To avoid invalidating 'I', back it up to the convert node.
100  --I;
102 
103  // Now that we did that, the node is dead. Increment the iterator to the
104  // next node to process, then delete N.
105  ++I;
106  CurDAG->DeleteNode(N);
107  }
108 }
109 
112 
113  bool MadeChange = false;
114  while (Position != CurDAG->allnodes_begin()) {
115  SDNode *N = &*--Position;
116  // Skip dead nodes and any non-machine opcodes.
117  if (N->use_empty() || !N->isMachineOpcode())
118  continue;
119 
120  MadeChange |= doPeepholeSExtW(N);
121  MadeChange |= doPeepholeLoadStoreADDI(N);
122  }
123 
124  if (MadeChange)
126 }
127 
128 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
129  const RISCVSubtarget &Subtarget) {
130  MVT XLenVT = Subtarget.getXLenVT();
132  RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
133 
134  SDNode *Result = nullptr;
135  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
136  for (RISCVMatInt::Inst &Inst : Seq) {
137  SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
138  if (Inst.Opc == RISCV::LUI)
139  Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
140  else if (Inst.Opc == RISCV::ADDUW)
141  Result = CurDAG->getMachineNode(RISCV::ADDUW, DL, XLenVT, SrcReg,
142  CurDAG->getRegister(RISCV::X0, XLenVT));
143  else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD ||
144  Inst.Opc == RISCV::SH3ADD)
145  Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SrcReg);
146  else
147  Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
148 
149  // Only the first instruction has X0 as its source.
150  SrcReg = SDValue(Result, 0);
151  }
152 
153  return Result;
154 }
155 
157  unsigned RegClassID, unsigned SubReg0) {
158  assert(Regs.size() >= 2 && Regs.size() <= 8);
159 
160  SDLoc DL(Regs[0]);
162 
163  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
164 
165  for (unsigned I = 0; I < Regs.size(); ++I) {
166  Ops.push_back(Regs[I]);
167  Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
168  }
169  SDNode *N =
170  CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
171  return SDValue(N, 0);
172 }
173 
175  unsigned NF) {
176  static const unsigned RegClassIDs[] = {
177  RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
178  RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
179  RISCV::VRN8M1RegClassID};
180 
181  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
182 }
183 
185  unsigned NF) {
186  static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
187  RISCV::VRN3M2RegClassID,
188  RISCV::VRN4M2RegClassID};
189 
190  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
191 }
192 
194  unsigned NF) {
195  return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
196  RISCV::sub_vrm4_0);
197 }
198 
200  unsigned NF, RISCVII::VLMUL LMUL) {
201  switch (LMUL) {
202  default:
203  llvm_unreachable("Invalid LMUL.");
208  return createM1Tuple(CurDAG, Regs, NF);
210  return createM2Tuple(CurDAG, Regs, NF);
212  return createM4Tuple(CurDAG, Regs, NF);
213  }
214 }
215 
217  SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
218  bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
219  bool IsLoad, MVT *IndexVT) {
220  SDValue Chain = Node->getOperand(0);
221  SDValue Glue;
222 
223  SDValue Base;
224  SelectBaseAddr(Node->getOperand(CurOp++), Base);
225  Operands.push_back(Base); // Base pointer.
226 
227  if (IsStridedOrIndexed) {
228  Operands.push_back(Node->getOperand(CurOp++)); // Index.
229  if (IndexVT)
230  *IndexVT = Operands.back()->getSimpleValueType(0);
231  }
232 
233  if (IsMasked) {
234  // Mask needs to be copied to V0.
235  SDValue Mask = Node->getOperand(CurOp++);
236  Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
237  Glue = Chain.getValue(1);
238  Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
239  }
240  SDValue VL;
241  selectVLOp(Node->getOperand(CurOp++), VL);
242  Operands.push_back(VL);
243 
244  MVT XLenVT = Subtarget->getXLenVT();
245  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
246  Operands.push_back(SEWOp);
247 
248  // Masked load has the tail policy argument.
249  if (IsMasked && IsLoad) {
250  // Policy must be a constant.
251  uint64_t Policy = Node->getConstantOperandVal(CurOp++);
252  SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
253  Operands.push_back(PolicyOp);
254  }
255 
256  Operands.push_back(Chain); // Chain.
257  if (Glue)
258  Operands.push_back(Glue);
259 }
260 
261 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
262  bool IsStrided) {
263  SDLoc DL(Node);
264  unsigned NF = Node->getNumValues() - 1;
265  MVT VT = Node->getSimpleValueType(0);
266  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
268 
269  unsigned CurOp = 2;
271  if (IsMasked) {
272  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
273  Node->op_begin() + CurOp + NF);
274  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
275  Operands.push_back(MaskedOff);
276  CurOp += NF;
277  }
278 
279  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
280  Operands, /*IsLoad=*/true);
281 
282  const RISCV::VLSEGPseudo *P =
283  RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
284  static_cast<unsigned>(LMUL));
287 
288  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
289  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
290 
291  SDValue SuperReg = SDValue(Load, 0);
292  for (unsigned I = 0; I < NF; ++I) {
293  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
294  ReplaceUses(SDValue(Node, I),
295  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
296  }
297 
298  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
299  CurDAG->RemoveDeadNode(Node);
300 }
301 
302 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
303  SDLoc DL(Node);
304  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
305  MVT VT = Node->getSimpleValueType(0);
306  MVT XLenVT = Subtarget->getXLenVT();
307  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
309 
310  unsigned CurOp = 2;
312  if (IsMasked) {
313  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
314  Node->op_begin() + CurOp + NF);
315  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
316  Operands.push_back(MaskedOff);
317  CurOp += NF;
318  }
319 
320  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
321  /*IsStridedOrIndexed*/ false, Operands,
322  /*IsLoad=*/true);
323 
324  const RISCV::VLSEGPseudo *P =
325  RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
326  Log2SEW, static_cast<unsigned>(LMUL));
329  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
330  /*Glue*/ SDValue(Load, 2));
331 
332  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
333  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
334 
335  SDValue SuperReg = SDValue(Load, 0);
336  for (unsigned I = 0; I < NF; ++I) {
337  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
338  ReplaceUses(SDValue(Node, I),
339  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
340  }
341 
342  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0)); // VL
343  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
344  CurDAG->RemoveDeadNode(Node);
345 }
346 
347 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
348  bool IsOrdered) {
349  SDLoc DL(Node);
350  unsigned NF = Node->getNumValues() - 1;
351  MVT VT = Node->getSimpleValueType(0);
352  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
354 
355  unsigned CurOp = 2;
357  if (IsMasked) {
358  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
359  Node->op_begin() + CurOp + NF);
360  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
361  Operands.push_back(MaskedOff);
362  CurOp += NF;
363  }
364 
365  MVT IndexVT;
366  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
367  /*IsStridedOrIndexed*/ true, Operands,
368  /*IsLoad=*/true, &IndexVT);
369 
371  "Element count mismatch");
372 
373  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
374  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
375  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
376  NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
377  static_cast<unsigned>(IndexLMUL));
380 
381  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
382  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
383 
384  SDValue SuperReg = SDValue(Load, 0);
385  for (unsigned I = 0; I < NF; ++I) {
386  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
387  ReplaceUses(SDValue(Node, I),
388  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
389  }
390 
391  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
392  CurDAG->RemoveDeadNode(Node);
393 }
394 
395 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
396  bool IsStrided) {
397  SDLoc DL(Node);
398  unsigned NF = Node->getNumOperands() - 4;
399  if (IsStrided)
400  NF--;
401  if (IsMasked)
402  NF--;
403  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
404  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
406  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
407  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
408 
410  Operands.push_back(StoreVal);
411  unsigned CurOp = 2 + NF;
412 
413  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
414  Operands);
415 
416  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
417  NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
419  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
420 
421  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
422  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
423 
424  ReplaceNode(Node, Store);
425 }
426 
427 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
428  bool IsOrdered) {
429  SDLoc DL(Node);
430  unsigned NF = Node->getNumOperands() - 5;
431  if (IsMasked)
432  --NF;
433  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
434  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
436  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
437  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
438 
440  Operands.push_back(StoreVal);
441  unsigned CurOp = 2 + NF;
442 
443  MVT IndexVT;
444  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
445  /*IsStridedOrIndexed*/ true, Operands,
446  /*IsLoad=*/false, &IndexVT);
447 
449  "Element count mismatch");
450 
451  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
452  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
453  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
454  NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
455  static_cast<unsigned>(IndexLMUL));
457  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
458 
459  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
460  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
461 
462  ReplaceNode(Node, Store);
463 }
464 
465 
467  // If we have a custom node, we have already selected.
468  if (Node->isMachineOpcode()) {
469  LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
470  Node->setNodeId(-1);
471  return;
472  }
473 
474  // Instruction Selection not handled by the auto-generated tablegen selection
475  // should be handled here.
476  unsigned Opcode = Node->getOpcode();
477  MVT XLenVT = Subtarget->getXLenVT();
478  SDLoc DL(Node);
479  MVT VT = Node->getSimpleValueType(0);
480 
481  switch (Opcode) {
482  case ISD::Constant: {
483  auto *ConstNode = cast<ConstantSDNode>(Node);
484  if (VT == XLenVT && ConstNode->isZero()) {
485  SDValue New =
486  CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
487  ReplaceNode(Node, New.getNode());
488  return;
489  }
490  int64_t Imm = ConstNode->getSExtValue();
491  // If the upper XLen-16 bits are not used, try to convert this to a simm12
492  // by sign extending bit 15.
493  if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
494  hasAllHUsers(Node))
495  Imm = SignExtend64(Imm, 16);
496  // If the upper 32-bits are not used try to convert this into a simm32 by
497  // sign extending bit 32.
498  if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
499  Imm = SignExtend64(Imm, 32);
500 
501  ReplaceNode(Node, selectImm(CurDAG, DL, Imm, *Subtarget));
502  return;
503  }
504  case ISD::FrameIndex: {
505  SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
506  int FI = cast<FrameIndexSDNode>(Node)->getIndex();
507  SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
508  ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
509  return;
510  }
511  case ISD::SRL: {
512  // We don't need this transform if zext.h is supported.
513  if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
514  break;
515  // Optimize (srl (and X, 0xffff), C) ->
516  // (srli (slli X, (XLen-16), (XLen-16) + C)
517  // Taking into account that the 0xffff may have had lower bits unset by
518  // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
519  // This pattern occurs when type legalizing i16 right shifts.
520  // FIXME: This could be extended to other AND masks.
521  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
522  if (N1C) {
523  uint64_t ShAmt = N1C->getZExtValue();
524  SDValue N0 = Node->getOperand(0);
525  if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
526  isa<ConstantSDNode>(N0.getOperand(1))) {
528  Mask |= maskTrailingOnes<uint64_t>(ShAmt);
529  if (Mask == 0xffff) {
530  unsigned LShAmt = Subtarget->getXLen() - 16;
531  SDNode *SLLI =
532  CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
533  CurDAG->getTargetConstant(LShAmt, DL, VT));
534  SDNode *SRLI = CurDAG->getMachineNode(
535  RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
536  CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
537  ReplaceNode(Node, SRLI);
538  return;
539  }
540  }
541  }
542 
543  break;
544  }
545  case ISD::AND: {
546  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
547  if (!N1C)
548  break;
549 
550  SDValue N0 = Node->getOperand(0);
551 
552  bool LeftShift = N0.getOpcode() == ISD::SHL;
553  if (!LeftShift && N0.getOpcode() != ISD::SRL)
554  break;
555 
556  auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
557  if (!C)
558  break;
559  uint64_t C2 = C->getZExtValue();
560  unsigned XLen = Subtarget->getXLen();
561  if (!C2 || C2 >= XLen)
562  break;
563 
564  uint64_t C1 = N1C->getZExtValue();
565 
566  // Keep track of whether this is a andi, zext.h, or zext.w.
567  bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
568  if (C1 == UINT64_C(0xFFFF) &&
569  (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
570  ZExtOrANDI = true;
571  if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
572  ZExtOrANDI = true;
573 
574  // Clear irrelevant bits in the mask.
575  if (LeftShift)
576  C1 &= maskTrailingZeros<uint64_t>(C2);
577  else
578  C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
579 
580  // Some transforms should only be done if the shift has a single use or
581  // the AND would become (srli (slli X, 32), 32)
582  bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
583 
584  SDValue X = N0.getOperand(0);
585 
586  // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
587  // with c3 leading zeros.
588  if (!LeftShift && isMask_64(C1)) {
589  uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
590  if (C2 < C3) {
591  // If the number of leading zeros is C2+32 this can be SRLIW.
592  if (C2 + 32 == C3) {
593  SDNode *SRLIW =
594  CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
595  CurDAG->getTargetConstant(C2, DL, XLenVT));
596  ReplaceNode(Node, SRLIW);
597  return;
598  }
599 
600  // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
601  // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
602  //
603  // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
604  // legalized and goes through DAG combine.
605  SDValue Y;
606  if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
607  selectSExti32(X, Y)) {
608  SDNode *SRAIW =
609  CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
610  CurDAG->getTargetConstant(31, DL, XLenVT));
611  SDNode *SRLIW = CurDAG->getMachineNode(
612  RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
613  CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
614  ReplaceNode(Node, SRLIW);
615  return;
616  }
617 
618  // (srli (slli x, c3-c2), c3).
619  if (OneUseOrZExtW && !ZExtOrANDI) {
620  SDNode *SLLI = CurDAG->getMachineNode(
621  RISCV::SLLI, DL, XLenVT, X,
622  CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
623  SDNode *SRLI =
624  CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
625  CurDAG->getTargetConstant(C3, DL, XLenVT));
626  ReplaceNode(Node, SRLI);
627  return;
628  }
629  }
630  }
631 
632  // Turn (and (shl x, c2), c1) -> (srli (slli c2+c3), c3) if c1 is a mask
633  // shifted by c2 bits with c3 leading zeros.
634  if (LeftShift && isShiftedMask_64(C1)) {
635  uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
636 
637  if (C2 + C3 < XLen &&
638  C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
639  // Use slli.uw when possible.
640  if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
641  SDNode *SLLIUW =
642  CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
643  CurDAG->getTargetConstant(C2, DL, XLenVT));
644  ReplaceNode(Node, SLLIUW);
645  return;
646  }
647 
648  // (srli (slli c2+c3), c3)
649  if (OneUseOrZExtW && !ZExtOrANDI) {
650  SDNode *SLLI = CurDAG->getMachineNode(
651  RISCV::SLLI, DL, XLenVT, X,
652  CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
653  SDNode *SRLI =
654  CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
655  CurDAG->getTargetConstant(C3, DL, XLenVT));
656  ReplaceNode(Node, SRLI);
657  return;
658  }
659  }
660  }
661 
662  // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
663  // shifted mask with c2 leading zeros and c3 trailing zeros.
664  if (!LeftShift && isShiftedMask_64(C1)) {
665  uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
667  if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !ZExtOrANDI) {
668  SDNode *SRLI = CurDAG->getMachineNode(
669  RISCV::SRLI, DL, XLenVT, X,
670  CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
671  SDNode *SLLI =
672  CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
673  CurDAG->getTargetConstant(C3, DL, XLenVT));
674  ReplaceNode(Node, SLLI);
675  return;
676  }
677  // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
678  if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
679  OneUseOrZExtW && !ZExtOrANDI) {
680  SDNode *SRLIW = CurDAG->getMachineNode(
681  RISCV::SRLIW, DL, XLenVT, X,
682  CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
683  SDNode *SLLI =
684  CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
685  CurDAG->getTargetConstant(C3, DL, XLenVT));
686  ReplaceNode(Node, SLLI);
687  return;
688  }
689  }
690 
691  // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
692  // shifted mask with no leading zeros and c3 trailing zeros.
693  if (LeftShift && isShiftedMask_64(C1)) {
694  uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
696  if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !ZExtOrANDI) {
697  SDNode *SRLI = CurDAG->getMachineNode(
698  RISCV::SRLI, DL, XLenVT, X,
699  CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
700  SDNode *SLLI =
701  CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
702  CurDAG->getTargetConstant(C3, DL, XLenVT));
703  ReplaceNode(Node, SLLI);
704  return;
705  }
706  // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
707  if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !ZExtOrANDI) {
708  SDNode *SRLIW = CurDAG->getMachineNode(
709  RISCV::SRLIW, DL, XLenVT, X,
710  CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
711  SDNode *SLLI =
712  CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
713  CurDAG->getTargetConstant(C3, DL, XLenVT));
714  ReplaceNode(Node, SLLI);
715  return;
716  }
717  }
718 
719  break;
720  }
722  unsigned IntNo = Node->getConstantOperandVal(0);
723  switch (IntNo) {
724  // By default we do not custom select any intrinsic.
725  default:
726  break;
727  case Intrinsic::riscv_vmsgeu:
728  case Intrinsic::riscv_vmsge: {
729  SDValue Src1 = Node->getOperand(1);
730  SDValue Src2 = Node->getOperand(2);
731  // Only custom select scalar second operand.
732  if (Src2.getValueType() != XLenVT)
733  break;
734  // Small constants are handled with patterns.
735  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
736  int64_t CVal = C->getSExtValue();
737  if (CVal >= -15 && CVal <= 16)
738  break;
739  }
740  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
741  MVT Src1VT = Src1.getSimpleValueType();
742  unsigned VMSLTOpcode, VMNANDOpcode;
743  switch (RISCVTargetLowering::getLMUL(Src1VT)) {
744  default:
745  llvm_unreachable("Unexpected LMUL!");
747  VMSLTOpcode =
748  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
749  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
750  break;
752  VMSLTOpcode =
753  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
754  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
755  break;
757  VMSLTOpcode =
758  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
759  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
760  break;
762  VMSLTOpcode =
763  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
764  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
765  break;
767  VMSLTOpcode =
768  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
769  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
770  break;
772  VMSLTOpcode =
773  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
774  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
775  break;
777  VMSLTOpcode =
778  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
779  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
780  break;
781  }
783  Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
784  SDValue VL;
785  selectVLOp(Node->getOperand(3), VL);
786 
787  // Expand to
788  // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
789  SDValue Cmp = SDValue(
790  CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
791  0);
792  ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
793  {Cmp, Cmp, VL, SEW}));
794  return;
795  }
796  case Intrinsic::riscv_vmsgeu_mask:
797  case Intrinsic::riscv_vmsge_mask: {
798  SDValue Src1 = Node->getOperand(2);
799  SDValue Src2 = Node->getOperand(3);
800  // Only custom select scalar second operand.
801  if (Src2.getValueType() != XLenVT)
802  break;
803  // Small constants are handled with patterns.
804  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
805  int64_t CVal = C->getSExtValue();
806  if (CVal >= -15 && CVal <= 16)
807  break;
808  }
809  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
810  MVT Src1VT = Src1.getSimpleValueType();
811  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
812  switch (RISCVTargetLowering::getLMUL(Src1VT)) {
813  default:
814  llvm_unreachable("Unexpected LMUL!");
816  VMSLTOpcode =
817  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
818  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
819  : RISCV::PseudoVMSLT_VX_MF8_MASK;
820  break;
822  VMSLTOpcode =
823  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
824  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
825  : RISCV::PseudoVMSLT_VX_MF4_MASK;
826  break;
828  VMSLTOpcode =
829  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
830  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
831  : RISCV::PseudoVMSLT_VX_MF2_MASK;
832  break;
834  VMSLTOpcode =
835  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
836  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
837  : RISCV::PseudoVMSLT_VX_M1_MASK;
838  break;
840  VMSLTOpcode =
841  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
842  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
843  : RISCV::PseudoVMSLT_VX_M2_MASK;
844  break;
846  VMSLTOpcode =
847  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
848  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
849  : RISCV::PseudoVMSLT_VX_M4_MASK;
850  break;
852  VMSLTOpcode =
853  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
854  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
855  : RISCV::PseudoVMSLT_VX_M8_MASK;
856  break;
857  }
858  // Mask operations use the LMUL from the mask type.
859  switch (RISCVTargetLowering::getLMUL(VT)) {
860  default:
861  llvm_unreachable("Unexpected LMUL!");
863  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
864  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
865  break;
867  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
868  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
869  break;
871  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
872  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
873  break;
875  VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
876  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
877  break;
879  VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
880  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
881  break;
883  VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
884  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
885  break;
887  VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
888  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
889  break;
890  }
892  Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
893  SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
894  SDValue VL;
895  selectVLOp(Node->getOperand(5), VL);
896  SDValue MaskedOff = Node->getOperand(1);
897  SDValue Mask = Node->getOperand(4);
898  // If the MaskedOff value and the Mask are the same value use
899  // vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
900  // This avoids needing to copy v0 to vd before starting the next sequence.
901  if (Mask == MaskedOff) {
902  SDValue Cmp = SDValue(
903  CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
904  0);
905  ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
906  {Mask, Cmp, VL, MaskSEW}));
907  return;
908  }
909 
910  // Mask needs to be copied to V0.
912  RISCV::V0, Mask, SDValue());
913  SDValue Glue = Chain.getValue(1);
914  SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
915 
916  // Otherwise use
917  // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
918  SDValue Cmp = SDValue(
919  CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
920  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
921  0);
922  ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
923  {Cmp, Mask, VL, MaskSEW}));
924  return;
925  }
926  }
927  break;
928  }
929  case ISD::INTRINSIC_W_CHAIN: {
930  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
931  switch (IntNo) {
932  // By default we do not custom select any intrinsic.
933  default:
934  break;
935 
936  case Intrinsic::riscv_vsetvli:
937  case Intrinsic::riscv_vsetvlimax: {
938  if (!Subtarget->hasStdExtV())
939  break;
940 
941  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
942  unsigned Offset = VLMax ? 2 : 3;
943 
944  assert(Node->getNumOperands() == Offset + 2 &&
945  "Unexpected number of operands");
946 
947  unsigned SEW =
948  RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
949  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
950  Node->getConstantOperandVal(Offset + 1) & 0x7);
951 
952  unsigned VTypeI = RISCVVType::encodeVTYPE(
953  VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
954  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
955 
956  SDValue VLOperand;
957  unsigned Opcode = RISCV::PseudoVSETVLI;
958  if (VLMax) {
959  VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
960  Opcode = RISCV::PseudoVSETVLIX0;
961  } else {
962  VLOperand = Node->getOperand(2);
963 
964  if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
965  uint64_t AVL = C->getZExtValue();
966  if (isUInt<5>(AVL)) {
967  SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
968  ReplaceNode(
969  Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
970  MVT::Other, VLImm, VTypeIOp,
971  /* Chain */ Node->getOperand(0)));
972  return;
973  }
974  }
975  }
976 
977  ReplaceNode(Node,
978  CurDAG->getMachineNode(Opcode, DL, XLenVT,
979  MVT::Other, VLOperand, VTypeIOp,
980  /* Chain */ Node->getOperand(0)));
981  return;
982  }
983  case Intrinsic::riscv_vlseg2:
984  case Intrinsic::riscv_vlseg3:
985  case Intrinsic::riscv_vlseg4:
986  case Intrinsic::riscv_vlseg5:
987  case Intrinsic::riscv_vlseg6:
988  case Intrinsic::riscv_vlseg7:
989  case Intrinsic::riscv_vlseg8: {
990  selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
991  return;
992  }
993  case Intrinsic::riscv_vlseg2_mask:
994  case Intrinsic::riscv_vlseg3_mask:
995  case Intrinsic::riscv_vlseg4_mask:
996  case Intrinsic::riscv_vlseg5_mask:
997  case Intrinsic::riscv_vlseg6_mask:
998  case Intrinsic::riscv_vlseg7_mask:
999  case Intrinsic::riscv_vlseg8_mask: {
1000  selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1001  return;
1002  }
1003  case Intrinsic::riscv_vlsseg2:
1004  case Intrinsic::riscv_vlsseg3:
1005  case Intrinsic::riscv_vlsseg4:
1006  case Intrinsic::riscv_vlsseg5:
1007  case Intrinsic::riscv_vlsseg6:
1008  case Intrinsic::riscv_vlsseg7:
1009  case Intrinsic::riscv_vlsseg8: {
1010  selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1011  return;
1012  }
1013  case Intrinsic::riscv_vlsseg2_mask:
1014  case Intrinsic::riscv_vlsseg3_mask:
1015  case Intrinsic::riscv_vlsseg4_mask:
1016  case Intrinsic::riscv_vlsseg5_mask:
1017  case Intrinsic::riscv_vlsseg6_mask:
1018  case Intrinsic::riscv_vlsseg7_mask:
1019  case Intrinsic::riscv_vlsseg8_mask: {
1020  selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1021  return;
1022  }
1023  case Intrinsic::riscv_vloxseg2:
1024  case Intrinsic::riscv_vloxseg3:
1025  case Intrinsic::riscv_vloxseg4:
1026  case Intrinsic::riscv_vloxseg5:
1027  case Intrinsic::riscv_vloxseg6:
1028  case Intrinsic::riscv_vloxseg7:
1029  case Intrinsic::riscv_vloxseg8:
1030  selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1031  return;
1032  case Intrinsic::riscv_vluxseg2:
1033  case Intrinsic::riscv_vluxseg3:
1034  case Intrinsic::riscv_vluxseg4:
1035  case Intrinsic::riscv_vluxseg5:
1036  case Intrinsic::riscv_vluxseg6:
1037  case Intrinsic::riscv_vluxseg7:
1038  case Intrinsic::riscv_vluxseg8:
1039  selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1040  return;
1041  case Intrinsic::riscv_vloxseg2_mask:
1042  case Intrinsic::riscv_vloxseg3_mask:
1043  case Intrinsic::riscv_vloxseg4_mask:
1044  case Intrinsic::riscv_vloxseg5_mask:
1045  case Intrinsic::riscv_vloxseg6_mask:
1046  case Intrinsic::riscv_vloxseg7_mask:
1047  case Intrinsic::riscv_vloxseg8_mask:
1048  selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1049  return;
1050  case Intrinsic::riscv_vluxseg2_mask:
1051  case Intrinsic::riscv_vluxseg3_mask:
1052  case Intrinsic::riscv_vluxseg4_mask:
1053  case Intrinsic::riscv_vluxseg5_mask:
1054  case Intrinsic::riscv_vluxseg6_mask:
1055  case Intrinsic::riscv_vluxseg7_mask:
1056  case Intrinsic::riscv_vluxseg8_mask:
1057  selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1058  return;
1059  case Intrinsic::riscv_vlseg8ff:
1060  case Intrinsic::riscv_vlseg7ff:
1061  case Intrinsic::riscv_vlseg6ff:
1062  case Intrinsic::riscv_vlseg5ff:
1063  case Intrinsic::riscv_vlseg4ff:
1064  case Intrinsic::riscv_vlseg3ff:
1065  case Intrinsic::riscv_vlseg2ff: {
1066  selectVLSEGFF(Node, /*IsMasked*/ false);
1067  return;
1068  }
1069  case Intrinsic::riscv_vlseg8ff_mask:
1070  case Intrinsic::riscv_vlseg7ff_mask:
1071  case Intrinsic::riscv_vlseg6ff_mask:
1072  case Intrinsic::riscv_vlseg5ff_mask:
1073  case Intrinsic::riscv_vlseg4ff_mask:
1074  case Intrinsic::riscv_vlseg3ff_mask:
1075  case Intrinsic::riscv_vlseg2ff_mask: {
1076  selectVLSEGFF(Node, /*IsMasked*/ true);
1077  return;
1078  }
1079  case Intrinsic::riscv_vloxei:
1080  case Intrinsic::riscv_vloxei_mask:
1081  case Intrinsic::riscv_vluxei:
1082  case Intrinsic::riscv_vluxei_mask: {
1083  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1084  IntNo == Intrinsic::riscv_vluxei_mask;
1085  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1086  IntNo == Intrinsic::riscv_vloxei_mask;
1087 
1088  MVT VT = Node->getSimpleValueType(0);
1089  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1090 
1091  unsigned CurOp = 2;
1093  if (IsMasked)
1094  Operands.push_back(Node->getOperand(CurOp++));
1095 
1096  MVT IndexVT;
1097  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1098  /*IsStridedOrIndexed*/ true, Operands,
1099  /*IsLoad=*/true, &IndexVT);
1100 
1102  "Element count mismatch");
1103 
1105  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1106  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1107  const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1108  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1109  static_cast<unsigned>(IndexLMUL));
1110  MachineSDNode *Load =
1111  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1112 
1113  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1114  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1115 
1116  ReplaceNode(Node, Load);
1117  return;
1118  }
1119  case Intrinsic::riscv_vlm:
1120  case Intrinsic::riscv_vle:
1121  case Intrinsic::riscv_vle_mask:
1122  case Intrinsic::riscv_vlse:
1123  case Intrinsic::riscv_vlse_mask: {
1124  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1125  IntNo == Intrinsic::riscv_vlse_mask;
1126  bool IsStrided =
1127  IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1128 
1129  MVT VT = Node->getSimpleValueType(0);
1130  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1131 
1132  unsigned CurOp = 2;
1134  if (IsMasked)
1135  Operands.push_back(Node->getOperand(CurOp++));
1136 
1137  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1138  Operands, /*IsLoad=*/true);
1139 
1141  const RISCV::VLEPseudo *P =
1142  RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
1143  static_cast<unsigned>(LMUL));
1144  MachineSDNode *Load =
1145  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1146 
1147  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1148  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1149 
1150  ReplaceNode(Node, Load);
1151  return;
1152  }
1153  case Intrinsic::riscv_vleff:
1154  case Intrinsic::riscv_vleff_mask: {
1155  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1156 
1157  MVT VT = Node->getSimpleValueType(0);
1158  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1159 
1160  unsigned CurOp = 2;
1162  if (IsMasked)
1163  Operands.push_back(Node->getOperand(CurOp++));
1164 
1165  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1166  /*IsStridedOrIndexed*/ false, Operands,
1167  /*IsLoad=*/true);
1168 
1170  const RISCV::VLEPseudo *P =
1171  RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
1172  static_cast<unsigned>(LMUL));
1173  MachineSDNode *Load =
1174  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
1176  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
1177  /*Glue*/ SDValue(Load, 2));
1178 
1179  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1180  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1181 
1182  ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
1183  ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
1184  ReplaceUses(SDValue(Node, 2), SDValue(Load, 1)); // Chain
1185  CurDAG->RemoveDeadNode(Node);
1186  return;
1187  }
1188  }
1189  break;
1190  }
1191  case ISD::INTRINSIC_VOID: {
1192  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1193  switch (IntNo) {
1194  case Intrinsic::riscv_vsseg2:
1195  case Intrinsic::riscv_vsseg3:
1196  case Intrinsic::riscv_vsseg4:
1197  case Intrinsic::riscv_vsseg5:
1198  case Intrinsic::riscv_vsseg6:
1199  case Intrinsic::riscv_vsseg7:
1200  case Intrinsic::riscv_vsseg8: {
1201  selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1202  return;
1203  }
1204  case Intrinsic::riscv_vsseg2_mask:
1205  case Intrinsic::riscv_vsseg3_mask:
1206  case Intrinsic::riscv_vsseg4_mask:
1207  case Intrinsic::riscv_vsseg5_mask:
1208  case Intrinsic::riscv_vsseg6_mask:
1209  case Intrinsic::riscv_vsseg7_mask:
1210  case Intrinsic::riscv_vsseg8_mask: {
1211  selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1212  return;
1213  }
1214  case Intrinsic::riscv_vssseg2:
1215  case Intrinsic::riscv_vssseg3:
1216  case Intrinsic::riscv_vssseg4:
1217  case Intrinsic::riscv_vssseg5:
1218  case Intrinsic::riscv_vssseg6:
1219  case Intrinsic::riscv_vssseg7:
1220  case Intrinsic::riscv_vssseg8: {
1221  selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1222  return;
1223  }
1224  case Intrinsic::riscv_vssseg2_mask:
1225  case Intrinsic::riscv_vssseg3_mask:
1226  case Intrinsic::riscv_vssseg4_mask:
1227  case Intrinsic::riscv_vssseg5_mask:
1228  case Intrinsic::riscv_vssseg6_mask:
1229  case Intrinsic::riscv_vssseg7_mask:
1230  case Intrinsic::riscv_vssseg8_mask: {
1231  selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1232  return;
1233  }
1234  case Intrinsic::riscv_vsoxseg2:
1235  case Intrinsic::riscv_vsoxseg3:
1236  case Intrinsic::riscv_vsoxseg4:
1237  case Intrinsic::riscv_vsoxseg5:
1238  case Intrinsic::riscv_vsoxseg6:
1239  case Intrinsic::riscv_vsoxseg7:
1240  case Intrinsic::riscv_vsoxseg8:
1241  selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1242  return;
1243  case Intrinsic::riscv_vsuxseg2:
1244  case Intrinsic::riscv_vsuxseg3:
1245  case Intrinsic::riscv_vsuxseg4:
1246  case Intrinsic::riscv_vsuxseg5:
1247  case Intrinsic::riscv_vsuxseg6:
1248  case Intrinsic::riscv_vsuxseg7:
1249  case Intrinsic::riscv_vsuxseg8:
1250  selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1251  return;
1252  case Intrinsic::riscv_vsoxseg2_mask:
1253  case Intrinsic::riscv_vsoxseg3_mask:
1254  case Intrinsic::riscv_vsoxseg4_mask:
1255  case Intrinsic::riscv_vsoxseg5_mask:
1256  case Intrinsic::riscv_vsoxseg6_mask:
1257  case Intrinsic::riscv_vsoxseg7_mask:
1258  case Intrinsic::riscv_vsoxseg8_mask:
1259  selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1260  return;
1261  case Intrinsic::riscv_vsuxseg2_mask:
1262  case Intrinsic::riscv_vsuxseg3_mask:
1263  case Intrinsic::riscv_vsuxseg4_mask:
1264  case Intrinsic::riscv_vsuxseg5_mask:
1265  case Intrinsic::riscv_vsuxseg6_mask:
1266  case Intrinsic::riscv_vsuxseg7_mask:
1267  case Intrinsic::riscv_vsuxseg8_mask:
1268  selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1269  return;
1270  case Intrinsic::riscv_vsoxei:
1271  case Intrinsic::riscv_vsoxei_mask:
1272  case Intrinsic::riscv_vsuxei:
1273  case Intrinsic::riscv_vsuxei_mask: {
1274  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1275  IntNo == Intrinsic::riscv_vsuxei_mask;
1276  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1277  IntNo == Intrinsic::riscv_vsoxei_mask;
1278 
1279  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1280  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1281 
1282  unsigned CurOp = 2;
1284  Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1285 
1286  MVT IndexVT;
1287  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1288  /*IsStridedOrIndexed*/ true, Operands,
1289  /*IsLoad=*/false, &IndexVT);
1290 
1292  "Element count mismatch");
1293 
1295  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1296  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1297  const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1298  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1299  static_cast<unsigned>(IndexLMUL));
1300  MachineSDNode *Store =
1301  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1302 
1303  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1304  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1305 
1306  ReplaceNode(Node, Store);
1307  return;
1308  }
1309  case Intrinsic::riscv_vsm:
1310  case Intrinsic::riscv_vse:
1311  case Intrinsic::riscv_vse_mask:
1312  case Intrinsic::riscv_vsse:
1313  case Intrinsic::riscv_vsse_mask: {
1314  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1315  IntNo == Intrinsic::riscv_vsse_mask;
1316  bool IsStrided =
1317  IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1318 
1319  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1320  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1321 
1322  unsigned CurOp = 2;
1324  Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1325 
1326  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1327  Operands);
1328 
1330  const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1331  IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1332  MachineSDNode *Store =
1333  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1334  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1335  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1336 
1337  ReplaceNode(Node, Store);
1338  return;
1339  }
1340  }
1341  break;
1342  }
1343  case ISD::BITCAST: {
1344  MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1345  // Just drop bitcasts between vectors if both are fixed or both are
1346  // scalable.
1347  if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1348  (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1349  ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1350  CurDAG->RemoveDeadNode(Node);
1351  return;
1352  }
1353  break;
1354  }
1355  case ISD::INSERT_SUBVECTOR: {
1356  SDValue V = Node->getOperand(0);
1357  SDValue SubV = Node->getOperand(1);
1358  SDLoc DL(SubV);
1359  auto Idx = Node->getConstantOperandVal(2);
1360  MVT SubVecVT = SubV.getSimpleValueType();
1361 
1362  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1363  MVT SubVecContainerVT = SubVecVT;
1364  // Establish the correct scalable-vector types for any fixed-length type.
1365  if (SubVecVT.isFixedLengthVector())
1366  SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1367  if (VT.isFixedLengthVector())
1368  VT = TLI.getContainerForFixedLengthVector(VT);
1369 
1370  const auto *TRI = Subtarget->getRegisterInfo();
1371  unsigned SubRegIdx;
1372  std::tie(SubRegIdx, Idx) =
1374  VT, SubVecContainerVT, Idx, TRI);
1375 
1376  // If the Idx hasn't been completely eliminated then this is a subvector
1377  // insert which doesn't naturally align to a vector register. These must
1378  // be handled using instructions to manipulate the vector registers.
1379  if (Idx != 0)
1380  break;
1381 
1382  RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1383  bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1384  SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1385  SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1386  (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1387  assert((!IsSubVecPartReg || V.isUndef()) &&
1388  "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1389  "the subvector is smaller than a full-sized register");
1390 
1391  // If we haven't set a SubRegIdx, then we must be going between
1392  // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1393  if (SubRegIdx == RISCV::NoSubRegister) {
1394  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1396  InRegClassID &&
1397  "Unexpected subvector extraction");
1398  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1399  SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1400  DL, VT, SubV, RC);
1401  ReplaceNode(Node, NewNode);
1402  return;
1403  }
1404 
1405  SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1406  ReplaceNode(Node, Insert.getNode());
1407  return;
1408  }
1409  case ISD::EXTRACT_SUBVECTOR: {
1410  SDValue V = Node->getOperand(0);
1411  auto Idx = Node->getConstantOperandVal(1);
1412  MVT InVT = V.getSimpleValueType();
1413  SDLoc DL(V);
1414 
1415  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1416  MVT SubVecContainerVT = VT;
1417  // Establish the correct scalable-vector types for any fixed-length type.
1418  if (VT.isFixedLengthVector())
1419  SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1420  if (InVT.isFixedLengthVector())
1421  InVT = TLI.getContainerForFixedLengthVector(InVT);
1422 
1423  const auto *TRI = Subtarget->getRegisterInfo();
1424  unsigned SubRegIdx;
1425  std::tie(SubRegIdx, Idx) =
1427  InVT, SubVecContainerVT, Idx, TRI);
1428 
1429  // If the Idx hasn't been completely eliminated then this is a subvector
1430  // extract which doesn't naturally align to a vector register. These must
1431  // be handled using instructions to manipulate the vector registers.
1432  if (Idx != 0)
1433  break;
1434 
1435  // If we haven't set a SubRegIdx, then we must be going between
1436  // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1437  if (SubRegIdx == RISCV::NoSubRegister) {
1438  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1440  InRegClassID &&
1441  "Unexpected subvector extraction");
1442  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1443  SDNode *NewNode =
1444  CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1445  ReplaceNode(Node, NewNode);
1446  return;
1447  }
1448 
1449  SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1450  ReplaceNode(Node, Extract.getNode());
1451  return;
1452  }
1453  case RISCVISD::VMV_V_X_VL:
1454  case RISCVISD::VFMV_V_F_VL: {
1455  // Try to match splat of a scalar load to a strided load with stride of x0.
1456  SDValue Src = Node->getOperand(0);
1457  auto *Ld = dyn_cast<LoadSDNode>(Src);
1458  if (!Ld)
1459  break;
1460  EVT MemVT = Ld->getMemoryVT();
1461  // The memory VT should be the same size as the element type.
1462  if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1463  break;
1464  if (!IsProfitableToFold(Src, Node, Node) ||
1465  !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1466  break;
1467 
1468  SDValue VL;
1469  selectVLOp(Node->getOperand(1), VL);
1470 
1471  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1472  SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1473 
1474  SDValue Operands[] = {Ld->getBasePtr(),
1475  CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1476  Ld->getChain()};
1477 
1479  const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1480  /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1481  static_cast<unsigned>(LMUL));
1482  MachineSDNode *Load =
1483  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1484 
1485  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1486  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1487 
1488  ReplaceNode(Node, Load);
1489  return;
1490  }
1491  }
1492 
1493  // Select the default instruction.
1494  SelectCode(Node);
1495 }
1496 
// SelectInlineAsmMemoryOperand: expand an inline-asm memory-constraint
// operand into the operand list used by instruction selection.  Returns
// false on success, true on failure (standard SelectionDAGISel convention).
// NOTE(review): the constraint case labels (doc lines 1500/1505) are elided
// in this rendered view — confirm which InlineAsm::Constraint_* values the
// two handled cases correspond to in the real source.
1498  const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1499  switch (ConstraintID) {
1501  // We just support simple memory operands that have a single address
1502  // operand and need no special handling.
1503  OutOps.push_back(Op);
1504  return false;
      // Second handled constraint: likewise pass the address operand through
      // unchanged.
1506  OutOps.push_back(Op);
1507  return false;
1508  default:
1509  break;
1510  }
1511 
      // Unhandled constraint kind.
1512  return true;
1513 }
1514 
// SelectAddrFrameIndex: match Addr only when it is a FrameIndex node,
// returning the corresponding target frame index as Base.
1516  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1517  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1518  return true;
1519  }
      // Not a frame index; let other address patterns handle it.
1520  return false;
1521 }
1522 
// SelectBaseAddr: accept any address.  A FrameIndex is lowered to its target
// form here; anything else is passed through to be selected as a register.
// Always succeeds.
1524  // If this is FrameIndex, select it directly. Otherwise just let it get
1525  // selected to a register independently.
1526  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1527  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1528  else
1529  Base = Addr;
1530  return true;
1531 }
1532 
// selectShiftMask: strip a redundant AND mask from a shift-amount operand.
// Always succeeds; ShAmt is the unmasked operand when the AND cannot change
// the low log2(ShiftWidth) bits that the shift instruction actually reads,
// otherwise N itself.
1534  SDValue &ShAmt) {
1535  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1536  // amount. If there is an AND on the shift amount, we can bypass it if it
1537  // doesn't affect any of those bits.
1538  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1539  const APInt &AndMask = N->getConstantOperandAPInt(1);
1540 
1541  // Since the max shift amount is a power of 2 we can subtract 1 to make a
1542  // mask that covers the bits needed to represent all shift amounts.
1543  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1544  APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1545 
      // If the AND keeps every bit the shifter reads, it is a no-op for us.
1546  if (ShMask.isSubsetOf(AndMask)) {
1547  ShAmt = N.getOperand(0);
1548  return true;
1549  }
1550 
1551  // SimplifyDemandedBits may have optimized the mask so try restoring any
1552  // bits that are known zero.
1553  KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1554  if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1555  ShAmt = N.getOperand(0);
1556  return true;
1557  }
1558  }
1559 
      // Fall back to using the original value as the shift amount.
1560  ShAmt = N;
1561  return true;
1562 }
1563 
// selectSExti32: match a value whose low 32 bits are known to be
// sign-extended to the full register width, peeling an explicit
// sext_inreg-from-i32 when present.
1565  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1566  cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1567  Val = N.getOperand(0);
1568  return true;
1569  }
      // No explicit extend: accept N itself if it already has more than
      // (width - 32) known sign bits, i.e. bit 31 is replicated upward.
1570  MVT VT = N.getSimpleValueType();
1571  if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1572  Val = N;
1573  return true;
1574  }
1575 
1576  return false;
1577 }
1578 
// selectZExti32: match a value whose bits above bit 31 are known zero,
// peeling an explicit AND with 0xFFFFFFFF when present.
1580  if (N.getOpcode() == ISD::AND) {
1581  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1582  if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1583  Val = N.getOperand(0);
1584  return true;
1585  }
1586  }
1587  MVT VT = N.getSimpleValueType();
      // NOTE(review): the declaration of 'Mask' (doc line 1588) is elided in
      // this rendered view — presumably the high bits above bit 31 of VT;
      // confirm against the real source.
1589  if (CurDAG->MaskedValueIsZero(N, Mask)) {
1590  Val = N;
1591  return true;
1592  }
1593 
1594  return false;
1595 }
1596 
1597 // Return true if all users of this SDNode* only consume the lower \p Bits.
1598 // This can be used to form W instructions for add/sub/mul/shl even when the
1599 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1600 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1601 // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
1602 // the add/sub/mul/shl to become non-W instructions. By checking the users we
1603 // may be able to use a W instruction and CSE with the other instruction if
1604 // this has happened. We could try to detect that the CSE opportunity exists
1605 // before doing this, but that would be more complicated.
1606 // TODO: Does this need to look through AND/OR/XOR to their users to find more
1607 // opportunities.
1608 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
      // Only the node kinds that the W-forming peepholes feed in here.
1609  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
1610  Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
1611  Node->getOpcode() == ISD::SRL ||
1612  Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
1613  isa<ConstantSDNode>(Node)) &&
1614  "Unexpected opcode");
1615 
      // Every user must be an already-selected machine node that provably
      // reads at most the low 'Bits' bits of this value.
1616  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
1617  SDNode *User = *UI;
1618  // Users of this node should have already been instruction selected
1619  if (!User->isMachineOpcode())
1620  return false;
1621 
1622  // TODO: Add more opcodes?
1623  switch (User->getMachineOpcode()) {
1624  default:
1625  return false;
      // W-form ALU/shift/div/rem/rotate/count instructions and the
      // FP-convert-from-32-bit instructions read only the low 32 bits of
      // their GPR inputs.
1626  case RISCV::ADDW:
1627  case RISCV::ADDIW:
1628  case RISCV::SUBW:
1629  case RISCV::MULW:
1630  case RISCV::SLLW:
1631  case RISCV::SLLIW:
1632  case RISCV::SRAW:
1633  case RISCV::SRAIW:
1634  case RISCV::SRLW:
1635  case RISCV::SRLIW:
1636  case RISCV::DIVW:
1637  case RISCV::DIVUW:
1638  case RISCV::REMW:
1639  case RISCV::REMUW:
1640  case RISCV::ROLW:
1641  case RISCV::RORW:
1642  case RISCV::RORIW:
1643  case RISCV::CLZW:
1644  case RISCV::CTZW:
1645  case RISCV::CPOPW:
1646  case RISCV::SLLIUW:
1647  case RISCV::FCVT_H_W:
1648  case RISCV::FCVT_H_WU:
1649  case RISCV::FCVT_S_W:
1650  case RISCV::FCVT_S_WU:
1651  case RISCV::FCVT_D_W:
1652  case RISCV::FCVT_D_WU:
1653  if (Bits < 32)
1654  return false;
1655  break;
1656  case RISCV::SLLI:
1657  // SLLI only uses the lower (XLen - ShAmt) bits.
1658  if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
1659  return false;
1660  break;
1661  case RISCV::ADDUW:
1662  case RISCV::SH1ADDUW:
1663  case RISCV::SH2ADDUW:
1664  case RISCV::SH3ADDUW:
1665  // The first operand to add.uw/shXadd.uw is implicitly zero extended from
1666  // 32 bits.
1667  if (UI.getOperandNo() != 0 || Bits < 32)
1668  return false;
1669  break;
      // Narrow stores only read as many low bits as they store, and only
      // through their value operand (operand 0).
1670  case RISCV::SB:
1671  if (UI.getOperandNo() != 0 || Bits < 8)
1672  return false;
1673  break;
1674  case RISCV::SH:
1675  if (UI.getOperandNo() != 0 || Bits < 16)
1676  return false;
1677  break;
1678  case RISCV::SW:
1679  if (UI.getOperandNo() != 0 || Bits < 32)
1680  return false;
1681  break;
1682  }
1683  }
1684 
1685  return true;
1686 }
1687 
1688 // Select VL as a 5 bit immediate or a value that will become a register. This
1689 // allows us to choose between VSETIVLI or VSETVLI later.
      // Always succeeds; small constants become target constants (uimm5 for
      // VSETIVLI), everything else is left as-is to be selected as a register.
      // NOTE(review): the function signature line (doc line 1690) is elided in
      // this rendered view.
1691  auto *C = dyn_cast<ConstantSDNode>(N);
1692  if (C && isUInt<5>(C->getZExtValue()))
1693  VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1694  N->getValueType(0));
1695  else
1696  VL = N;
1697 
1698  return true;
1699 }
1700 
// selectVSplat: match any splat-of-scalar node (generic SPLAT_VECTOR or the
// two RISCV-specific splat opcodes) and return its scalar operand.
1702  if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1703  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1704  N.getOpcode() != RISCVISD::VMV_V_X_VL)
1705  return false;
1706  SplatVal = N.getOperand(0);
1707  return true;
1708 }
1709 
1710 using ValidateFn = bool (*)(int64_t);
1711 
1712 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1713  SelectionDAG &DAG,
1714  const RISCVSubtarget &Subtarget,
1715  ValidateFn ValidateImm) {
1716  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1717  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1718  N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1719  !isa<ConstantSDNode>(N.getOperand(0)))
1720  return false;
1721 
1722  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1723 
1724  // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1725  // share semantics when the operand type is wider than the resulting vector
1726  // element type: an implicit truncation first takes place. Therefore, perform
1727  // a manual truncation/sign-extension in order to ignore any truncated bits
1728  // and catch any zero-extended immediate.
1729  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1730  // sign-extending to (XLenVT -1).
1731  MVT XLenVT = Subtarget.getXLenVT();
1732  assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1733  "Unexpected splat operand type");
1734  MVT EltVT = N.getSimpleValueType().getVectorElementType();
1735  if (EltVT.bitsLT(XLenVT))
1736  SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1737 
1738  if (!ValidateImm(SplatImm))
1739  return false;
1740 
1741  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1742  return true;
1743 }
1744 
// selectVSplatSimm5: splat of a constant representable as a signed 5-bit
// immediate (simm5).
1746  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1747  [](int64_t Imm) { return isInt<5>(Imm); });
1748 }
1749 
// selectVSplatSimm5Plus1: splat of a constant in [-15, 16], i.e. a value
// that is simm5 after subtracting one (for pseudos that adjust the
// immediate by one).
1751  return selectVSplatSimmHelper(
1752  N, SplatVal, *CurDAG, *Subtarget,
1753  [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1754 }
1755 
// selectVSplatSimm5Plus1NonZero: same range as Simm5Plus1 ([-15, 16]) but
// additionally excludes zero.
1757  SDValue &SplatVal) {
1758  return selectVSplatSimmHelper(
1759  N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1760  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1761  });
1762 }
1763 
// selectVSplatUimm5: splat of a constant representable as an unsigned 5-bit
// immediate (uimm5).  Unlike the simm5 helpers, no element-width
// truncation/sign-extension is applied before the range check.
1765  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1766  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1767  N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1768  !isa<ConstantSDNode>(N.getOperand(0)))
1769  return false;
1770 
1771  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1772 
1773  if (!isUInt<5>(SplatImm))
1774  return false;
1775 
1776  SplatVal =
1777  CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1778 
1779  return true;
1780 }
1781 
// selectRVVSimm5: match a scalar constant that, truncated to 'Width' bits
// and sign-extended, fits in simm5; produce it as an XLenVT target constant.
// NOTE(review): the signature head line (doc line 1782) is elided in this
// rendered view.
1783  SDValue &Imm) {
1784  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1785  int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1786 
1787  if (!isInt<5>(ImmVal))
1788  return false;
1789 
1790  Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1791  return true;
1792  }
1793 
1794  return false;
1795 }
1796 
1797 // Merge an ADDI into the offset of a load/store instruction where possible.
1798 // (load (addi base, off1), off2) -> (load base, off1+off2)
1799 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
1800 // This is possible when off1+off2 fits a 12-bit immediate.
1801 bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
1802  int OffsetOpIdx;
1803  int BaseOpIdx;
1804 
1805  // Only attempt this optimisation for I-type loads and S-type stores.
      // Loads: operand 0 is the base, operand 1 the immediate offset.
1806  switch (N->getMachineOpcode()) {
1807  default:
1808  return false;
1809  case RISCV::LB:
1810  case RISCV::LH:
1811  case RISCV::LW:
1812  case RISCV::LBU:
1813  case RISCV::LHU:
1814  case RISCV::LWU:
1815  case RISCV::LD:
1816  case RISCV::FLH:
1817  case RISCV::FLW:
1818  case RISCV::FLD:
1819  BaseOpIdx = 0;
1820  OffsetOpIdx = 1;
1821  break;
      // Stores: operand 0 is the stored value, so base/offset shift by one.
1822  case RISCV::SB:
1823  case RISCV::SH:
1824  case RISCV::SW:
1825  case RISCV::SD:
1826  case RISCV::FSH:
1827  case RISCV::FSW:
1828  case RISCV::FSD:
1829  BaseOpIdx = 1;
1830  OffsetOpIdx = 2;
1831  break;
1832  }
1833 
1834  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
1835  return false;
1836 
1837  SDValue Base = N->getOperand(BaseOpIdx);
1838 
1839  // If the base is an ADDI, we can merge it in to the load/store.
1840  if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
1841  return false;
1842 
1843  SDValue ImmOperand = Base.getOperand(1);
1844  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
1845 
      // The ADDI's immediate may be a plain constant, a global address low
      // part, or a constant-pool address low part; handle each form.
1846  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
1847  int64_t Offset1 = Const->getSExtValue();
1848  int64_t CombinedOffset = Offset1 + Offset2;
1849  if (!isInt<12>(CombinedOffset))
1850  return false;
1851  ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
1852  ImmOperand.getValueType());
1853  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
1854  // If the off1 in (addi base, off1) is a global variable's address (its
1855  // low part, really), then we can rely on the alignment of that variable
1856  // to provide a margin of safety before off1 can overflow the 12 bits.
1857  // Check if off2 falls within that margin; if so off1+off2 can't overflow.
1858  const DataLayout &DL = CurDAG->getDataLayout();
1859  Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
1860  if (Offset2 != 0 && Alignment <= Offset2)
1861  return false;
1862  int64_t Offset1 = GA->getOffset();
1863  int64_t CombinedOffset = Offset1 + Offset2;
1864  ImmOperand = CurDAG->getTargetGlobalAddress(
1865  GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
1866  CombinedOffset, GA->getTargetFlags());
1867  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
1868  // Ditto.
1869  Align Alignment = CP->getAlign();
1870  if (Offset2 != 0 && Alignment <= Offset2)
1871  return false;
1872  int64_t Offset1 = CP->getOffset();
1873  int64_t CombinedOffset = Offset1 + Offset2;
1874  ImmOperand = CurDAG->getTargetConstantPool(
1875  CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
1876  CombinedOffset, CP->getTargetFlags());
1877  } else {
1878  return false;
1879  }
1880 
1881  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
1882  LLVM_DEBUG(Base->dump(CurDAG));
1883  LLVM_DEBUG(dbgs() << "\nN: ");
1884  LLVM_DEBUG(N->dump(CurDAG));
1885  LLVM_DEBUG(dbgs() << "\n");
1886 
1887  // Modify the offset operand of the load/store.
      // The load/store is updated in place: its base becomes the ADDI's base
      // and its offset becomes the combined immediate (the chain operand is
      // preserved as the trailing operand).
1888  if (BaseOpIdx == 0) // Load
1889  CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
1890  N->getOperand(2));
1891  else // Store
1892  CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
1893  ImmOperand, N->getOperand(3));
1894 
1895  return true;
1896 }
1897 
1898 // Try to remove sext.w if the input is a W instruction or can be made into
1899 // a W instruction cheaply.
1900 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
1901  // Look for the sext.w pattern, addiw rd, rs1, 0.
1902  if (N->getMachineOpcode() != RISCV::ADDIW ||
1903  !isNullConstant(N->getOperand(1)))
1904  return false;
1905 
1906  SDValue N0 = N->getOperand(0);
1907  if (!N0.isMachineOpcode())
1908  return false;
1909 
1910  switch (N0.getMachineOpcode()) {
1911  default:
1912  break;
1913  case RISCV::ADD:
1914  case RISCV::ADDI:
1915  case RISCV::SUB:
1916  case RISCV::MUL:
1917  case RISCV::SLLI: {
1918  // Convert sext.w+add/sub/mul to their W instructions. This will create
1919  // a new independent instruction. This improves latency.
1920  unsigned Opc;
1921  switch (N0.getMachineOpcode()) {
1922  default:
1923  llvm_unreachable("Unexpected opcode!");
1924  case RISCV::ADD: Opc = RISCV::ADDW; break;
1925  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
1926  case RISCV::SUB: Opc = RISCV::SUBW; break;
1927  case RISCV::MUL: Opc = RISCV::MULW; break;
1928  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
1929  }
1930 
1931  SDValue N00 = N0.getOperand(0);
1932  SDValue N01 = N0.getOperand(1);
1933 
1934  // Shift amount needs to be uimm5.
      // SLLIW only encodes shift amounts 0-31; bail out on anything wider.
1935  if (N0.getMachineOpcode() == RISCV::SLLI &&
1936  !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
1937  break;
1938 
      // Replace the sext.w (all uses of N) with the new W-form instruction.
1939  SDNode *Result =
1940  CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
1941  N00, N01);
1942  ReplaceUses(N, Result);
1943  return true;
1944  }
1945  case RISCV::ADDW:
1946  case RISCV::ADDIW:
1947  case RISCV::SUBW:
1948  case RISCV::MULW:
1949  case RISCV::SLLIW:
1950  // Result is already sign extended just remove the sext.w.
1951  // NOTE: We only handle the nodes that are selected with hasAllWUsers.
1952  ReplaceUses(N, N0.getNode());
1953  return true;
1954  }
1955 
      // Input is not a candidate; keep the sext.w.
1956  return false;
1957 }
1958 
1959 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1960 // for instruction scheduling.
      // Factory entry point used by the RISCV target machine's pass setup.
      // NOTE(review): the function signature line (doc line 1961) is elided
      // in this rendered view — presumably
      // FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM);
      // confirm against the real source.
1962  return new RISCVDAGToDAGISel(TM);
1963 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:96
llvm::TargetMachine::getOptLevel
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Definition: TargetMachine.cpp:185
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:21
llvm::MVT::getVectorElementType
MVT getVectorElementType() const
Definition: MachineValueType.h:519
llvm::MVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: MachineValueType.h:1072
llvm::ISD::INTRINSIC_VOID
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:199
llvm::RISCVDAGToDAGISel::selectVLXSEG
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:347
MathExtras.h
llvm::SelectionDAGISel::TLI
const TargetLowering * TLI
Definition: SelectionDAGISel.h:53
llvm
The umbrella namespace enclosing all LLVM core classes, functions, and target backends. (The original brief here was mis-attributed by the documentation generator to an unrelated file.)
Definition: AllocatorList.h:23
llvm::RISCVISD::SLLW
@ SLLW
Definition: RISCVISelLowering.h:48
llvm::SelectionDAGISel::TM
TargetMachine & TM
Definition: SelectionDAGISel.h:41
llvm::RISCV::VLSEGPseudo
Definition: RISCVISelDAGToDAG.h:121
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1086
llvm::MVT::isFixedLengthVector
bool isFixedLengthVector() const
Definition: MachineValueType.h:378
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1750
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::RISCVDAGToDAGISel::PreprocessISelDAG
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
Definition: RISCVISelDAGToDAG.cpp:44
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:848
Insert
Vector Rotate Left Mask Mask Insert
Definition: README_P9.txt:112
llvm::RISCVSubtarget::getTargetLowering
const RISCVTargetLowering * getTargetLowering() const override
Definition: RISCVSubtarget.h:96
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:735
llvm::RISCV::VLXSEGPseudo
Definition: RISCVISelDAGToDAG.h:131
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:152
llvm::RISCVDAGToDAGISel::selectZExti32
bool selectZExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1579
llvm::InlineAsm::Constraint_A
@ Constraint_A
Definition: InlineAsm.h:250
llvm::SelectionDAG::allnodes_end
allnodes_const_iterator allnodes_end() const
Definition: SelectionDAG.h:494
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::KnownBits::Zero
APInt Zero
Definition: KnownBits.h:24
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::RISCVISD::DIVUW
@ DIVUW
Definition: RISCVISelLowering.h:55
llvm::MVT::bitsLT
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
Definition: MachineValueType.h:1128
llvm::SelectionDAG::getFrameIndex
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
Definition: SelectionDAG.cpp:1625
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::SelectionDAG::getVTList
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
Definition: SelectionDAG.cpp:8580
llvm::MipsISD::Lo
@ Lo
Definition: MipsISelLowering.h:79
llvm::MachineSDNode
An SDNode that represents everything that will be needed to construct a MachineInstr.
Definition: SelectionDAGNodes.h:2746
llvm::SelectionDAG::allnodes_begin
allnodes_const_iterator allnodes_begin() const
Definition: SelectionDAG.h:493
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:137
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:455
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::RISCVDAGToDAGISel::selectVSplat
bool selectVSplat(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1701
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:99
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:262
llvm::MemOp
Definition: TargetLowering.h:112
llvm::RISCVDAGToDAGISel
Definition: RISCVISelDAGToDAG.h:23
llvm::SelectionDAG::getMemBasePlusOffset
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
Definition: SelectionDAG.cpp:6379
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1399
llvm::tgtok::Bits
@ Bits
Definition: TGLexer.h:50
llvm::SelectionDAG::getStore
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
Definition: SelectionDAG.cpp:7541
llvm::RISCV::VLX_VSXPseudo
Definition: RISCVISelDAGToDAG.h:177
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::SelectionDAG::RemoveDeadNodes
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
Definition: SelectionDAG.cpp:852
llvm::RISCVTargetLowering::getRegClassIDForVecVT
static unsigned getRegClassIDForVecVT(MVT VT)
Definition: RISCVISelLowering.cpp:1292
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::RISCVDAGToDAGISel::selectVSSEG
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:395
RISCVMatInt.h
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1756
llvm::RISCVDAGToDAGISel::SelectBaseAddr
bool SelectBaseAddr(SDValue Addr, SDValue &Base)
Definition: RISCVISelDAGToDAG.cpp:1523
KnownBits.h
llvm::MVT::isScalableVector
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
Definition: MachineValueType.h:373
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:2007
llvm::MipsISD::Hi
@ Hi
Definition: MipsISelLowering.h:75
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::RISCV::VSSEGPseudo
Definition: RISCVISelDAGToDAG.h:141
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:98
llvm::EVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:363
llvm::RISCVDAGToDAGISel::selectShiftMask
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
Definition: RISCVISelDAGToDAG.cpp:1533
llvm::SelectionDAG::getTargetFrameIndex
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:688
llvm::SDValue::getValueType
EVT getValueType() const
Return the ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:1121
llvm::SelectionDAG
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:216
llvm::SelectionDAG::UpdateNodeOperands
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
Definition: SelectionDAG.cpp:8670
llvm::ISD::Constant
@ Constant
Definition: ISDOpcodes.h:76
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:724
llvm::User
Definition: User.h:44
llvm::ISD::SIGN_EXTEND_INREG
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:747
llvm::RISCVISD::SPLAT_VECTOR_I64
@ SPLAT_VECTOR_I64
Definition: RISCVISelLowering.h:137
createTuple
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF, RISCVII::VLMUL LMUL)
Definition: RISCVISelDAGToDAG.cpp:199
llvm::SelectionDAG::getTargetLoweringInfo
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:443
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::isShiftedMask_64
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:485
llvm::TargetLowering
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Definition: TargetLowering.h:3189
llvm::MVT::getScalarSizeInBits
uint64_t getScalarSizeInBits() const
Definition: MachineValueType.h:1062
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
llvm::SelectionDAG::MaskedValueIsZero
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
Definition: SelectionDAG.cpp:2485
llvm::RISCVTargetLowering::getSubregIndexByMVT
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
Definition: RISCVISelLowering.cpp:1269
llvm::RISCVDAGToDAGISel::SelectAddrFI
bool SelectAddrFI(SDValue Addr, SDValue &Base)
Definition: RISCVISelDAGToDAG.cpp:1515
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::RISCVSubtarget::getXLenVT
MVT getXLenVT() const
Definition: RISCVSubtarget.h:127
RISCVISelDAGToDAG.h
llvm::SelectionDAGISel::ReplaceNode
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
Definition: SelectionDAGISel.h:227
llvm::TypeSize::Fixed
static TypeSize Fixed(ScalarTy MinVal)
Definition: TypeSize.h:423
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
llvm::RISCVVType::decodeVSEW
static unsigned decodeVSEW(unsigned VSEW)
Definition: RISCVBaseInfo.h:351
RISCVMCTargetDesc.h
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
llvm::RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
Definition: RISCVISelDAGToDAG.cpp:1497
llvm::SelectionDAG::getMemIntrinsicNode
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
Definition: SelectionDAG.cpp:7268
createM1Tuple
static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:174
llvm::RISCVSubtarget::hasStdExtZbb
bool hasStdExtZbb() const
Definition: RISCVSubtarget.h:109
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:632
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::ISD::SPLAT_VECTOR
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:590
llvm::RISCVSubtarget::hasStdExtZbp
bool hasStdExtZbp() const
Definition: RISCVSubtarget.h:114
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::SDValue::getConstantOperandVal
uint64_t getConstantOperandVal(unsigned i) const
Definition: SelectionDAGNodes.h:1133
llvm::RISCVISD::DIVW
@ DIVW
Definition: RISCVISelLowering.h:54
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::SelectionDAG::getTargetGlobalAddress
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:683
llvm::RISCVISD::CLZW
@ CLZW
Definition: RISCVISelLowering.h:63
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:78
llvm::APInt::isSubsetOf
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1177
selectImm
static SDNode * selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm, const RISCVSubtarget &Subtarget)
Definition: RISCVISelDAGToDAG.cpp:128
createM2Tuple
static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:184
llvm::SelectionDAG::RemoveDeadNode
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:906
llvm::RISCV::VSEPseudo
Definition: RISCVISelDAGToDAG.h:169
llvm::RISCVDAGToDAGISel::selectVLOp
bool selectVLOp(SDValue N, SDValue &VL)
Definition: RISCVISelDAGToDAG.cpp:1690
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::RISCVDAGToDAGISel::selectVSXSEG
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:427
llvm::SelectionDAGISel::IsProfitableToFold
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
Definition: SelectionDAGISel.cpp:2099
llvm::isUInt< 16 >
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:408
selectVSplatSimmHelper
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, ValidateFn ValidateImm)
Definition: RISCVISelDAGToDAG.cpp:1712
uint64_t
llvm::RISCVDAGToDAGISel::selectVSplatUimm5
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1764
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
llvm::SelectionDAGISel::FuncInfo
std::unique_ptr< FunctionLoweringInfo > FuncInfo
Definition: SelectionDAGISel.h:43
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:38
llvm::SelectionDAG::getCopyFromReg
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:761
llvm::SelectionDAGISel::IsLegalToFold
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
Definition: SelectionDAGISel.cpp:2107
llvm::SDNode::getOperand
const SDValue & getOperand(unsigned Num) const
Definition: SelectionDAGNodes.h:904
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::SelectionDAG::getNode
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
Definition: SelectionDAG.cpp:8345
llvm::RISCVISD::ROLW
@ ROLW
Definition: RISCVISelLowering.h:59
llvm::RISCVMachineFunctionInfo
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
Definition: RISCVMachineFunctionInfo.h:24
llvm::SPII::Store
@ Store
Definition: SparcInstrInfo.h:33
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::isUInt< 32 >
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:411
llvm::SDValue::getValue
SDValue getValue(unsigned R) const
Definition: SelectionDAGNodes.h:172
llvm::RISCVDAGToDAGISel::selectVLSEG
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:261
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
createTupleImpl
static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned RegClassID, unsigned SubReg0)
Definition: RISCVISelDAGToDAG.cpp:156
llvm::MVT::Other
@ Other
Definition: MachineValueType.h:42
llvm::MVT::getSizeInBits
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Definition: MachineValueType.h:860
llvm::SelectionDAGISel::CurDAG
SelectionDAG * CurDAG
Definition: SelectionDAGISel.h:47
llvm::RISCVDAGToDAGISel::hasAllWUsers
bool hasAllWUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:63
llvm::SelectionDAG::getMachineNode
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
Definition: SelectionDAG.cpp:9018
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::RISCVISD::SRAW
@ SRAW
Definition: RISCVISelLowering.h:49
llvm::RISCVDAGToDAGISel::selectVSplatSimm5
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1745
llvm::SelectionDAG::setNodeMemRefs
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Definition: SelectionDAG.cpp:8786
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:108
llvm::MachinePointerInfo::getWithOffset
MachinePointerInfo getWithOffset(int64_t O) const
Definition: MachineMemOperand.h:80
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::MachineFunction
Definition: MachineFunction.h:230
llvm::RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
Definition: RISCVISelLowering.cpp:1304
llvm::RISCVISD::REMUW
@ REMUW
Definition: RISCVISelLowering.h:56
llvm::SelectionDAG::getTargetConstantPool
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:699
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::createRISCVISelDag
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM)
Definition: RISCVISelDAGToDAG.cpp:1961
llvm::MVT::i64
@ i64
Definition: MachineValueType.h:47
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:156
llvm::SelectionDAG::getTargetInsertSubreg
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
Definition: SelectionDAG.cpp:9146
llvm::RISCVISD::VMV_V_X_VL
@ VMV_V_X_VL
Definition: RISCVISelLowering.h:124
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:93
llvm::SDValue::getMachineOpcode
unsigned getMachineOpcode() const
Definition: SelectionDAGNodes.h:1153
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:97
llvm::SelectionDAG::ReplaceAllUsesOfValueWith
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
Definition: SelectionDAG.cpp:9606
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:840
llvm::SDValue::getOperand
const SDValue & getOperand(unsigned i) const
Definition: SelectionDAGNodes.h:1129
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::SDValue::hasOneUse
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
Definition: SelectionDAGNodes.h:1165
llvm::SDValue::getSimpleValueType
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:183
llvm::SDVTList
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Definition: SelectionDAGNodes.h:79
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:777
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:135
llvm::ISD::INTRINSIC_WO_CHAIN
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:184
llvm::MVT::getVectorElementCount
ElementCount getVectorElementCount() const
Definition: MachineValueType.h:846
llvm::RISCVISD::RORW
@ RORW
Definition: RISCVISelLowering.h:60
createM4Tuple
static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:193
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::ISD::INSERT_SUBVECTOR
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition: ISDOpcodes.h:535
llvm::HexagonISD::CP
@ CP
Definition: HexagonISelLowering.h:53
llvm::InlineAsm::Constraint_m
@ Constraint_m
Definition: InlineAsm.h:247
llvm::SelectionDAGISel::MF
MachineFunction * MF
Definition: SelectionDAGISel.h:45
Alignment.h
llvm::SelectionDAG::computeKnownBits
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
Definition: SelectionDAG.cpp:2779
llvm::KnownBits
Definition: KnownBits.h:23
llvm::RISCVISD::SRLW
@ SRLW
Definition: RISCVISelLowering.h:50
llvm::ISD::EXTRACT_SUBVECTOR
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:549
llvm::isNullConstant
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
Definition: SelectionDAG.cpp:9957
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:324
llvm::RISCV::VSXSEGPseudo
Definition: RISCVISelDAGToDAG.h:150
RISCVISelLowering.h
llvm::RISCVDAGToDAGISel::PostprocessISelDAG
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
Definition: RISCVISelDAGToDAG.cpp:110
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
MachineFrameInfo.h
llvm::SelectionDAG::getEntryNode
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:516
llvm::RISCVVType::encodeVTYPE
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
Definition: RISCVBaseInfo.cpp:110
llvm::SelectionDAG::getDataLayout
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:440
llvm::RISCVDAGToDAGISel::selectVLSEGFF
void selectVLSEGFF(SDNode *Node, bool IsMasked)
Definition: RISCVISelDAGToDAG.cpp:302
llvm::SelectionDAG::getTargetExtractSubreg
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
Definition: SelectionDAG.cpp:9136
llvm::SelectionDAGISel::ReplaceUses
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
Definition: SelectionDAGISel.h:206
llvm::MVT::i32
@ i32
Definition: MachineValueType.h:46
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:128
llvm::SDValue
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
Definition: SelectionDAGNodes.h:138
llvm::RISCVTargetLowering
Definition: RISCVISelLowering.h:295
llvm::XCoreISD::LMUL
@ LMUL
Definition: XCoreISelLowering.h:59
llvm::countLeadingZeros
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Definition: MathExtras.h:225
llvm::RISCVTargetLowering::getLMUL
static RISCVII::VLMUL getLMUL(MVT VT)
Definition: RISCVISelLowering.cpp:1225
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:413
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::RISCVISD::VFMV_V_F_VL
@ VFMV_V_F_VL
Definition: RISCVISelLowering.h:127
llvm::SDValue::isUndef
bool isUndef() const
Definition: SelectionDAGNodes.h:1157
llvm::RISCVII::LMUL_F8
@ LMUL_F8
Definition: RISCVBaseInfo.h:101
llvm::ISD::SHL
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:657
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1008
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
N
#define N
llvm::ISD::SRL
@ SRL
Definition: ISDOpcodes.h:659
RISCVMachineFunctionInfo.h
llvm::RISCVDAGToDAGISel::selectRVVSimm5
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
Definition: RISCVISelDAGToDAG.cpp:1782
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::RISCVII::LMUL_F4
@ LMUL_F4
Definition: RISCVBaseInfo.h:102
llvm::RISCVDAGToDAGISel::Select
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
Definition: RISCVISelDAGToDAG.cpp:466
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:95
llvm::MVT::Untyped
@ Untyped
Definition: MachineValueType.h:266
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::SDValue::getOpcode
unsigned getOpcode() const
Definition: SelectionDAGNodes.h:1117
llvm::SelectionDAG::getTargetConstant
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:637
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::ISD::INTRINSIC_W_CHAIN
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:192
llvm::SelectionDAG::DeleteNode
void DeleteNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:917
llvm::SelectionDAG::getMachineFunction
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:437
llvm::SelectionDAG::ComputeNumSignBits
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: SelectionDAG.cpp:3708
llvm::isMask_64
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
Definition: MathExtras.h:473
llvm::RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL
@ SPLAT_VECTOR_SPLIT_I64_VL
Definition: RISCVISelLowering.h:140
raw_ostream.h
llvm::SDValue::isMachineOpcode
bool isMachineOpcode() const
Definition: SelectionDAGNodes.h:1149
llvm::RISCV::VLEPseudo
Definition: RISCVISelDAGToDAG.h:160
llvm::RISCVDAGToDAGISel::hasAllHUsers
bool hasAllHUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:62
ValidateFn
bool(*)(int64_t) ValidateFn
Definition: RISCVISelDAGToDAG.cpp:1710
llvm::RISCVISD::CTZW
@ CTZW
Definition: RISCVISelLowering.h:64
llvm::RISCVDAGToDAGISel::hasAllNBitUsers
bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const
Definition: RISCVISelDAGToDAG.cpp:1608
Debug.h
llvm::RISCVDAGToDAGISel::selectSExti32
bool selectSExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1564
llvm::TargetLoweringBase::getPointerTy
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Definition: TargetLowering.h:346
llvm::ISD::TokenFactor
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
llvm::RISCVII::LMUL_F2
@ LMUL_F2
Definition: RISCVBaseInfo.h:103
llvm::sampleprof::Base
@ Base
Definition: Discriminator.h:58
llvm::RISCVSubtarget::hasStdExtV
bool hasStdExtV() const
Definition: RISCVSubtarget.h:118
llvm::RISCVDAGToDAGISel::addVectorLoadStoreOperands
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
Definition: RISCVISelDAGToDAG.cpp:216