LLVM  15.0.0git
RISCVInstrInfo.cpp
Go to the documentation of this file.
1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
15 #include "RISCV.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/MC/MCInstBuilder.h"
29 #include "llvm/MC/TargetRegistry.h"
31 
32 using namespace llvm;
33 
34 #define GEN_CHECK_COMPRESS_INSTR
35 #include "RISCVGenCompressInstEmitter.inc"
36 
37 #define GET_INSTRINFO_CTOR_DTOR
38 #define GET_INSTRINFO_NAMED_OPS
39 #include "RISCVGenInstrInfo.inc"
40 
42  "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
43  cl::desc("Prefer whole register move for vector registers."));
44 
// Searchable lookup table for RVV pseudo-instructions; the table body is
// generated by TableGen (GET_RISCVVPseudosTable_IMPL) from
// RISCVGenSearchableTables.inc.
45 namespace llvm {
46 namespace RISCVVPseudosTable {
47 
48 using namespace RISCV;
49 
50 #define GET_RISCVVPseudosTable_IMPL
51 #include "RISCVGenSearchableTables.inc"
52 
53 } // namespace RISCVVPseudosTable
54 } // namespace llvm
55 
57  : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
58  STI(STI) {}
59 
61  if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
62  return MCInstBuilder(RISCV::C_NOP);
63  return MCInstBuilder(RISCV::ADDI)
64  .addReg(RISCV::X0)
65  .addReg(RISCV::X0)
66  .addImm(0);
67 }
68 
70  int &FrameIndex) const {
71  switch (MI.getOpcode()) {
72  default:
73  return 0;
74  case RISCV::LB:
75  case RISCV::LBU:
76  case RISCV::LH:
77  case RISCV::LHU:
78  case RISCV::FLH:
79  case RISCV::LW:
80  case RISCV::FLW:
81  case RISCV::LWU:
82  case RISCV::LD:
83  case RISCV::FLD:
84  break;
85  }
86 
87  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
88  MI.getOperand(2).getImm() == 0) {
89  FrameIndex = MI.getOperand(1).getIndex();
90  return MI.getOperand(0).getReg();
91  }
92 
93  return 0;
94 }
95 
97  int &FrameIndex) const {
98  switch (MI.getOpcode()) {
99  default:
100  return 0;
101  case RISCV::SB:
102  case RISCV::SH:
103  case RISCV::SW:
104  case RISCV::FSH:
105  case RISCV::FSW:
106  case RISCV::SD:
107  case RISCV::FSD:
108  break;
109  }
110 
111  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
112  MI.getOperand(2).getImm() == 0) {
113  FrameIndex = MI.getOperand(1).getIndex();
114  return MI.getOperand(0).getReg();
115  }
116 
117  return 0;
118 }
119 
// Returns true when copying a register tuple of NumRegs registers in
// ascending order would overwrite part of the source before it is read,
// i.e. the destination's first register lies inside the source tuple.
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  if (DstReg <= SrcReg)
    return false; // Copying downwards (or in place) never clobbers.
  return DstReg - SrcReg < NumRegs;
}
124 
125 static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
126  const MachineBasicBlock &MBB,
129  RISCVII::VLMUL LMul) {
131  return false;
132 
133  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
134  "Unexpected COPY instruction.");
135  Register SrcReg = MBBI->getOperand(1).getReg();
136  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
137 
138  bool FoundDef = false;
139  bool FirstVSetVLI = false;
140  unsigned FirstSEW = 0;
141  while (MBBI != MBB.begin()) {
142  --MBBI;
143  if (MBBI->isMetaInstruction())
144  continue;
145 
146  if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
147  MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
148  MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
149  // There is a vsetvli between COPY and source define instruction.
150  // vy = def_vop ... (producing instruction)
151  // ...
152  // vsetvli
153  // ...
154  // vx = COPY vy
155  if (!FoundDef) {
156  if (!FirstVSetVLI) {
157  FirstVSetVLI = true;
158  unsigned FirstVType = MBBI->getOperand(2).getImm();
159  RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
160  FirstSEW = RISCVVType::getSEW(FirstVType);
161  // The first encountered vsetvli must have the same lmul as the
162  // register class of COPY.
163  if (FirstLMul != LMul)
164  return false;
165  }
166  // Only permit `vsetvli x0, x0, vtype` between COPY and the source
167  // define instruction.
168  if (MBBI->getOperand(0).getReg() != RISCV::X0)
169  return false;
170  if (MBBI->getOperand(1).isImm())
171  return false;
172  if (MBBI->getOperand(1).getReg() != RISCV::X0)
173  return false;
174  continue;
175  }
176 
177  // MBBI is the first vsetvli before the producing instruction.
178  unsigned VType = MBBI->getOperand(2).getImm();
179  // If there is a vsetvli between COPY and the producing instruction.
180  if (FirstVSetVLI) {
181  // If SEW is different, return false.
182  if (RISCVVType::getSEW(VType) != FirstSEW)
183  return false;
184  }
185 
186  // If the vsetvli is tail undisturbed, keep the whole register move.
187  if (!RISCVVType::isTailAgnostic(VType))
188  return false;
189 
190  // The checking is conservative. We only have register classes for
191  // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
192  // for fractional LMUL operations. However, we could not use the vsetvli
193  // lmul for widening operations. The result of widening operation is
194  // 2 x LMUL.
195  return LMul == RISCVVType::getVLMUL(VType);
196  } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
197  return false;
198  } else if (MBBI->getNumDefs()) {
199  // Check all the instructions which will change VL.
200  // For example, vleff has implicit def VL.
201  if (MBBI->modifiesRegister(RISCV::VL))
202  return false;
203 
204  // Only converting whole register copies to vmv.v.v when the defining
205  // value appears in the explicit operands.
206  for (const MachineOperand &MO : MBBI->explicit_operands()) {
207  if (!MO.isReg() || !MO.isDef())
208  continue;
209  if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
210  // We only permit the source of COPY has the same LMUL as the defined
211  // operand.
212  // There are cases we need to keep the whole register copy if the LMUL
213  // is different.
214  // For example,
215  // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
216  // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
217  // # The COPY may be created by vlmul_trunc intrinsic.
218  // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
219  //
220  // After widening, the valid value will be 4 x e32 elements. If we
221  // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
222  // FIXME: The COPY of subregister of Zvlsseg register will not be able
223  // to convert to vmv.v.[v|i] under the constraint.
224  if (MO.getReg() != SrcReg)
225  return false;
226 
227  // In widening reduction instructions with LMUL_1 input vector case,
228  // only checking the LMUL is insufficient due to reduction result is
229  // always LMUL_1.
230  // For example,
231  // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
232  // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
233  // $v26 = COPY killed renamable $v8
234  // After widening, The valid value will be 1 x e16 elements. If we
235  // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
236  uint64_t TSFlags = MBBI->getDesc().TSFlags;
237  if (RISCVII::isRVVWideningReduction(TSFlags))
238  return false;
239 
240  // Found the definition.
241  FoundDef = true;
242  DefMBBI = MBBI;
243  // If the producing instruction does not depend on vsetvli, do not
244  // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
245  if (!RISCVII::hasSEWOp(TSFlags))
246  return false;
247  break;
248  }
249  }
250  }
251  }
252 
253  return false;
254 }
255 
258  const DebugLoc &DL, MCRegister DstReg,
259  MCRegister SrcReg, bool KillSrc) const {
260  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
261  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
262  .addReg(SrcReg, getKillRegState(KillSrc))
263  .addImm(0);
264  return;
265  }
266 
267  // Handle copy from csr
268  if (RISCV::VCSRRegClass.contains(SrcReg) &&
269  RISCV::GPRRegClass.contains(DstReg)) {
271  BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
273  .addReg(RISCV::X0);
274  return;
275  }
276 
277  // FPR->FPR copies and VR->VR copies.
278  unsigned Opc;
279  bool IsScalableVector = true;
280  unsigned NF = 1;
282  unsigned SubRegIdx = RISCV::sub_vrm1_0;
283  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
284  Opc = RISCV::FSGNJ_H;
285  IsScalableVector = false;
286  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
287  Opc = RISCV::FSGNJ_S;
288  IsScalableVector = false;
289  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
290  Opc = RISCV::FSGNJ_D;
291  IsScalableVector = false;
292  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
293  Opc = RISCV::PseudoVMV1R_V;
294  LMul = RISCVII::LMUL_1;
295  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
296  Opc = RISCV::PseudoVMV2R_V;
297  LMul = RISCVII::LMUL_2;
298  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
299  Opc = RISCV::PseudoVMV4R_V;
300  LMul = RISCVII::LMUL_4;
301  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
302  Opc = RISCV::PseudoVMV8R_V;
303  LMul = RISCVII::LMUL_8;
304  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
305  Opc = RISCV::PseudoVMV1R_V;
306  SubRegIdx = RISCV::sub_vrm1_0;
307  NF = 2;
308  LMul = RISCVII::LMUL_1;
309  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
310  Opc = RISCV::PseudoVMV2R_V;
311  SubRegIdx = RISCV::sub_vrm2_0;
312  NF = 2;
313  LMul = RISCVII::LMUL_2;
314  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
315  Opc = RISCV::PseudoVMV4R_V;
316  SubRegIdx = RISCV::sub_vrm4_0;
317  NF = 2;
318  LMul = RISCVII::LMUL_4;
319  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
320  Opc = RISCV::PseudoVMV1R_V;
321  SubRegIdx = RISCV::sub_vrm1_0;
322  NF = 3;
323  LMul = RISCVII::LMUL_1;
324  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
325  Opc = RISCV::PseudoVMV2R_V;
326  SubRegIdx = RISCV::sub_vrm2_0;
327  NF = 3;
328  LMul = RISCVII::LMUL_2;
329  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
330  Opc = RISCV::PseudoVMV1R_V;
331  SubRegIdx = RISCV::sub_vrm1_0;
332  NF = 4;
333  LMul = RISCVII::LMUL_1;
334  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
335  Opc = RISCV::PseudoVMV2R_V;
336  SubRegIdx = RISCV::sub_vrm2_0;
337  NF = 4;
338  LMul = RISCVII::LMUL_2;
339  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
340  Opc = RISCV::PseudoVMV1R_V;
341  SubRegIdx = RISCV::sub_vrm1_0;
342  NF = 5;
343  LMul = RISCVII::LMUL_1;
344  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
345  Opc = RISCV::PseudoVMV1R_V;
346  SubRegIdx = RISCV::sub_vrm1_0;
347  NF = 6;
348  LMul = RISCVII::LMUL_1;
349  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
350  Opc = RISCV::PseudoVMV1R_V;
351  SubRegIdx = RISCV::sub_vrm1_0;
352  NF = 7;
353  LMul = RISCVII::LMUL_1;
354  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
355  Opc = RISCV::PseudoVMV1R_V;
356  SubRegIdx = RISCV::sub_vrm1_0;
357  NF = 8;
358  LMul = RISCVII::LMUL_1;
359  } else {
360  llvm_unreachable("Impossible reg-to-reg copy");
361  }
362 
363  if (IsScalableVector) {
364  bool UseVMV_V_V = false;
366  unsigned DefExplicitOpNum;
367  unsigned VIOpc;
368  if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
369  UseVMV_V_V = true;
370  DefExplicitOpNum = DefMBBI->getNumExplicitOperands();
371  // We only need to handle LMUL = 1/2/4/8 here because we only define
372  // vector register classes for LMUL = 1/2/4/8.
373  switch (LMul) {
374  default:
375  llvm_unreachable("Impossible LMUL for vector register copy.");
376  case RISCVII::LMUL_1:
377  Opc = RISCV::PseudoVMV_V_V_M1;
378  VIOpc = RISCV::PseudoVMV_V_I_M1;
379  break;
380  case RISCVII::LMUL_2:
381  Opc = RISCV::PseudoVMV_V_V_M2;
382  VIOpc = RISCV::PseudoVMV_V_I_M2;
383  break;
384  case RISCVII::LMUL_4:
385  Opc = RISCV::PseudoVMV_V_V_M4;
386  VIOpc = RISCV::PseudoVMV_V_I_M4;
387  break;
388  case RISCVII::LMUL_8:
389  Opc = RISCV::PseudoVMV_V_V_M8;
390  VIOpc = RISCV::PseudoVMV_V_I_M8;
391  break;
392  }
393  }
394 
395  bool UseVMV_V_I = false;
396  if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
397  UseVMV_V_I = true;
398  Opc = VIOpc;
399  }
400 
401  if (NF == 1) {
402  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
403  if (UseVMV_V_I)
404  MIB = MIB.add(DefMBBI->getOperand(1));
405  else
406  MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
407  if (UseVMV_V_V) {
408  // The last two arguments of vector instructions are
409  // AVL, SEW. We also need to append the implicit-use vl and vtype.
410  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
411  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
412  MIB.addReg(RISCV::VL, RegState::Implicit);
413  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
414  }
415  } else {
417 
418  int I = 0, End = NF, Incr = 1;
419  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
420  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
421  unsigned LMulVal;
422  bool Fractional;
423  std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
424  assert(!Fractional && "It is impossible be fractional lmul here.");
425  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
426  I = NF - 1;
427  End = -1;
428  Incr = -1;
429  }
430 
431  for (; I != End; I += Incr) {
432  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
433  TRI->getSubReg(DstReg, SubRegIdx + I));
434  if (UseVMV_V_I)
435  MIB = MIB.add(DefMBBI->getOperand(1));
436  else
437  MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
438  getKillRegState(KillSrc));
439  if (UseVMV_V_V) {
440  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
441  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
442  MIB.addReg(RISCV::VL, RegState::Implicit);
443  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
444  }
445  }
446  }
447  } else {
448  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
449  .addReg(SrcReg, getKillRegState(KillSrc))
450  .addReg(SrcReg, getKillRegState(KillSrc));
451  }
452 }
453 
456  Register SrcReg, bool IsKill, int FI,
457  const TargetRegisterClass *RC,
458  const TargetRegisterInfo *TRI) const {
459  DebugLoc DL;
460  if (I != MBB.end())
461  DL = I->getDebugLoc();
462 
463  MachineFunction *MF = MBB.getParent();
464  MachineFrameInfo &MFI = MF->getFrameInfo();
465 
466  unsigned Opcode;
467  bool IsScalableVector = true;
468  bool IsZvlsseg = true;
469  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
470  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
471  RISCV::SW : RISCV::SD;
472  IsScalableVector = false;
473  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
474  Opcode = RISCV::FSH;
475  IsScalableVector = false;
476  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
477  Opcode = RISCV::FSW;
478  IsScalableVector = false;
479  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
480  Opcode = RISCV::FSD;
481  IsScalableVector = false;
482  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
483  Opcode = RISCV::PseudoVSPILL_M1;
484  IsZvlsseg = false;
485  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
486  Opcode = RISCV::PseudoVSPILL_M2;
487  IsZvlsseg = false;
488  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
489  Opcode = RISCV::PseudoVSPILL_M4;
490  IsZvlsseg = false;
491  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
492  Opcode = RISCV::PseudoVSPILL_M8;
493  IsZvlsseg = false;
494  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
495  Opcode = RISCV::PseudoVSPILL2_M1;
496  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
497  Opcode = RISCV::PseudoVSPILL2_M2;
498  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
499  Opcode = RISCV::PseudoVSPILL2_M4;
500  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
501  Opcode = RISCV::PseudoVSPILL3_M1;
502  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
503  Opcode = RISCV::PseudoVSPILL3_M2;
504  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
505  Opcode = RISCV::PseudoVSPILL4_M1;
506  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
507  Opcode = RISCV::PseudoVSPILL4_M2;
508  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
509  Opcode = RISCV::PseudoVSPILL5_M1;
510  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
511  Opcode = RISCV::PseudoVSPILL6_M1;
512  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
513  Opcode = RISCV::PseudoVSPILL7_M1;
514  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
515  Opcode = RISCV::PseudoVSPILL8_M1;
516  else
517  llvm_unreachable("Can't store this register to stack slot");
518 
519  if (IsScalableVector) {
523 
525  auto MIB = BuildMI(MBB, I, DL, get(Opcode))
526  .addReg(SrcReg, getKillRegState(IsKill))
527  .addFrameIndex(FI)
528  .addMemOperand(MMO);
529  if (IsZvlsseg) {
530  // For spilling/reloading Zvlsseg registers, append the dummy field for
531  // the scaled vector length. The argument will be used when expanding
532  // these pseudo instructions.
533  MIB.addReg(RISCV::X0);
534  }
535  } else {
538  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
539 
540  BuildMI(MBB, I, DL, get(Opcode))
541  .addReg(SrcReg, getKillRegState(IsKill))
542  .addFrameIndex(FI)
543  .addImm(0)
544  .addMemOperand(MMO);
545  }
546 }
547 
550  Register DstReg, int FI,
551  const TargetRegisterClass *RC,
552  const TargetRegisterInfo *TRI) const {
553  DebugLoc DL;
554  if (I != MBB.end())
555  DL = I->getDebugLoc();
556 
557  MachineFunction *MF = MBB.getParent();
558  MachineFrameInfo &MFI = MF->getFrameInfo();
559 
560  unsigned Opcode;
561  bool IsScalableVector = true;
562  bool IsZvlsseg = true;
563  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
564  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
565  RISCV::LW : RISCV::LD;
566  IsScalableVector = false;
567  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
568  Opcode = RISCV::FLH;
569  IsScalableVector = false;
570  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
571  Opcode = RISCV::FLW;
572  IsScalableVector = false;
573  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
574  Opcode = RISCV::FLD;
575  IsScalableVector = false;
576  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
577  Opcode = RISCV::PseudoVRELOAD_M1;
578  IsZvlsseg = false;
579  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
580  Opcode = RISCV::PseudoVRELOAD_M2;
581  IsZvlsseg = false;
582  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
583  Opcode = RISCV::PseudoVRELOAD_M4;
584  IsZvlsseg = false;
585  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
586  Opcode = RISCV::PseudoVRELOAD_M8;
587  IsZvlsseg = false;
588  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
589  Opcode = RISCV::PseudoVRELOAD2_M1;
590  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
591  Opcode = RISCV::PseudoVRELOAD2_M2;
592  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
593  Opcode = RISCV::PseudoVRELOAD2_M4;
594  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
595  Opcode = RISCV::PseudoVRELOAD3_M1;
596  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
597  Opcode = RISCV::PseudoVRELOAD3_M2;
598  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
599  Opcode = RISCV::PseudoVRELOAD4_M1;
600  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
601  Opcode = RISCV::PseudoVRELOAD4_M2;
602  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
603  Opcode = RISCV::PseudoVRELOAD5_M1;
604  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
605  Opcode = RISCV::PseudoVRELOAD6_M1;
606  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
607  Opcode = RISCV::PseudoVRELOAD7_M1;
608  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
609  Opcode = RISCV::PseudoVRELOAD8_M1;
610  else
611  llvm_unreachable("Can't load this register from stack slot");
612 
613  if (IsScalableVector) {
617 
619  auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
620  .addFrameIndex(FI)
621  .addMemOperand(MMO);
622  if (IsZvlsseg) {
623  // For spilling/reloading Zvlsseg registers, append the dummy field for
624  // the scaled vector length. The argument will be used when expanding
625  // these pseudo instructions.
626  MIB.addReg(RISCV::X0);
627  }
628  } else {
631  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
632 
633  BuildMI(MBB, I, DL, get(Opcode), DstReg)
634  .addFrameIndex(FI)
635  .addImm(0)
636  .addMemOperand(MMO);
637  }
638 }
639 
642  const DebugLoc &DL, Register DstReg, uint64_t Val,
643  MachineInstr::MIFlag Flag) const {
644  Register SrcReg = RISCV::X0;
645 
646  if (!STI.is64Bit() && !isInt<32>(Val))
647  report_fatal_error("Should only materialize 32-bit constants for RV32");
648 
650  RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
651  assert(!Seq.empty());
652 
653  for (RISCVMatInt::Inst &Inst : Seq) {
654  switch (Inst.getOpndKind()) {
655  case RISCVMatInt::Imm:
656  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
657  .addImm(Inst.Imm)
658  .setMIFlag(Flag);
659  break;
660  case RISCVMatInt::RegX0:
661  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
662  .addReg(SrcReg, RegState::Kill)
663  .addReg(RISCV::X0)
664  .setMIFlag(Flag);
665  break;
666  case RISCVMatInt::RegReg:
667  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
668  .addReg(SrcReg, RegState::Kill)
669  .addReg(SrcReg, RegState::Kill)
670  .setMIFlag(Flag);
671  break;
672  case RISCVMatInt::RegImm:
673  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
674  .addReg(SrcReg, RegState::Kill)
675  .addImm(Inst.Imm)
676  .setMIFlag(Flag);
677  break;
678  }
679 
680  // Only the first instruction has X0 as its source.
681  SrcReg = DstReg;
682  }
683 }
684 
686  switch (Opc) {
687  default:
688  return RISCVCC::COND_INVALID;
689  case RISCV::BEQ:
690  return RISCVCC::COND_EQ;
691  case RISCV::BNE:
692  return RISCVCC::COND_NE;
693  case RISCV::BLT:
694  return RISCVCC::COND_LT;
695  case RISCV::BGE:
696  return RISCVCC::COND_GE;
697  case RISCV::BLTU:
698  return RISCVCC::COND_LTU;
699  case RISCV::BGEU:
700  return RISCVCC::COND_GEU;
701  }
702 }
703 
704 // The contents of values added to Cond are not examined outside of
705 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
706 // push BranchOpcode, Reg1, Reg2.
709  // Block ends with fall-through condbranch.
710  assert(LastInst.getDesc().isConditionalBranch() &&
711  "Unknown conditional branch");
712  Target = LastInst.getOperand(2).getMBB();
713  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
714  Cond.push_back(MachineOperand::CreateImm(CC));
715  Cond.push_back(LastInst.getOperand(0));
716  Cond.push_back(LastInst.getOperand(1));
717 }
718 
720  switch (CC) {
721  default:
722  llvm_unreachable("Unknown condition code!");
723  case RISCVCC::COND_EQ:
724  return get(RISCV::BEQ);
725  case RISCVCC::COND_NE:
726  return get(RISCV::BNE);
727  case RISCVCC::COND_LT:
728  return get(RISCV::BLT);
729  case RISCVCC::COND_GE:
730  return get(RISCV::BGE);
731  case RISCVCC::COND_LTU:
732  return get(RISCV::BLTU);
733  case RISCVCC::COND_GEU:
734  return get(RISCV::BGEU);
735  }
736 }
737 
739  switch (CC) {
740  default:
741  llvm_unreachable("Unrecognized conditional branch");
742  case RISCVCC::COND_EQ:
743  return RISCVCC::COND_NE;
744  case RISCVCC::COND_NE:
745  return RISCVCC::COND_EQ;
746  case RISCVCC::COND_LT:
747  return RISCVCC::COND_GE;
748  case RISCVCC::COND_GE:
749  return RISCVCC::COND_LT;
750  case RISCVCC::COND_LTU:
751  return RISCVCC::COND_GEU;
752  case RISCVCC::COND_GEU:
753  return RISCVCC::COND_LTU;
754  }
755 }
756 
758  MachineBasicBlock *&TBB,
759  MachineBasicBlock *&FBB,
761  bool AllowModify) const {
762  TBB = FBB = nullptr;
763  Cond.clear();
764 
765  // If the block has no terminators, it just falls into the block after it.
767  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
768  return false;
769 
770  // Count the number of terminators and find the first unconditional or
771  // indirect branch.
772  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
773  int NumTerminators = 0;
774  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
775  J++) {
776  NumTerminators++;
777  if (J->getDesc().isUnconditionalBranch() ||
778  J->getDesc().isIndirectBranch()) {
779  FirstUncondOrIndirectBr = J.getReverse();
780  }
781  }
782 
783  // If AllowModify is true, we can erase any terminators after
784  // FirstUncondOrIndirectBR.
785  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
786  while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
787  std::next(FirstUncondOrIndirectBr)->eraseFromParent();
788  NumTerminators--;
789  }
790  I = FirstUncondOrIndirectBr;
791  }
792 
793  // We can't handle blocks that end in an indirect branch.
794  if (I->getDesc().isIndirectBranch())
795  return true;
796 
797  // We can't handle blocks with more than 2 terminators.
798  if (NumTerminators > 2)
799  return true;
800 
801  // Handle a single unconditional branch.
802  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
803  TBB = getBranchDestBlock(*I);
804  return false;
805  }
806 
807  // Handle a single conditional branch.
808  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
809  parseCondBranch(*I, TBB, Cond);
810  return false;
811  }
812 
813  // Handle a conditional branch followed by an unconditional branch.
814  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
815  I->getDesc().isUnconditionalBranch()) {
816  parseCondBranch(*std::prev(I), TBB, Cond);
817  FBB = getBranchDestBlock(*I);
818  return false;
819  }
820 
821  // Otherwise, we can't handle this.
822  return true;
823 }
824 
826  int *BytesRemoved) const {
827  if (BytesRemoved)
828  *BytesRemoved = 0;
830  if (I == MBB.end())
831  return 0;
832 
833  if (!I->getDesc().isUnconditionalBranch() &&
834  !I->getDesc().isConditionalBranch())
835  return 0;
836 
837  // Remove the branch.
838  if (BytesRemoved)
839  *BytesRemoved += getInstSizeInBytes(*I);
840  I->eraseFromParent();
841 
842  I = MBB.end();
843 
844  if (I == MBB.begin())
845  return 1;
846  --I;
847  if (!I->getDesc().isConditionalBranch())
848  return 1;
849 
850  // Remove the branch.
851  if (BytesRemoved)
852  *BytesRemoved += getInstSizeInBytes(*I);
853  I->eraseFromParent();
854  return 2;
855 }
856 
857 // Inserts a branch into the end of the specific MachineBasicBlock, returning
858 // the number of instructions inserted.
861  ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
862  if (BytesAdded)
863  *BytesAdded = 0;
864 
865  // Shouldn't be a fall through.
866  assert(TBB && "insertBranch must not be told to insert a fallthrough");
867  assert((Cond.size() == 3 || Cond.size() == 0) &&
868  "RISCV branch conditions have two components!");
869 
870  // Unconditional branch.
871  if (Cond.empty()) {
872  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
873  if (BytesAdded)
874  *BytesAdded += getInstSizeInBytes(MI);
875  return 1;
876  }
877 
878  // Either a one or two-way conditional branch.
879  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
880  MachineInstr &CondMI =
881  *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
882  if (BytesAdded)
883  *BytesAdded += getInstSizeInBytes(CondMI);
884 
885  // One-way conditional branch.
886  if (!FBB)
887  return 1;
888 
889  // Two-way conditional branch.
890  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
891  if (BytesAdded)
892  *BytesAdded += getInstSizeInBytes(MI);
893  return 2;
894 }
895 
897  MachineBasicBlock &DestBB,
898  MachineBasicBlock &RestoreBB,
899  const DebugLoc &DL, int64_t BrOffset,
900  RegScavenger *RS) const {
901  assert(RS && "RegScavenger required for long branching");
902  assert(MBB.empty() &&
903  "new block should be inserted for expanding unconditional branch");
904  assert(MBB.pred_size() == 1);
905 
906  MachineFunction *MF = MBB.getParent();
908 
909  if (!isInt<32>(BrOffset))
911  "Branch offsets outside of the signed 32-bit range not supported");
912 
913  // FIXME: A virtual register must be used initially, as the register
914  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
915  // uses the same workaround).
916  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
917  auto II = MBB.end();
918 
919  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
920  .addReg(ScratchReg, RegState::Define | RegState::Dead)
921  .addMBB(&DestBB, RISCVII::MO_CALL);
922 
923  RS->enterBasicBlockEnd(MBB);
924  Register Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
925  MI.getIterator(), false, 0);
926  // TODO: The case when there is no scavenged register needs special handling.
927  assert(Scav != RISCV::NoRegister && "No register is scavenged!");
928  MRI.replaceRegWith(ScratchReg, Scav);
929  MRI.clearVirtRegs();
930  RS->setRegUsed(Scav);
931 }
932 
935  assert((Cond.size() == 3) && "Invalid branch condition!");
936  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
937  Cond[0].setImm(getOppositeBranchCondition(CC));
938  return false;
939 }
940 
943  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
944  // The branch target is always the last operand.
945  int NumOp = MI.getNumExplicitOperands();
946  return MI.getOperand(NumOp - 1).getMBB();
947 }
948 
950  int64_t BrOffset) const {
951  unsigned XLen = STI.getXLen();
952  // Ideally we could determine the supported branch offset from the
953  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
954  // PseudoBR.
955  switch (BranchOp) {
956  default:
957  llvm_unreachable("Unexpected opcode!");
958  case RISCV::BEQ:
959  case RISCV::BNE:
960  case RISCV::BLT:
961  case RISCV::BGE:
962  case RISCV::BLTU:
963  case RISCV::BGEU:
964  return isIntN(13, BrOffset);
965  case RISCV::JAL:
966  case RISCV::PseudoBR:
967  return isIntN(21, BrOffset);
968  case RISCV::PseudoJump:
969  return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
970  }
971 }
972 
974  if (MI.isMetaInstruction())
975  return 0;
976 
977  unsigned Opcode = MI.getOpcode();
978 
979  if (Opcode == TargetOpcode::INLINEASM ||
980  Opcode == TargetOpcode::INLINEASM_BR) {
981  const MachineFunction &MF = *MI.getParent()->getParent();
982  const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
983  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
984  *TM.getMCAsmInfo());
985  }
986 
987  if (MI.getParent() && MI.getParent()->getParent()) {
988  const auto MF = MI.getMF();
989  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
990  const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
991  const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
992  const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
993  if (isCompressibleInst(MI, &ST, MRI, STI))
994  return 2;
995  }
996  return get(Opcode).getSize();
997 }
998 
1000  const unsigned Opcode = MI.getOpcode();
1001  switch (Opcode) {
1002  default:
1003  break;
1004  case RISCV::FSGNJ_D:
1005  case RISCV::FSGNJ_S:
1006  case RISCV::FSGNJ_H:
1007  // The canonical floating-point move is fsgnj rd, rs, rs.
1008  return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1009  MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
1010  case RISCV::ADDI:
1011  case RISCV::ORI:
1012  case RISCV::XORI:
1013  return (MI.getOperand(1).isReg() &&
1014  MI.getOperand(1).getReg() == RISCV::X0) ||
1015  (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1016  }
1017  return MI.isAsCheapAsAMove();
1018 }
1019 
// NOTE(review): signature line lost in extraction; this is the body of
// RISCVInstrInfo::isCopyInstrImpl (per doxygen index). Returns the
// (destination, source) operand pair when MI acts as a plain register copy:
// a generic move, addi rd, rs, 0, or fsgnj rd, rs, rs. Returns None otherwise.
1022  if (MI.isMoveReg())
1023  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1024  switch (MI.getOpcode()) {
1025  default:
1026  break;
1027  case RISCV::ADDI:
1028  // Operand 1 can be a frameindex but callers expect registers
1029  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1030  MI.getOperand(2).getImm() == 0)
1031  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1032  break;
1033  case RISCV::FSGNJ_D:
1034  case RISCV::FSGNJ_S:
1035  case RISCV::FSGNJ_H:
1036  // The canonical floating-point move is fsgnj rd, rs, rs.
1037  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1038  MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1039  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1040  break;
1041  }
1042  return None;
1043 }
1044 
// NOTE(review): the leading signature line was lost in extraction; this is
// RISCVInstrInfo::verifyInstruction (per doxygen index). Checks that every
// RISC-V immediate operand of MI lies in the range its operand type demands;
// sets ErrInfo and returns false on the first violation.
1046  StringRef &ErrInfo) const {
1047  const MCInstrInfo *MCII = STI.getInstrInfo();
1048  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
1049 
1050  for (auto &OI : enumerate(Desc.operands())) {
1051  unsigned OpType = OI.value().OperandType;
// Only RISC-V-specific immediate operand types are range-checked here.
1052  if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
1053  OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
1054  const MachineOperand &MO = MI.getOperand(OI.index());
1055  if (MO.isImm()) {
1056  int64_t Imm = MO.getImm();
1057  bool Ok;
1058  switch (OpType) {
1059  default:
1060  llvm_unreachable("Unexpected operand type");
1061 
1062  // clang-format off
1063 #define CASE_OPERAND_UIMM(NUM) \
1064  case RISCVOp::OPERAND_UIMM##NUM: \
1065  Ok = isUInt<NUM>(Imm); \
1066  break;
// NOTE(review): several CASE_OPERAND_UIMM lines (small widths) and the case
// labels preceding the checks below were lost in extraction — verify against
// the upstream file before editing this switch.
1001› skip
1072  CASE_OPERAND_UIMM(12)
1073  CASE_OPERAND_UIMM(20)
1074  // clang-format on
1076  Ok = isInt<12>(Imm);
1077  break;
// uimmlog2xlen-style operand: 6 bits on RV64, 5 bits on RV32.
1079  if (STI.getTargetTriple().isArch64Bit())
1080  Ok = isUInt<6>(Imm);
1081  else
1082  Ok = isUInt<5>(Imm);
1083  break;
1085  Ok = Imm >= 0 && Imm <= 10;
1086  break;
1087  }
1088  if (!Ok) {
1089  ErrInfo = "Invalid immediate";
1090  return false;
1091  }
1092  }
1093  }
1094  }
1095 
1096  return true;
1097 }
1098 
1099 // Return true if we can get the base operand, the byte offset, and the
1100 // memory width of an instruction. Width is the size of the memory that is
1100 // being loaded/stored (in bytes).
// NOTE(review): the line declaring the function name
// (RISCVInstrInfo::getMemOperandWithOffsetWidth) was lost in extraction.
1102  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
1103  unsigned &Width, const TargetRegisterInfo *TRI) const {
1104  if (!LdSt.mayLoadOrStore())
1105  return false;
1106 
1107  // Here we assume the standard RISC-V ISA, which uses a base+offset
1108  // addressing mode. You'll need to relax these conditions to support custom
1109  // load/stores instructions.
1110  if (LdSt.getNumExplicitOperands() != 3)
1111  return false;
1112  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
1113  return false;
1114 
// A single memory operand is required so Width is unambiguous.
1115  if (!LdSt.hasOneMemOperand())
1116  return false;
1117 
1118  Width = (*LdSt.memoperands_begin())->getSize();
1119  BaseReg = &LdSt.getOperand(1);
1120  Offset = LdSt.getOperand(2).getImm();
1121  return true;
1122 }
1123 
// NOTE(review): the leading signature line was lost in extraction; this is
// RISCVInstrInfo::areMemAccessesTriviallyDisjoint (per doxygen index).
// Returns true when two memory accesses provably do not overlap: same base
// operand and non-overlapping [offset, offset+width) ranges.
1125  const MachineInstr &MIa, const MachineInstr &MIb) const {
1126  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
1127  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
1128 
// NOTE(review): the continuation of this condition (line 1130, presumably
// the ordered-memory-ref checks) was lost in extraction.
1129  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1131  return false;
1132 
1133  // Retrieve the base register, offset from the base register and width. Width
1134  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
1135  // base registers are identical, and the offset of a lower memory access +
1136  // the width doesn't overlap the offset of a higher memory access,
1137  // then the memory accesses are different.
1139  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
1140  int64_t OffsetA = 0, OffsetB = 0;
1141  unsigned int WidthA = 0, WidthB = 0;
1142  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
1143  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
1144  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
// NOTE(review): offsets are int64_t but are narrowed to int here — offsets
// outside 32-bit range would be truncated. Likely benign for RISC-V
// immediates, but worth confirming upstream.
1145  int LowOffset = std::min(OffsetA, OffsetB);
1146  int HighOffset = std::max(OffsetA, OffsetB);
1147  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1148  if (LowOffset + LowWidth <= HighOffset)
1149  return true;
1150  }
1151  }
1152  return false;
1153 }
1154 
// NOTE(review): the line naming the function
// (RISCVInstrInfo::decomposeMachineOperandsTargetFlags) was lost in
// extraction. Splits a target-flag word into its direct part (masked by
// MO_DIRECT_FLAG_MASK) and the remaining bitmask part.
1155 std::pair<unsigned, unsigned>
1157  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1158  return std::make_pair(TF & Mask, TF & ~Mask);
1159 }
1160 
// NOTE(review): signature lines lost in extraction; this is
// RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags (per
// doxygen index). Maps each direct operand target flag to the name used in
// MIR serialization.
1163  using namespace RISCVII;
1164  static const std::pair<unsigned, const char *> TargetFlags[] = {
1165  {MO_CALL, "riscv-call"},
1166  {MO_PLT, "riscv-plt"},
1167  {MO_LO, "riscv-lo"},
1168  {MO_HI, "riscv-hi"},
1169  {MO_PCREL_LO, "riscv-pcrel-lo"},
1170  {MO_PCREL_HI, "riscv-pcrel-hi"},
1171  {MO_GOT_HI, "riscv-got-hi"},
1172  {MO_TPREL_LO, "riscv-tprel-lo"},
1173  {MO_TPREL_HI, "riscv-tprel-hi"},
1174  {MO_TPREL_ADD, "riscv-tprel-add"},
1175  {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1176  {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1177  return makeArrayRef(TargetFlags);
1178 }
// NOTE(review): first signature line lost in extraction; this is
// RISCVInstrInfo::isFunctionSafeToOutlineFrom (per doxygen index). A
// function is unsafe to outline from when the linker may deduplicate it
// (link-once-ODR, unless explicitly allowed) or when it has an explicit
// section assignment.
1180  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1181  const Function &F = MF.getFunction();
1182 
1183  // Can F be deduplicated by the linker? If it can, don't outline from it.
1184  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1185  return false;
1186 
1187  // Don't outline from functions with section markings; the program could
1188  // expect that all the code is in the named section.
1189  if (F.hasSection())
1190  return false;
1191 
1192  // It's safe to outline from MF.
1193  return true;
1194 }
1195 
1197  unsigned &Flags) const {
1198  // More accurate safety checking is done in getOutliningCandidateInfo.
1200 }
1201 
1202 // Enum values indicating how an outlined call should be constructed.
1205 };
1206 
1208  MachineFunction &MF) const {
1209  return MF.getFunction().hasMinSize();
1210 }
1211 
// NOTE(review): first signature line lost in extraction; this is
// RISCVInstrInfo::getOutliningCandidateInfo (per doxygen index). Filters
// candidates where t0 (x5) is unavailable for the call, then computes the
// sequence size and per-candidate call/frame overheads.
1213  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1214 
1215  // First we need to filter out candidates where the X5 register (IE t0) can't
1216  // be used to setup the function call.
1217  auto CannotInsertCall = [](outliner::Candidate &C) {
1218  const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1219  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
1220  };
1221 
1222  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1223 
1224  // If the sequence doesn't have enough candidates left, then we're done.
1225  if (RepeatedSequenceLocs.size() < 2)
1226  return outliner::OutlinedFunction();
1227 
// Sum the byte sizes of the instructions in the candidate sequence.
1228  unsigned SequenceSize = 0;
1229 
1230  auto I = RepeatedSequenceLocs[0].front();
1231  auto E = std::next(RepeatedSequenceLocs[0].back());
1232  for (; I != E; ++I)
1233  SequenceSize += getInstSizeInBytes(*I);
1234 
1235  // call t0, function = 8 bytes.
1236  unsigned CallOverhead = 8;
1237  for (auto &C : RepeatedSequenceLocs)
1238  C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1239 
1240  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1241  unsigned FrameOverhead = 4;
1242  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1243  .getFeatureBits()[RISCV::FeatureStdExtC])
1244  FrameOverhead = 2;
1245 
1246  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1247  FrameOverhead, MachineOutlinerDefault);
1248 }
1249 
// NOTE(review): this is RISCVInstrInfo::getOutliningType (per doxygen
// index). Its signature line and the `return outliner::InstrType::...`
// value lines after each check were lost in extraction — each truncated
// `if` below originally returned an Illegal/Invisible/Legal classification;
// verify against the upstream file before editing.
1252  unsigned Flags) const {
1253  MachineInstr &MI = *MBBI;
1254  MachineBasicBlock *MBB = MI.getParent();
1255  const TargetRegisterInfo *TRI =
1257 
1258  // Positions generally can't safely be outlined.
1259  if (MI.isPosition()) {
1260  // We can manually strip out CFI instructions later.
1261  if (MI.isCFIInstruction())
1262  // If current function has exception handling code, we can't outline &
1263  // strip these CFI instructions since it may break .eh_frame section
1264  // needed in unwinding.
1265  return MI.getMF()->getFunction().needsUnwindTableEntry()
1268 
1270  }
1271 
1272  // Don't trust the user to write safe inline assembly.
1273  if (MI.isInlineAsm())
1275 
1276  // We can't outline branches to other basic blocks.
1277  if (MI.isTerminator() && !MBB->succ_empty())
1279 
1280  // We need support for tail calls to outlined functions before return
1281  // statements can be allowed.
1282  if (MI.isReturn())
1284 
1285  // Don't allow modifying the X5 register which we use for return addresses for
1286  // these outlined functions.
1287  if (MI.modifiesRegister(RISCV::X5, TRI) ||
1288  MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
1290 
1291  // Make sure the operands don't reference something unsafe.
1292  for (const auto &MO : MI.operands())
1293  if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
1295 
1296  // Don't allow instructions which won't be materialized to impact outlining
1297  // analysis.
1298  if (MI.isMetaInstruction())
1300 
1302 }
1303 
// NOTE(review): signature lines lost in extraction; this is
// RISCVInstrInfo::buildOutlinedFrame (per doxygen index). Strips CFI
// instructions from the outlined body, marks t0 (x5) live-in (it holds the
// return address), and appends a `jr t0` (JALR x0, x5, 0) return.
1306  const outliner::OutlinedFunction &OF) const {
1307 
1308  // Strip out any CFI instructions
// Restart the scan after each removal since erasing invalidates the
// iterator being advanced.
1309  bool Changed = true;
1310  while (Changed) {
1311  Changed = false;
1312  auto I = MBB.begin();
1313  auto E = MBB.end();
1314  for (; I != E; ++I) {
1315  if (I->isCFIInstruction()) {
1316  I->removeFromParent();
1317  Changed = true;
1318  break;
1319  }
1320  }
1321  }
1322 
1323  MBB.addLiveIn(RISCV::X5);
1324 
1325  // Add in a return instruction to the end of the outlined frame.
1326  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1327  .addReg(RISCV::X0, RegState::Define)
1328  .addReg(RISCV::X5)
1329  .addImm(0));
1330 }
1331 
// NOTE(review): signature lines lost in extraction; this is
// RISCVInstrInfo::insertOutlinedCall (per doxygen index). Inserts a
// `call t0, <outlined fn>` (PseudoCALLReg clobbering x5) at the candidate
// location and returns an iterator to the inserted call.
1334  MachineFunction &MF, outliner::Candidate &C) const {
1335 
1336  // Add in a call instruction to the outlined function at the given location.
1337  It = MBB.insert(It,
1338  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1339  .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1340  RISCVII::MO_CALL));
1341  return It;
1342 }
1343 
1344 // MIR printer helper function to annotate Operands with a comment.
// NOTE(review): the function-name line and two interior lines (1350: the
// call producing the generic comment; 1371: presumably the
// RISCVVType::printVType call emitting the vtype text) were lost in
// extraction.
1346  const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
1347  const TargetRegisterInfo *TRI) const {
1348  // Print a generic comment for this operand if there is one.
1349  std::string GenericComment =
1351  if (!GenericComment.empty())
1352  return GenericComment;
1353 
1354  // If not, we must have an immediate operand.
1355  if (!Op.isImm())
1356  return std::string();
1357 
1358  std::string Comment;
1359  raw_string_ostream OS(Comment);
1360 
1361  uint64_t TSFlags = MI.getDesc().TSFlags;
1362 
1363  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
1364  // operand of vector codegen pseudos.
1365  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
1366  MI.getOpcode() == RISCV::PseudoVSETVLI ||
1367  MI.getOpcode() == RISCV::PseudoVSETIVLI ||
1368  MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
1369  OpIdx == 2) {
1370  unsigned Imm = MI.getOperand(OpIdx).getImm();
1372  } else if (RISCVII::hasSEWOp(TSFlags)) {
1373  unsigned NumOperands = MI.getNumExplicitOperands();
1374  bool HasPolicy = RISCVII::hasVecPolicyOp(TSFlags);
1375 
1376  // The SEW operand is before any policy operand.
1377  if (OpIdx != NumOperands - HasPolicy - 1)
1378  return std::string();
1379 
// Decode log2(SEW); a stored 0 denotes SEW=8 for mask instructions.
1380  unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
1381  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
1382  assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
1383 
1384  OS << "e" << SEW;
1385  }
1386 
1387  OS.flush();
1388  return Comment;
1389 }
1390 
1391 // clang-format off
// Helper macros expanding to the vector FMA pseudo opcodes across LMUL
// values; used as case labels in the commute handling below. Each
// CASE_VFMA_OPCODE_LMULS_* macro chains the next-larger LMUL variant.
1392 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
1393  RISCV::PseudoV##OP##_##TYPE##_##LMUL
1394 
// M1 through M8.
1395 #define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
1396  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
1397  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
1398  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
1399  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
1400 
// MF2 through M8.
1401 #define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
1402  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
1403  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
1404 
// MF4 through M8.
1405 #define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
1406  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
1407  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
1408 
// All LMULs, MF8 through M8.
1409 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
1410  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
1411  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
1412 
// Scalar-splat (.vf) variants per element width; LMUL floor depends on SEW.
1413 #define CASE_VFMA_SPLATS(OP) \
1414  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
1415  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
1416  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
1417 // clang-format on
1418 
// NOTE(review): first signature line lost in extraction; this is
// RISCVInstrInfo::findCommutedOpIndices (per doxygen index). Determines
// which operands of a vector FMA pseudo may be commuted, honoring the tied
// destination/source and the tail policy. Some case-label lines
// (1460-1462, the remaining *_VV opcodes of the second group) were lost in
// extraction.
1420  unsigned &SrcOpIdx1,
1421  unsigned &SrcOpIdx2) const {
1422  const MCInstrDesc &Desc = MI.getDesc();
1423  if (!Desc.isCommutable())
1424  return false;
1425 
1426  switch (MI.getOpcode()) {
1427  case CASE_VFMA_SPLATS(FMADD):
1428  case CASE_VFMA_SPLATS(FMSUB):
1429  case CASE_VFMA_SPLATS(FMACC):
1430  case CASE_VFMA_SPLATS(FMSAC):
1431  case CASE_VFMA_SPLATS(FNMADD):
1432  case CASE_VFMA_SPLATS(FNMSUB):
1433  case CASE_VFMA_SPLATS(FNMACC):
1434  case CASE_VFMA_SPLATS(FNMSAC):
1435  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1436  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1437  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1438  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1439  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1440  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1441  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1442  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1443  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1444  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1445  // If the tail policy is undisturbed we can't commute.
1446  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1447  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1448  return false;
1449 
1450  // For these instructions we can only swap operand 1 and operand 3 by
1451  // changing the opcode.
1452  unsigned CommutableOpIdx1 = 1;
1453  unsigned CommutableOpIdx2 = 3;
1454  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1455  CommutableOpIdx2))
1456  return false;
1457  return true;
1458  }
1459  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1463  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1464  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1465  // If the tail policy is undisturbed we can't commute.
1466  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1467  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1468  return false;
1469 
1470  // For these instructions we have more freedom. We can commute with the
1471  // other multiplicand or with the addend/subtrahend/minuend.
1472 
1473  // Any fixed operand must be from source 1, 2 or 3.
1474  if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
1475  return false;
1476  if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
1477  return false;
1478 
1479  // It both ops are fixed one must be the tied source.
1480  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1481  SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
1482  return false;
1483 
1484  // Look for two different register operands assumed to be commutable
1485  // regardless of the FMA opcode. The FMA opcode is adjusted later if
1486  // needed.
1487  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1488  SrcOpIdx2 == CommuteAnyOperandIndex) {
1489  // At least one of operands to be commuted is not specified and
1490  // this method is free to choose appropriate commutable operands.
1491  unsigned CommutableOpIdx1 = SrcOpIdx1;
1492  if (SrcOpIdx1 == SrcOpIdx2) {
1493  // Both of operands are not fixed. Set one of commutable
1494  // operands to the tied source.
1495  CommutableOpIdx1 = 1;
1496  } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
1497  // Only one of the operands is not fixed.
1498  CommutableOpIdx1 = SrcOpIdx2;
1499  }
1500 
1501  // CommutableOpIdx1 is well defined now. Let's choose another commutable
1502  // operand and assign its index to CommutableOpIdx2.
1503  unsigned CommutableOpIdx2;
1504  if (CommutableOpIdx1 != 1) {
1505  // If we haven't already used the tied source, we must use it now.
1506  CommutableOpIdx2 = 1;
1507  } else {
1508  Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
1509 
1510  // The commuted operands should have different registers.
1511  // Otherwise, the commute transformation does not change anything and
1512  // is useless. We use this as a hint to make our decision.
1513  if (Op1Reg != MI.getOperand(2).getReg())
1514  CommutableOpIdx2 = 2;
1515  else
1516  CommutableOpIdx2 = 3;
1517  }
1518 
1519  // Assign the found pair of commutable indices to SrcOpIdx1 and
1520  // SrcOpIdx2 to return those values.
1521  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1522  CommutableOpIdx2))
1523  return false;
1524  }
1525 
1526  return true;
1527  }
1528  }
1529 
1530  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
1531 }
1532 
// Helper macros generating case statements that rewrite a VFMA pseudo
// opcode to its commuted counterpart (e.g. FMACC <-> FMADD), across LMUL
// values and splat element widths. Each *_LMULS_* macro chains the
// next-larger LMUL group.
1533 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
1534  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
1535  Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
1536  break;
1537 
1538 #define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
1539  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
1540  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
1541  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
1542  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
1543 
1544 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
1545  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
1546  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
1547 
1548 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
1549  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
1550  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
1551 
1552 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
1553  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
1554  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
1555 
// Splat (.vf) variants; the minimum LMUL depends on the element width.
1556 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
1557  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
1558  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
1559  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
1560 
// NOTE(review): first signature line lost in extraction; this is
// RISCVInstrInfo::commuteInstructionImpl (per doxygen index). Commutes the
// chosen operand pair of a vector FMA pseudo, switching to the dual opcode
// (e.g. FMACC <-> FMADD) when the tied/addend operand participates. Several
// CASE_VFMA_CHANGE_OPCODE_* lines (1599-1605, 1607-1609, 1624-1626,
// 1638-1640) were lost in extraction — verify against upstream.
1562  bool NewMI,
1563  unsigned OpIdx1,
1564  unsigned OpIdx2) const {
// Clone MI only when the caller asked for a new instruction; otherwise
// commute in place.
1565  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1566  if (NewMI)
1567  return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1568  return MI;
1569  };
1570 
1571  switch (MI.getOpcode()) {
1572  case CASE_VFMA_SPLATS(FMACC):
1573  case CASE_VFMA_SPLATS(FMADD):
1574  case CASE_VFMA_SPLATS(FMSAC):
1575  case CASE_VFMA_SPLATS(FMSUB):
1576  case CASE_VFMA_SPLATS(FNMACC):
1577  case CASE_VFMA_SPLATS(FNMADD):
1578  case CASE_VFMA_SPLATS(FNMSAC):
1579  case CASE_VFMA_SPLATS(FNMSUB):
1580  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1581  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1582  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1583  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1584  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1585  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1586  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1587  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1588  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1589  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1590  // It only make sense to toggle these between clobbering the
1591  // addend/subtrahend/minuend one of the multiplicands.
1592  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1593  assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
1594  unsigned Opc;
1595  switch (MI.getOpcode()) {
1596  default:
1597  llvm_unreachable("Unexpected opcode");
1598  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
1599  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
1606  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
1610  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
1611  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
1612  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
1613  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
1614  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
1615  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
1616  }
1617 
// Rewrite to the dual opcode, then let the generic implementation swap the
// operands themselves (NewMI=false: WorkingMI is already the right object).
1618  auto &WorkingMI = cloneIfNew(MI);
1619  WorkingMI.setDesc(get(Opc));
1620  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1621  OpIdx1, OpIdx2);
1622  }
1623  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1627  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1628  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1629  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1630  // If one of the operands, is the addend we need to change opcode.
1631  // Otherwise we're just swapping 2 of the multiplicands.
1632  if (OpIdx1 == 3 || OpIdx2 == 3) {
1633  unsigned Opc;
1634  switch (MI.getOpcode()) {
1635  default:
1636  llvm_unreachable("Unexpected opcode");
1637  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
1641  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
1642  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
1643  }
1644 
1645  auto &WorkingMI = cloneIfNew(MI);
1646  WorkingMI.setDesc(get(Opc));
1647  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1648  OpIdx1, OpIdx2);
1649  }
1650  // Let the default code handle it.
1651  break;
1652  }
1653  }
1654 
1655  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1656 }
1657 
1658 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1659 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1660 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1661 #undef CASE_VFMA_SPLATS
1662 #undef CASE_VFMA_OPCODE_LMULS
1663 #undef CASE_VFMA_OPCODE_COMMON
1664 
1665 // clang-format off
// Helper macros for the widening-op TIED pseudos (e.g. vwadd.wv with the
// destination tied to a source) and for rewriting them to their untied
// forms in convertToThreeAddress below.
1666 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
1667  RISCV::PseudoV##OP##_##LMUL##_TIED
1668 
// MF4 through M4 (widening ops cannot use LMUL=8 for the destination).
1669 #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
1670  CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
1671  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
1672  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
1673  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
1674  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
1675 
1676 #define CASE_WIDEOP_OPCODE_LMULS(OP) \
1677  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
1678  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
1679 // clang-format on
1680 
// Case statements mapping each _TIED pseudo to its untied counterpart.
1681 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
1682  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
1683  NewOpc = RISCV::PseudoV##OP##_##LMUL; \
1684  break;
1685 
1686 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
1687  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
1688  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
1689  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
1690  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
1691  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
1692 
1693 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
1694  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
1695  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
1696 
// NOTE(review): first signature line lost in extraction; this is
// RISCVInstrInfo::convertToThreeAddress (per doxygen index). Rewrites a
// _TIED widening pseudo into its untied three-address form, keeping
// LiveVariables/LiveIntervals up to date. The interior switch's
// CASE_WIDEOP_CHANGE_OPCODE_* lines (1720-1725) and the segment-lookup line
// (1755) were lost in extraction — verify against upstream.
1698  LiveVariables *LV,
1699  LiveIntervals *LIS) const {
1700  switch (MI.getOpcode()) {
1701  default:
1702  break;
1703  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
1704  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
1705  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
1706  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
1707  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
1708  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
1709  // If the tail policy is undisturbed we can't convert.
1710  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
1711  MI.getNumExplicitOperands() == 6);
1712  if ((MI.getOperand(5).getImm() & 1) == 0)
1713  return nullptr;
1714 
1715  // clang-format off
1716  unsigned NewOpc;
1717  switch (MI.getOpcode()) {
1718  default:
1719  llvm_unreachable("Unexpected opcode");
1726  }
1727  // clang-format on
1728 
// Rebuild the instruction with the untied opcode, copying the original
// explicit and implicit operands.
1729  MachineBasicBlock &MBB = *MI.getParent();
1730  MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
1731  .add(MI.getOperand(0))
1732  .add(MI.getOperand(1))
1733  .add(MI.getOperand(2))
1734  .add(MI.getOperand(3))
1735  .add(MI.getOperand(4));
1736  MIB.copyImplicitOps(MI);
1737 
// Transfer kill flags from the old instruction to the replacement.
1738  if (LV) {
1739  unsigned NumOps = MI.getNumOperands();
1740  for (unsigned I = 1; I < NumOps; ++I) {
1741  MachineOperand &Op = MI.getOperand(I);
1742  if (Op.isReg() && Op.isKill())
1743  LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1744  }
1745  }
1746 
1747  if (LIS) {
1748  SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1749 
1750  if (MI.getOperand(0).isEarlyClobber()) {
1751  // Use operand 1 was tied to early-clobber def operand 0, so its live
1752  // interval could have ended at an early-clobber slot. Now they are not
1753  // tied we need to update it to the normal register slot.
1754  LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
1756  if (S->end == Idx.getRegSlot(true))
1757  S->end = Idx.getRegSlot();
1758  }
1759  }
1760 
1761  return MIB;
1762  }
1763  }
1764 
1765  return nullptr;
1766 }
1767 
1768 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1769 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1770 #undef CASE_WIDEOP_OPCODE_LMULS
1771 #undef CASE_WIDEOP_OPCODE_COMMON
1772 
// NOTE(review): first signature lines lost in extraction; this is
// RISCVInstrInfo::getVLENFactoredAmount (per doxygen index). Materializes
// (Amount / 8) * VLENB into a fresh GPR, preferring shifts, Zba shXadd, or
// shift+add/sub over a full multiply; falls back to MUL (requires the M
// extension). Interior lines 1783 (MachineRegisterInfo decl), 1840
// (diagnose call continuation) and 1845 (multiplier operand) were lost in
// extraction.
1776  const DebugLoc &DL,
1777  int64_t Amount,
1778  MachineInstr::MIFlag Flag) const {
1779  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
1780  assert(Amount % 8 == 0 &&
1781  "Reserve the stack by the multiple of one vector size.");
1782 
1784  int64_t NumOfVReg = Amount / 8;
1785 
// Read VLENB (vector register length in bytes) as the scaling base.
1786  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1787  BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), VL)
1788  .setMIFlag(Flag);
1789  assert(isInt<32>(NumOfVReg) &&
1790  "Expect the number of vector registers within 32-bits.");
1791  if (isPowerOf2_32(NumOfVReg)) {
1792  uint32_t ShiftAmount = Log2_32(NumOfVReg);
1793  if (ShiftAmount == 0)
1794  return VL;
1795  BuildMI(MBB, II, DL, get(RISCV::SLLI), VL)
1796  .addReg(VL, RegState::Kill)
1797  .addImm(ShiftAmount)
1798  .setMIFlag(Flag);
1799  } else if ((NumOfVReg == 3 || NumOfVReg == 5 || NumOfVReg == 9) &&
1800  STI.hasStdExtZba()) {
1801  // We can use Zba SHXADD instructions for multiply in some cases.
1802  // TODO: Generalize to SHXADD+SLLI.
1803  unsigned Opc;
1804  switch (NumOfVReg) {
1805  default: llvm_unreachable("Unexpected number of vregs");
1806  case 3: Opc = RISCV::SH1ADD; break;
1807  case 5: Opc = RISCV::SH2ADD; break;
1808  case 9: Opc = RISCV::SH3ADD; break;
1809  }
1810  BuildMI(MBB, II, DL, get(Opc), VL)
1811  .addReg(VL, RegState::Kill)
1812  .addReg(VL)
1813  .setMIFlag(Flag);
// Multiply by 2^k + 1: shift then add.
1814  } else if (isPowerOf2_32(NumOfVReg - 1)) {
1815  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1816  uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
1817  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
1818  .addReg(VL)
1819  .addImm(ShiftAmount)
1820  .setMIFlag(Flag);
1821  BuildMI(MBB, II, DL, get(RISCV::ADD), VL)
1822  .addReg(ScaledRegister, RegState::Kill)
1823  .addReg(VL, RegState::Kill)
1824  .setMIFlag(Flag);
// Multiply by 2^k - 1: shift then subtract.
1825  } else if (isPowerOf2_32(NumOfVReg + 1)) {
1826  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1827  uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
1828  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
1829  .addReg(VL)
1830  .addImm(ShiftAmount)
1831  .setMIFlag(Flag);
1832  BuildMI(MBB, II, DL, get(RISCV::SUB), VL)
1833  .addReg(ScaledRegister, RegState::Kill)
1834  .addReg(VL, RegState::Kill)
1835  .setMIFlag(Flag);
1836  } else {
// General case: materialize the factor and multiply (needs StdExtM).
1837  Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1838  movImm(MBB, II, DL, N, NumOfVReg, Flag);
1839  if (!STI.hasStdExtM())
1841  MF.getFunction(),
1842  "M-extension must be enabled to calculate the vscaled size/offset."});
1843  BuildMI(MBB, II, DL, get(RISCV::MUL), VL)
1844  .addReg(VL, RegState::Kill)
1846  .setMIFlag(Flag);
1847  }
1848 
1849  return VL;
1850 }
1851 
1852 static bool isRVVWholeLoadStore(unsigned Opcode) {
1853  switch (Opcode) {
1854  default:
1855  return false;
1856  case RISCV::VS1R_V:
1857  case RISCV::VS2R_V:
1858  case RISCV::VS4R_V:
1859  case RISCV::VS8R_V:
1860  case RISCV::VL1RE8_V:
1861  case RISCV::VL2RE8_V:
1862  case RISCV::VL4RE8_V:
1863  case RISCV::VL8RE8_V:
1864  case RISCV::VL1RE16_V:
1865  case RISCV::VL2RE16_V:
1866  case RISCV::VL4RE16_V:
1867  case RISCV::VL8RE16_V:
1868  case RISCV::VL1RE32_V:
1869  case RISCV::VL2RE32_V:
1870  case RISCV::VL4RE32_V:
1871  case RISCV::VL8RE32_V:
1872  case RISCV::VL1RE64_V:
1873  case RISCV::VL2RE64_V:
1874  case RISCV::VL4RE64_V:
1875  case RISCV::VL8RE64_V:
1876  return true;
1877  }
1878 }
1879 
1880 bool RISCV::isRVVSpill(const MachineInstr &MI, bool CheckFIs) {
1881  // RVV lacks any support for immediate addressing for stack addresses, so be
1882  // conservative.
1883  unsigned Opcode = MI.getOpcode();
1884  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
1885  !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
1886  return false;
1887  return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
1888  return MO.isFI();
1889  });
1890 }
1891 
// NOTE(review): signature lines lost in extraction; this is the body of
// RISCV::isRVVSpillForZvlsseg (per doxygen index). For segment
// spill/reload pseudos it returns the pair (number of fields NF, LMUL);
// returns None for any other opcode.
1894  switch (Opcode) {
1895  default:
1896  return None;
1897  case RISCV::PseudoVSPILL2_M1:
1898  case RISCV::PseudoVRELOAD2_M1:
1899  return std::make_pair(2u, 1u);
1900  case RISCV::PseudoVSPILL2_M2:
1901  case RISCV::PseudoVRELOAD2_M2:
1902  return std::make_pair(2u, 2u);
1903  case RISCV::PseudoVSPILL2_M4:
1904  case RISCV::PseudoVRELOAD2_M4:
1905  return std::make_pair(2u, 4u);
1906  case RISCV::PseudoVSPILL3_M1:
1907  case RISCV::PseudoVRELOAD3_M1:
1908  return std::make_pair(3u, 1u);
1909  case RISCV::PseudoVSPILL3_M2:
1910  case RISCV::PseudoVRELOAD3_M2:
1911  return std::make_pair(3u, 2u);
1912  case RISCV::PseudoVSPILL4_M1:
1913  case RISCV::PseudoVRELOAD4_M1:
1914  return std::make_pair(4u, 1u);
1915  case RISCV::PseudoVSPILL4_M2:
1916  case RISCV::PseudoVRELOAD4_M2:
1917  return std::make_pair(4u, 2u);
1918  case RISCV::PseudoVSPILL5_M1:
1919  case RISCV::PseudoVRELOAD5_M1:
1920  return std::make_pair(5u, 1u);
1921  case RISCV::PseudoVSPILL6_M1:
1922  case RISCV::PseudoVRELOAD6_M1:
1923  return std::make_pair(6u, 1u);
1924  case RISCV::PseudoVSPILL7_M1:
1925  case RISCV::PseudoVRELOAD7_M1:
1926  return std::make_pair(7u, 1u);
1927  case RISCV::PseudoVSPILL8_M1:
1928  case RISCV::PseudoVRELOAD8_M1:
1929  return std::make_pair(8u, 1u);
1930  }
1931 }
1932 
// NOTE(review): signature line lost in extraction; presumably the body of
// RISCV::isFaultFirstLoad — a fault-only-first load defines both the data
// register and VL. TODO confirm against the upstream file.
1934  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
1935  !MI.isInlineAsm();
1936 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:109
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:28
llvm::RISCVII::isRVVWideningReduction
static bool isRVVWideningReduction(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:163
llvm::RISCVInstrInfo::reverseBranchCondition
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition: RISCVInstrInfo.cpp:933
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm::RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition: RISCVInstrInfo.cpp:1162
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm::RISCVInstrInfo::shouldOutlineFromFunctionByDefault
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Definition: RISCVInstrInfo.cpp:1207
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::MachineInstrBuilder::copyImplicitOps
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
Definition: MachineInstrBuilder.h:315
llvm::HexagonMCInstrInfo::getDesc
const MCInstrDesc & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
Definition: HexagonMCInstrInfo.cpp:255
llvm::RISCVInstrInfo::RISCVInstrInfo
RISCVInstrInfo(RISCVSubtarget &STI)
Definition: RISCVInstrInfo.cpp:56
llvm::MCRegisterInfo::getName
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
Definition: MCRegisterInfo.h:485
llvm::RISCVInstrInfo::getBrCond
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
Definition: RISCVInstrInfo.cpp:719
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
PreferWholeRegisterMove
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
llvm::RISCVCC::COND_GEU
@ COND_GEU
Definition: RISCVInstrInfo.h:36
llvm::RISCVOp::OPERAND_SIMM12
@ OPERAND_SIMM12
Definition: RISCVBaseInfo.h:223
llvm::RISCVCC::getOppositeBranchCondition
CondCode getOppositeBranchCondition(CondCode)
Definition: RISCVInstrInfo.cpp:738
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:1009
llvm::RISCVCC::COND_INVALID
@ COND_INVALID
Definition: RISCVInstrInfo.h:37
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::MachineInstr::mayLoadOrStore
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:1035
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:679
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::Function
Definition: Function.h:60
llvm::RISCVInstrInfo::getOutliningType
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition: RISCVInstrInfo.cpp:1251
llvm::MachineInstr::memoperands_begin
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:709
llvm::RISCVOp::OPERAND_LAST_RISCV_IMM
@ OPERAND_LAST_RISCV_IMM
Definition: RISCVBaseInfo.h:227
llvm::RegScavenger::scavengeRegisterBackwards
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Definition: RegisterScavenging.cpp:585
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:632
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::AArch64SysReg::lookupSysRegByName
const SysReg * lookupSysRegByName(StringRef)
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:145
llvm::RISCVII::MO_TLS_GD_HI
@ MO_TLS_GD_HI
Definition: RISCVBaseInfo.h:205
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN
@ OPERAND_UIMMLOG2XLEN
Definition: RISCVBaseInfo.h:225
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:454
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are pairs (A, B) such that A is the 0-based ...
Definition: STLExtras.h:2057
ErrorHandling.h
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1807
llvm::LiveRange::Segment
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:177
MCInstBuilder.h
llvm::IRSimilarity::Invisible
@ Invisible
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:125
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:112
llvm::RISCVVType::isValidSEW
static bool isValidSEW(unsigned SEW)
Definition: RISCVBaseInfo.h:386
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:234
llvm::MCRegisterInfo::getEncodingValue
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
Definition: MCRegisterInfo.h:553
llvm::RISCVII::hasSEWOp
static bool hasSEWOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:151
llvm::MipsII::MO_TPREL_HI
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
CASE_OPERAND_UIMM
#define CASE_OPERAND_UIMM(NUM)
llvm::X86ISD::FNMADD
@ FNMADD
Definition: X86ISelLowering.h:552
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:488
llvm::outliner::InstrType
InstrType
Represents how an instruction should be mapped by the outliner.
Definition: MachineOutliner.h:33
llvm::RISCVVType::isTailAgnostic
static bool isTailAgnostic(unsigned VType)
Definition: RISCVBaseInfo.h:427
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:127
llvm::M68kII::MO_PLT
@ MO_PLT
On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol name from ...
Definition: M68kBaseInfo.h:114
llvm::PPCISD::FNMSUB
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
Definition: PPCISelLowering.h:170
llvm::RISCVInstrInfo::insertIndirectBranch
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
Definition: RISCVInstrInfo.cpp:896
llvm::RISCVInstrInfo::insertOutlinedCall
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
Definition: RISCVInstrInfo.cpp:1332
llvm::RISCVInstrInfo::STI
const RISCVSubtarget & STI
Definition: RISCVInstrInfo.h:181
llvm::Optional
Definition: APInt.h:33
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
STLExtras.h
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
llvm::RISCVInstrInfo::getBranchDestBlock
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:942
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::outliner::OutlinedFunction
The information necessary to create an outlined function for some class of candidate.
Definition: MachineOutliner.h:214
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:159
RISCVMatInt.h
llvm::RISCVInstrInfo::isLoadFromStackSlot
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:69
llvm::RISCVVType::getSEW
static unsigned getSEW(unsigned VType)
Definition: RISCVBaseInfo.h:422
llvm::RISCVInstrInfo::isBranchOffsetInRange
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition: RISCVInstrInfo.cpp:949
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
RISCVGenInstrInfo
llvm::RISCVInstrInfo::convertToThreeAddress
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Definition: RISCVInstrInfo.cpp:1697
llvm::MachineInstr::hasOneMemOperand
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:724
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::MachineInstr::hasOrderedMemoryRef
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Definition: MachineInstr.cpp:1329
MachineRegisterInfo.h
llvm::RISCV::isFaultFirstLoad
bool isFaultFirstLoad(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:1933
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1025
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:181
CASE_VFMA_OPCODE_LMULS_MF4
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1405
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:111
llvm::MachineBasicBlock::pred_size
unsigned pred_size() const
Definition: MachineBasicBlock.h:338
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:666
llvm::TargetInstrInfo::commuteInstructionImpl
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
Definition: TargetInstrInfo.cpp:165
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::RISCVCC::COND_LT
@ COND_LT
Definition: RISCVInstrInfo.h:33
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:782
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::RISCVOp::OPERAND_RVKRNUM
@ OPERAND_RVKRNUM
Definition: RISCVBaseInfo.h:226
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:546
parseCondBranch
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
Definition: RISCVInstrInfo.cpp:707
llvm::RISCVInstrInfo::findCommutedOpIndices
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition: RISCVInstrInfo.cpp:1419
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:501
llvm::IRSimilarity::Illegal
@ Illegal
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVInstrInfo::analyzeBranch
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition: RISCVInstrInfo.cpp:757
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:45
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
LiveVariables.h
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:623
llvm::LiveVariables::replaceKillInstruction
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
Definition: LiveVariables.cpp:752
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::RISCVCC::COND_LTU
@ COND_LTU
Definition: RISCVInstrInfo.h:35
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:46
llvm::MCInstrDesc::isCommutable
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:478
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:147
llvm::RISCVInstrInfo::decomposeMachineOperandsTargetFlags
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition: RISCVInstrInfo.cpp:1156
llvm::MachineBasicBlock::rend
reverse_iterator rend()
Definition: MachineBasicBlock.h:288
getOppositeBranchCondition
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
Definition: ARCInstrInfo.cpp:102
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
llvm::RegScavenger::enterBasicBlockEnd
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Definition: RegisterScavenging.cpp:87
llvm::raw_ostream::flush
void flush()
Definition: raw_ostream.h:187
llvm::RISCVInstrInfo::removeBranch
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition: RISCVInstrInfo.cpp:825
llvm::RISCVInstrInfo::storeRegToStackSlot
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:454
llvm::MCRegisterInfo::isSubRegisterEq
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
Definition: MCRegisterInfo.h:568
llvm::RISCVSubtarget::getInstrInfo
const RISCVInstrInfo * getInstrInfo() const override
Definition: RISCVSubtarget.h:129
llvm::LiveInterval
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
llvm::RISCVII::MO_GOT_HI
@ MO_GOT_HI
Definition: RISCVBaseInfo.h:200
llvm::RISCVInstrInfo::getNop
MCInst getNop() const override
Definition: RISCVInstrInfo.cpp:60
llvm::TargetInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
Definition: TargetInstrInfo.cpp:1422
llvm::SlotIndex
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:82
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:460
llvm::None
const NoneType None
Definition: None.h:24
llvm::RISCVInstrInfo::areMemAccessesTriviallyDisjoint
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition: RISCVInstrInfo.cpp:1124
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1548
MachineOutlinerConstructionID
MachineOutlinerConstructionID
Definition: RISCVInstrInfo.cpp:1203
llvm::TargetInstrInfo::createMIROperandComment
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
Definition: TargetInstrInfo.cpp:1347
llvm::RISCVII::MO_PCREL_HI
@ MO_PCREL_HI
Definition: RISCVBaseInfo.h:199
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:656
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:152
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
llvm::cl::opt< bool >
forwardCopyWillClobberTuple
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg, unsigned NumRegs)
Definition: RISCVInstrInfo.cpp:120
llvm::MachineInstrBundleIterator::getReverse
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Definition: MachineInstrBundleIterator.h:283
llvm::RISCVVType::decodeVLMUL
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
Definition: RISCVBaseInfo.cpp:144
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::LiveIntervals::ReplaceMachineInstrInMaps
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Definition: LiveIntervals.h:280
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:76
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:320
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::MachineFrameInfo::getObjectSize
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Definition: MachineFrameInfo.h:469
LiveIntervals.h
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RISCVInstrInfo::isFunctionSafeToOutlineFrom
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition: RISCVInstrInfo.cpp:1179
llvm::outliner::Candidate
An individual sequence of instructions to be replaced with a call to an outlined function.
Definition: MachineOutliner.h:37
llvm::RISCVCC::COND_EQ
@ COND_EQ
Definition: RISCVInstrInfo.h:31
MemoryLocation.h
llvm::RISCVInstrInfo::getMemOperandWithOffsetWidth
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
Definition: RISCVInstrInfo.cpp:1101
llvm::RISCVInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition: RISCVInstrInfo.cpp:1196
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::MachineFrameInfo::getObjectAlign
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
Definition: MachineFrameInfo.h:483
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::TargetStackID::ScalableVector
@ ScalableVector
Definition: TargetFrameLowering.h:30
llvm::MCInstBuilder
Definition: MCInstBuilder.h:21
llvm::MachineBasicBlock::getLastNonDebugInstr
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:263
MachineFunctionPass.h
isConvertibleToVMV_V_V
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
Definition: RISCVInstrInfo.cpp:125
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::X86ISD::FMSUB
@ FMSUB
Definition: X86ISelLowering.h:553
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:565
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:672
llvm::RISCVII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: RISCVBaseInfo.h:198
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
llvm::AArch64SysReg::SysReg::Encoding
unsigned Encoding
Definition: AArch64BaseInfo.h:631
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:234
llvm::MachineInstrBuilder::addMemOperand
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Definition: MachineInstrBuilder.h:202
llvm::RISCVMatInt::RegX0
@ RegX0
Definition: RISCVMatInt.h:25
llvm::RISCVInstrInfo::createMIROperandComment
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:1345
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::RISCVInstrInfo::isCopyInstrImpl
Optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1021
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
CASE_VFMA_SPLATS
#define CASE_VFMA_SPLATS(OP)
Definition: RISCVInstrInfo.cpp:1413
RISCV.h
llvm::MachineInstr::MIFlag
MIFlag
Definition: MachineInstr.h:82
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:154
llvm::SlotIndex::getRegSlot
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:253
llvm::LiveIntervals::getInterval
LiveInterval & getInterval(Register Reg)
Definition: LiveIntervals.h:114
llvm::MachineFunction
Definition: MachineFunction.h:257
CASE_VFMA_OPCODE_LMULS
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1409
llvm::MipsII::MO_TPREL_LO
@ MO_TPREL_LO
Definition: MipsBaseInfo.h:74
llvm::MachineBasicBlock::succ_empty
bool succ_empty() const
Definition: MachineBasicBlock.h:357
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineFrameInfo::setStackID
void setStackID(int ObjectIdx, uint8_t ID)
Definition: MachineFrameInfo.h:728
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:561
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1624
CASE_WIDEOP_OPCODE_LMULS
#define CASE_WIDEOP_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1676
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:137
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:491
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:130
llvm::MCInstBuilder::addImm
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Definition: MCInstBuilder.h:37
llvm::RISCVInstrInfo::movImm
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:640
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:110
uint32_t
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:836
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::RISCVII::MO_CALL
@ MO_CALL
Definition: RISCVBaseInfo.h:194
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::RISCVInstrInfo::buildOutlinedFrame
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition: RISCVInstrInfo.cpp:1304
llvm::RISCVInstrInfo::getVLENFactoredAmount
Register getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:1773
llvm::RISCVInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition: RISCVInstrInfo.cpp:859
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
getCondFromBranchOpc
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
Definition: RISCVInstrInfo.cpp:685
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:811
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineBasicBlock::addLiveIn
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
Definition: MachineBasicBlock.h:377
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:378
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
CASE_WIDEOP_CHANGE_OPCODE_LMULS
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1693
llvm::LLVMContext::diagnose
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Definition: LLVMContext.cpp:243
llvm::RISCVVType::printVType
void printVType(unsigned VType, raw_ostream &OS)
Definition: RISCVBaseInfo.cpp:160
llvm::RISCVVType::getVLMUL
static RISCVII::VLMUL getVLMUL(unsigned VType)
Definition: RISCVBaseInfo.h:398
llvm::MCInstrInfo
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:622
llvm::RISCVInstrInfo::getOutliningCandidateInfo
outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
Definition: RISCVInstrInfo.cpp:1212
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:277
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:652
llvm::DestSourcePair
Definition: TargetInstrInfo.h:68
get
Should compile to something r4 addze r3 instead we get
Definition: README.txt:24
CASE_WIDEOP_OPCODE_LMULS_MF4
#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1669
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:345
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1312
llvm::MachineInstr::hasUnmodeledSideEffects
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
Definition: MachineInstr.cpp:1409
llvm::ISD::INLINEASM_BR
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1028
llvm::RegScavenger::setRegUsed
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Definition: RegisterScavenging.cpp:51
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:190
RISCVInstrInfo.h
llvm::RISCV::isRVVSpill
bool isRVVSpill(const MachineInstr &MI, bool CheckFIs)
Definition: RISCVInstrInfo.cpp:1880
llvm::LiveIntervals
Definition: LiveIntervals.h:54
llvm::RISCVInstrInfo::isStoreToStackSlot
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:96
llvm::RISCVCC::COND_GE
@ COND_GE
Definition: RISCVInstrInfo.h:34
llvm::MachineRegisterInfo::clearVirtRegs
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
Definition: MachineRegisterInfo.cpp:200
llvm::RISCVMatInt::RegReg
@ RegReg
Definition: RISCVMatInt.h:24
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:322
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:136
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:436
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:475
llvm::RISCVInstrInfo::isAsCheapAsAMove
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:999
llvm::RISCVInstrInfo::commuteInstructionImpl
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition: RISCVInstrInfo.cpp:1561
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
RISCVSubtarget.h
llvm::RISCVInstrInfo::loadRegFromStackSlot
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:548
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:508
llvm::RISCVInstrInfo::copyPhysReg
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
Definition: RISCVInstrInfo.cpp:256
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:105
MachineOutlinerDefault
@ MachineOutlinerDefault
Definition: RISCVInstrInfo.cpp:1204
llvm::RISCVCC::CondCode
CondCode
Definition: RISCVInstrInfo.h:30
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:215
CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1686
SmallVector.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1006
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:278
MachineInstrBuilder.h
llvm::RISCVMatInt::RegImm
@ RegImm
Definition: RISCVMatInt.h:22
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::TargetInstrInfo::findCommutedOpIndices
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
Definition: TargetInstrInfo.cpp:294
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
N
#define N
llvm::RISCVInstrInfo::verifyInstruction
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition: RISCVInstrInfo.cpp:1045
RISCVMachineFunctionInfo.h
llvm::LiveRange::getSegmentContaining
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
Definition: LiveInterval.h:408
llvm::MachineBasicBlock::empty
bool empty() const
Definition: MachineBasicBlock.h:250
llvm::MCInstBuilder::addReg
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
Definition: MCInstBuilder.h:31
CASE_VFMA_CHANGE_OPCODE_LMULS
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1552
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:108
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::RISCV::isRVVSpillForZvlsseg
Optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:1893
llvm::Function::hasMinSize
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:661
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1113
isRVVWholeLoadStore
static bool isRVVWholeLoadStore(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:1852
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::LiveVariables
Definition: LiveVariables.h:47
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::cl::desc
Definition: CommandLine.h:405
RegisterScavenging.h
llvm::RISCVII::MO_TPREL_ADD
@ MO_TPREL_ADD
Definition: RISCVBaseInfo.h:203
llvm::RISCVSubtarget::hasStdExtM
bool hasStdExtM() const
Definition: RISCVSubtarget.h:147
CASE_VFMA_CHANGE_OPCODE_SPLATS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition: RISCVInstrInfo.cpp:1556
llvm::MachineInstrBundleIterator< const MachineInstr >
TargetRegistry.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:76
llvm::AVRII::MO_LO
@ MO_LO
On a symbol operand, this represents the lo part.
Definition: AVRInstrInfo.h:52
llvm::RISCVOp::OPERAND_FIRST_RISCV_IMM
@ OPERAND_FIRST_RISCV_IMM
Definition: RISCVBaseInfo.h:216
llvm::MCInstrDesc::operands
iterator_range< const_opInfo_iterator > operands() const
Definition: MCInstrDesc.h:237
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:280
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:314
llvm::RISCVInstrInfo::getInstSizeInBytes
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:973
llvm::RISCVII::MO_DIRECT_FLAG_MASK
@ MO_DIRECT_FLAG_MASK
Definition: RISCVBaseInfo.h:210
llvm::MachineOperand::isIdenticalTo
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
Definition: MachineOperand.cpp:285
llvm::AVRII::MO_HI
@ MO_HI
On a symbol operand, this represents the hi part.
Definition: AVRInstrInfo.h:55
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::RISCVII::MO_TLS_GOT_HI
@ MO_TLS_GOT_HI
Definition: RISCVBaseInfo.h:204
llvm::RISCVCC::COND_NE
@ COND_NE
Definition: RISCVInstrInfo.h:32
RISCVTargetMachine.h