LLVM  16.0.0git
RISCVInstrInfo.cpp
Go to the documentation of this file.
1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
15 #include "RISCV.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/MC/MCInstBuilder.h"
29 #include "llvm/MC/TargetRegistry.h"
31 
32 using namespace llvm;
33 
34 #define GEN_CHECK_COMPRESS_INSTR
35 #include "RISCVGenCompressInstEmitter.inc"
36 
37 #define GET_INSTRINFO_CTOR_DTOR
38 #define GET_INSTRINFO_NAMED_OPS
39 #include "RISCVGenInstrInfo.inc"
40 
42  "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
43  cl::desc("Prefer whole register move for vector registers."));
44 
46 
47 using namespace RISCV;
48 
49 #define GET_RISCVVPseudosTable_IMPL
50 #include "RISCVGenSearchableTables.inc"
51 
52 } // namespace llvm::RISCVVPseudosTable
53 
// RISCVInstrInfo constructor (the signature line was lost in extraction):
// initializes the TableGen'erated base class with the call-frame
// setup/destroy pseudo opcodes and stores the subtarget reference.
 55  : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
 56  STI(STI) {}
57 
// getNop() body (signature line lost in extraction): prefer the 2-byte c.nop
// when the compressed (C) extension is available; otherwise emit the
// canonical 4-byte nop, "addi x0, x0, 0".
 59  if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
 60  return MCInstBuilder(RISCV::C_NOP);
 61  return MCInstBuilder(RISCV::ADDI)
 62  .addReg(RISCV::X0)
 63  .addReg(RISCV::X0)
 64  .addImm(0);
 65 }
66 
// isLoadFromStackSlot (opening of the signature lost in extraction): if MI is
// a simple scalar integer/FP load whose address operand is a frame index with
// a zero immediate offset, report the frame index through FrameIndex and
// return the destination register; otherwise return 0 (no register).
 68  int &FrameIndex) const {
 69  switch (MI.getOpcode()) {
 70  default:
 71  return 0;
 // The plain (non-pseudo) loads that can directly address a stack slot.
 72  case RISCV::LB:
 73  case RISCV::LBU:
 74  case RISCV::LH:
 75  case RISCV::LHU:
 76  case RISCV::FLH:
 77  case RISCV::LW:
 78  case RISCV::FLW:
 79  case RISCV::LWU:
 80  case RISCV::LD:
 81  case RISCV::FLD:
 82  break;
 83  }
 84 
 // Operand layout: (0) dest reg, (1) base (frame index), (2) imm offset.
 // Only a zero offset counts as a load of the whole slot.
 85  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
 86  MI.getOperand(2).getImm() == 0) {
 87  FrameIndex = MI.getOperand(1).getIndex();
 88  return MI.getOperand(0).getReg();
 89  }
 90 
 91  return 0;
 92 }
93 
// isStoreToStackSlot (opening of the signature lost in extraction): mirror of
// isLoadFromStackSlot for stores — if MI is a simple scalar integer/FP store
// to a frame index with zero offset, report the frame index and return the
// stored source register; otherwise return 0.
 95  int &FrameIndex) const {
 96  switch (MI.getOpcode()) {
 97  default:
 98  return 0;
 // The plain stores that can directly address a stack slot.
 99  case RISCV::SB:
 100  case RISCV::SH:
 101  case RISCV::SW:
 102  case RISCV::FSH:
 103  case RISCV::FSW:
 104  case RISCV::SD:
 105  case RISCV::FSD:
 106  break;
 107  }
 108 
 // Operand layout: (0) src reg, (1) base (frame index), (2) imm offset.
 109  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
 110  MI.getOperand(2).getImm() == 0) {
 111  FrameIndex = MI.getOperand(1).getIndex();
 112  return MI.getOperand(0).getReg();
 113  }
 114 
 115  return 0;
 116 }
117 
/// Returns true when copying a register tuple of \p NumRegs registers in the
/// forward (low-to-high) direction would overwrite not-yet-copied source
/// registers, i.e. when the destination encoding lies strictly inside the
/// half-open source range [SrcReg, SrcReg + NumRegs).
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // A destination at or below the source can never alias a later source
  // register of the tuple during a forward copy.
  if (DstReg <= SrcReg)
    return false;
  return DstReg - SrcReg < NumRegs;
}
122 
// isConvertibleToVMV_V_V: decide whether a whole-register vector COPY (vmv<n>r.v)
// can be converted into a vl-governed vmv.v.v by scanning backwards from the
// COPY for the producing instruction and any intervening vsetvli.
// NOTE(review): this listing is a Doxygen scrape — doxygen lines 125-126 and 128
// (part of the parameter list and the initial early-exit condition) were lost
// in extraction; code below is kept verbatim.
 123 static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
 124  const MachineBasicBlock &MBB,
 127  RISCVII::VLMUL LMul) {
 129  return false;
 130 
 131  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
 132  "Unexpected COPY instruction.");
 133  Register SrcReg = MBBI->getOperand(1).getReg();
 134  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
 135 
 136  bool FoundDef = false;
 137  bool FirstVSetVLI = false;
 138  unsigned FirstSEW = 0;
 // Walk backwards from the COPY towards the start of the block.
 139  while (MBBI != MBB.begin()) {
 140  --MBBI;
 141  if (MBBI->isMetaInstruction())
 142  continue;
 143 
 144  if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
 145  MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
 146  MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
 147  // There is a vsetvli between COPY and source define instruction.
 148  // vy = def_vop ... (producing instruction)
 149  // ...
 150  // vsetvli
 151  // ...
 152  // vx = COPY vy
 153  if (!FoundDef) {
 154  if (!FirstVSetVLI) {
 155  FirstVSetVLI = true;
 156  unsigned FirstVType = MBBI->getOperand(2).getImm();
 157  RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
 158  FirstSEW = RISCVVType::getSEW(FirstVType);
 159  // The first encountered vsetvli must have the same lmul as the
 160  // register class of COPY.
 161  if (FirstLMul != LMul)
 162  return false;
 163  }
 164  // Only permit `vsetvli x0, x0, vtype` between COPY and the source
 165  // define instruction.
 166  if (MBBI->getOperand(0).getReg() != RISCV::X0)
 167  return false;
 168  if (MBBI->getOperand(1).isImm())
 169  return false;
 170  if (MBBI->getOperand(1).getReg() != RISCV::X0)
 171  return false;
 172  continue;
 173  }
 174 
 175  // MBBI is the first vsetvli before the producing instruction.
 176  unsigned VType = MBBI->getOperand(2).getImm();
 177  // If there is a vsetvli between COPY and the producing instruction.
 178  if (FirstVSetVLI) {
 179  // If SEW is different, return false.
 180  if (RISCVVType::getSEW(VType) != FirstSEW)
 181  return false;
 182  }
 183 
 184  // If the vsetvli is tail undisturbed, keep the whole register move.
 185  if (!RISCVVType::isTailAgnostic(VType))
 186  return false;
 187 
 188  // The checking is conservative. We only have register classes for
 189  // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
 190  // for fractional LMUL operations. However, we could not use the vsetvli
 191  // lmul for widening operations. The result of widening operation is
 192  // 2 x LMUL.
 193  return LMul == RISCVVType::getVLMUL(VType);
 194  } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
 195  return false;
 196  } else if (MBBI->getNumDefs()) {
 197  // Check all the instructions which will change VL.
 198  // For example, vleff has implicit def VL.
 199  if (MBBI->modifiesRegister(RISCV::VL))
 200  return false;
 201 
 202  // Only converting whole register copies to vmv.v.v when the defining
 203  // value appears in the explicit operands.
 204  for (const MachineOperand &MO : MBBI->explicit_operands()) {
 205  if (!MO.isReg() || !MO.isDef())
 206  continue;
 207  if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
 208  // We only permit the source of COPY has the same LMUL as the defined
 209  // operand.
 210  // There are cases we need to keep the whole register copy if the LMUL
 211  // is different.
 212  // For example,
 213  // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
 214  // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
 215  // # The COPY may be created by vlmul_trunc intrinsic.
 216  // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
 217  //
 218  // After widening, the valid value will be 4 x e32 elements. If we
 219  // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
 220  // FIXME: The COPY of subregister of Zvlsseg register will not be able
 221  // to convert to vmv.v.[v|i] under the constraint.
 222  if (MO.getReg() != SrcReg)
 223  return false;
 224 
 225  // In widening reduction instructions with LMUL_1 input vector case,
 226  // only checking the LMUL is insufficient due to reduction result is
 227  // always LMUL_1.
 228  // For example,
 229  // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
 230  // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
 231  // $v26 = COPY killed renamable $v8
 232  // After widening, The valid value will be 1 x e16 elements. If we
 233  // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
 234  uint64_t TSFlags = MBBI->getDesc().TSFlags;
 235  if (RISCVII::isRVVWideningReduction(TSFlags))
 236  return false;
 237 
 238  // If the producing instruction does not depend on vsetvli, do not
 239  // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
 240  if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
 241  return false;
 242 
 243  // Found the definition.
 244  FoundDef = true;
 245  DefMBBI = MBBI;
 246  break;
 247  }
 248  }
 249  }
 250  }
 251 
 // Reached the top of the block without a convertibility decision: keep the
 // whole-register move.
 252  return false;
 253 }
254 
// copyPhysReg (opening of the signature lost in extraction): emit a physical
// register copy — GPR via "addi rd, rs, 0", vector CSR read via csrrs, FPR via
// fsgnj rd, rs, rs, and vector registers / Zvlsseg tuples via whole-register
// vmv<n>r.v pseudos, downgraded to vmv.v.v/vmv.v.i when provably safe.
// NOTE(review): several doxygen lines (257 header, 269/271, 280, 364, 412)
// were lost in extraction; code below is kept verbatim.
 257  const DebugLoc &DL, MCRegister DstReg,
 258  MCRegister SrcReg, bool KillSrc) const {
 259  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
 260  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
 261  .addReg(SrcReg, getKillRegState(KillSrc))
 262  .addImm(0);
 263  return;
 264  }
 265 
 266  // Handle copy from csr
 267  if (RISCV::VCSRRegClass.contains(SrcReg) &&
 268  RISCV::GPRRegClass.contains(DstReg)) {
 270  BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
 272  .addReg(RISCV::X0);
 273  return;
 274  }
 275 
 276  // FPR->FPR copies and VR->VR copies.
 277  unsigned Opc;
 278  bool IsScalableVector = true;
 // NF is the tuple factor for Zvlsseg register groups (1 = not a tuple).
 279  unsigned NF = 1;
 281  unsigned SubRegIdx = RISCV::sub_vrm1_0;
 282  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
 283  Opc = RISCV::FSGNJ_H;
 284  IsScalableVector = false;
 285  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
 286  Opc = RISCV::FSGNJ_S;
 287  IsScalableVector = false;
 288  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
 289  Opc = RISCV::FSGNJ_D;
 290  IsScalableVector = false;
 291  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
 292  Opc = RISCV::PseudoVMV1R_V;
 293  LMul = RISCVII::LMUL_1;
 294  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
 295  Opc = RISCV::PseudoVMV2R_V;
 296  LMul = RISCVII::LMUL_2;
 297  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
 298  Opc = RISCV::PseudoVMV4R_V;
 299  LMul = RISCVII::LMUL_4;
 300  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
 301  Opc = RISCV::PseudoVMV8R_V;
 302  LMul = RISCVII::LMUL_8;
 303  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
 304  Opc = RISCV::PseudoVMV1R_V;
 305  SubRegIdx = RISCV::sub_vrm1_0;
 306  NF = 2;
 307  LMul = RISCVII::LMUL_1;
 308  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
 309  Opc = RISCV::PseudoVMV2R_V;
 310  SubRegIdx = RISCV::sub_vrm2_0;
 311  NF = 2;
 312  LMul = RISCVII::LMUL_2;
 313  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
 314  Opc = RISCV::PseudoVMV4R_V;
 315  SubRegIdx = RISCV::sub_vrm4_0;
 316  NF = 2;
 317  LMul = RISCVII::LMUL_4;
 318  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
 319  Opc = RISCV::PseudoVMV1R_V;
 320  SubRegIdx = RISCV::sub_vrm1_0;
 321  NF = 3;
 322  LMul = RISCVII::LMUL_1;
 323  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
 324  Opc = RISCV::PseudoVMV2R_V;
 325  SubRegIdx = RISCV::sub_vrm2_0;
 326  NF = 3;
 327  LMul = RISCVII::LMUL_2;
 328  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
 329  Opc = RISCV::PseudoVMV1R_V;
 330  SubRegIdx = RISCV::sub_vrm1_0;
 331  NF = 4;
 332  LMul = RISCVII::LMUL_1;
 333  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
 334  Opc = RISCV::PseudoVMV2R_V;
 335  SubRegIdx = RISCV::sub_vrm2_0;
 336  NF = 4;
 337  LMul = RISCVII::LMUL_2;
 338  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
 339  Opc = RISCV::PseudoVMV1R_V;
 340  SubRegIdx = RISCV::sub_vrm1_0;
 341  NF = 5;
 342  LMul = RISCVII::LMUL_1;
 343  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
 344  Opc = RISCV::PseudoVMV1R_V;
 345  SubRegIdx = RISCV::sub_vrm1_0;
 346  NF = 6;
 347  LMul = RISCVII::LMUL_1;
 348  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
 349  Opc = RISCV::PseudoVMV1R_V;
 350  SubRegIdx = RISCV::sub_vrm1_0;
 351  NF = 7;
 352  LMul = RISCVII::LMUL_1;
 353  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
 354  Opc = RISCV::PseudoVMV1R_V;
 355  SubRegIdx = RISCV::sub_vrm1_0;
 356  NF = 8;
 357  LMul = RISCVII::LMUL_1;
 358  } else {
 359  llvm_unreachable("Impossible reg-to-reg copy");
 360  }
 361 
 362  if (IsScalableVector) {
 363  bool UseVMV_V_V = false;
 365  unsigned VIOpc;
 366  if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
 367  UseVMV_V_V = true;
 368  // We only need to handle LMUL = 1/2/4/8 here because we only define
 369  // vector register classes for LMUL = 1/2/4/8.
 370  switch (LMul) {
 371  default:
 372  llvm_unreachable("Impossible LMUL for vector register copy.");
 373  case RISCVII::LMUL_1:
 374  Opc = RISCV::PseudoVMV_V_V_M1;
 375  VIOpc = RISCV::PseudoVMV_V_I_M1;
 376  break;
 377  case RISCVII::LMUL_2:
 378  Opc = RISCV::PseudoVMV_V_V_M2;
 379  VIOpc = RISCV::PseudoVMV_V_I_M2;
 380  break;
 381  case RISCVII::LMUL_4:
 382  Opc = RISCV::PseudoVMV_V_V_M4;
 383  VIOpc = RISCV::PseudoVMV_V_I_M4;
 384  break;
 385  case RISCVII::LMUL_8:
 386  Opc = RISCV::PseudoVMV_V_V_M8;
 387  VIOpc = RISCV::PseudoVMV_V_I_M8;
 388  break;
 389  }
 390  }
 391 
 // If the producing instruction was itself vmv.v.i, forward the immediate.
 392  bool UseVMV_V_I = false;
 393  if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
 394  UseVMV_V_I = true;
 395  Opc = VIOpc;
 396  }
 397 
 398  if (NF == 1) {
 399  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
 400  if (UseVMV_V_I)
 401  MIB = MIB.add(DefMBBI->getOperand(1));
 402  else
 403  MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
 404  if (UseVMV_V_V) {
 405  const MCInstrDesc &Desc = DefMBBI->getDesc();
 406  MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
 407  MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
 408  MIB.addReg(RISCV::VL, RegState::Implicit);
 409  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
 410  }
 411  } else {
 413 
 // Zvlsseg tuple: copy NF sub-registers one by one, choosing the copy
 // direction so the source is never clobbered before it is read.
 414  int I = 0, End = NF, Incr = 1;
 415  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
 416  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
 417  unsigned LMulVal;
 418  bool Fractional;
 419  std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
 420  assert(!Fractional && "It is impossible be fractional lmul here.");
 421  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
 422  I = NF - 1;
 423  End = -1;
 424  Incr = -1;
 425  }
 426 
 427  for (; I != End; I += Incr) {
 428  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
 429  TRI->getSubReg(DstReg, SubRegIdx + I));
 430  if (UseVMV_V_I)
 431  MIB = MIB.add(DefMBBI->getOperand(1));
 432  else
 433  MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
 434  getKillRegState(KillSrc));
 435  if (UseVMV_V_V) {
 436  const MCInstrDesc &Desc = DefMBBI->getDesc();
 437  MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
 438  MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
 439  MIB.addReg(RISCV::VL, RegState::Implicit);
 440  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
 441  }
 442  }
 443  }
 444  } else {
 // Scalar FP copy: the canonical move is fsgnj rd, rs, rs.
 445  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
 446  .addReg(SrcReg, getKillRegState(KillSrc))
 447  .addReg(SrcReg, getKillRegState(KillSrc));
 448  }
 449 }
450 
// storeRegToStackSlot (opening of the signature lost in extraction): spill
// SrcReg to frame index FI, picking the store opcode from the register class —
// SW/SD for GPRs (by XLEN), FSH/FSW/FSD for FPRs, and VSPILL pseudos for
// vector / Zvlsseg tuple classes. Scalable-vector spills take a frame-index
// addressing form without an immediate; scalar spills add a 0 offset.
// NOTE(review): doxygen lines 517-521 and 533-534 (MMO construction) were lost
// in extraction; code below is kept verbatim.
 453  Register SrcReg, bool IsKill, int FI,
 454  const TargetRegisterClass *RC,
 455  const TargetRegisterInfo *TRI) const {
 456  DebugLoc DL;
 457  if (I != MBB.end())
 458  DL = I->getDebugLoc();
 459 
 460  MachineFunction *MF = MBB.getParent();
 461  MachineFrameInfo &MFI = MF->getFrameInfo();
 462 
 463  unsigned Opcode;
 464  bool IsScalableVector = true;
 465  bool IsZvlsseg = true;
 466  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
 // GPR spill width follows XLEN: 32-bit -> SW, 64-bit -> SD.
 467  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
 468  RISCV::SW : RISCV::SD;
 469  IsScalableVector = false;
 470  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
 471  Opcode = RISCV::FSH;
 472  IsScalableVector = false;
 473  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
 474  Opcode = RISCV::FSW;
 475  IsScalableVector = false;
 476  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
 477  Opcode = RISCV::FSD;
 478  IsScalableVector = false;
 479  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
 480  Opcode = RISCV::PseudoVSPILL_M1;
 481  IsZvlsseg = false;
 482  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
 483  Opcode = RISCV::PseudoVSPILL_M2;
 484  IsZvlsseg = false;
 485  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
 486  Opcode = RISCV::PseudoVSPILL_M4;
 487  IsZvlsseg = false;
 488  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
 489  Opcode = RISCV::PseudoVSPILL_M8;
 490  IsZvlsseg = false;
 491  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
 492  Opcode = RISCV::PseudoVSPILL2_M1;
 493  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
 494  Opcode = RISCV::PseudoVSPILL2_M2;
 495  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
 496  Opcode = RISCV::PseudoVSPILL2_M4;
 497  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
 498  Opcode = RISCV::PseudoVSPILL3_M1;
 499  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
 500  Opcode = RISCV::PseudoVSPILL3_M2;
 501  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
 502  Opcode = RISCV::PseudoVSPILL4_M1;
 503  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
 504  Opcode = RISCV::PseudoVSPILL4_M2;
 505  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
 506  Opcode = RISCV::PseudoVSPILL5_M1;
 507  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
 508  Opcode = RISCV::PseudoVSPILL6_M1;
 509  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
 510  Opcode = RISCV::PseudoVSPILL7_M1;
 511  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
 512  Opcode = RISCV::PseudoVSPILL8_M1;
 513  else
 514  llvm_unreachable("Can't store this register to stack slot");
 515 
 516  if (IsScalableVector) {
 520 
 522  auto MIB = BuildMI(MBB, I, DL, get(Opcode))
 523  .addReg(SrcReg, getKillRegState(IsKill))
 524  .addFrameIndex(FI)
 525  .addMemOperand(MMO);
 526  if (IsZvlsseg) {
 527  // For spilling/reloading Zvlsseg registers, append the dummy field for
 528  // the scaled vector length. The argument will be used when expanding
 529  // these pseudo instructions.
 530  MIB.addReg(RISCV::X0);
 531  }
 532  } else {
 535  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
 536 
 537  BuildMI(MBB, I, DL, get(Opcode))
 538  .addReg(SrcReg, getKillRegState(IsKill))
 539  .addFrameIndex(FI)
 540  .addImm(0)
 541  .addMemOperand(MMO);
 542  }
 543 }
544 
// loadRegFromStackSlot (opening of the signature lost in extraction): reload
// DstReg from frame index FI — the exact mirror of storeRegToStackSlot, using
// LW/LD, FLH/FLW/FLD, or the VRELOAD pseudos per register class.
// NOTE(review): doxygen lines 611-615 and 626-627 (MMO construction) were lost
// in extraction; code below is kept verbatim.
 548  Register DstReg, int FI,
 549  const TargetRegisterClass *RC,
 550  const TargetRegisterInfo *TRI) const {
 551  DebugLoc DL;
 552  if (I != MBB.end())
 553  DL = I->getDebugLoc();
 554 
 555  MachineFunction *MF = MBB.getParent();
 556  MachineFrameInfo &MFI = MF->getFrameInfo();
 557 
 558  unsigned Opcode;
 559  bool IsScalableVector = true;
 560  bool IsZvlsseg = true;
 561  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
 // GPR reload width follows XLEN: 32-bit -> LW, 64-bit -> LD.
 562  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
 563  RISCV::LW : RISCV::LD;
 564  IsScalableVector = false;
 565  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
 566  Opcode = RISCV::FLH;
 567  IsScalableVector = false;
 568  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
 569  Opcode = RISCV::FLW;
 570  IsScalableVector = false;
 571  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
 572  Opcode = RISCV::FLD;
 573  IsScalableVector = false;
 574  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
 575  Opcode = RISCV::PseudoVRELOAD_M1;
 576  IsZvlsseg = false;
 577  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
 578  Opcode = RISCV::PseudoVRELOAD_M2;
 579  IsZvlsseg = false;
 580  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
 581  Opcode = RISCV::PseudoVRELOAD_M4;
 582  IsZvlsseg = false;
 583  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
 584  Opcode = RISCV::PseudoVRELOAD_M8;
 585  IsZvlsseg = false;
 586  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
 587  Opcode = RISCV::PseudoVRELOAD2_M1;
 588  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
 589  Opcode = RISCV::PseudoVRELOAD2_M2;
 590  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
 591  Opcode = RISCV::PseudoVRELOAD2_M4;
 592  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
 593  Opcode = RISCV::PseudoVRELOAD3_M1;
 594  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
 595  Opcode = RISCV::PseudoVRELOAD3_M2;
 596  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
 597  Opcode = RISCV::PseudoVRELOAD4_M1;
 598  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
 599  Opcode = RISCV::PseudoVRELOAD4_M2;
 600  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
 601  Opcode = RISCV::PseudoVRELOAD5_M1;
 602  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
 603  Opcode = RISCV::PseudoVRELOAD6_M1;
 604  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
 605  Opcode = RISCV::PseudoVRELOAD7_M1;
 606  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
 607  Opcode = RISCV::PseudoVRELOAD8_M1;
 608  else
 609  llvm_unreachable("Can't load this register from stack slot");
 610 
 611  if (IsScalableVector) {
 614 
 616  auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
 617  .addFrameIndex(FI)
 618  .addMemOperand(MMO);
 619  if (IsZvlsseg) {
 620  // For spilling/reloading Zvlsseg registers, append the dummy field for
 621  // the scaled vector length. The argument will be used when expanding
 622  // these pseudo instructions.
 623  MIB.addReg(RISCV::X0);
 624  }
 625  } else {
 628  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
 629 
 630  BuildMI(MBB, I, DL, get(Opcode), DstReg)
 631  .addFrameIndex(FI)
 632  .addImm(0)
 633  .addMemOperand(MMO);
 634  }
 635 }
636 
// foldMemoryOperandImpl (opening of the signature lost in extraction): fold a
// stack reload feeding a sign/zero-extend into a single narrower load (e.g.
// reload + sext.w -> lw). Returns nullptr when no fold applies. Little-endian
// only, since the fold narrows the accessed bytes at offset 0.
// NOTE(review): doxygen lines 637-639 (signature) and 682-685/690 (MMO and
// frame-index operand setup) were lost in extraction; code kept verbatim.
 640  VirtRegMap *VRM) const {
 641  const MachineFrameInfo &MFI = MF.getFrameInfo();
 642 
 643  // The below optimizations narrow the load so they are only valid for little
 644  // endian.
 645  // TODO: Support big endian by adding an offset into the frame object?
 646  if (MF.getDataLayout().isBigEndian())
 647  return nullptr;
 648 
 649  // Fold load from stack followed by sext.w into lw.
 650  // TODO: Fold with sext.b, sext.h, zext.b, zext.h, zext.w?
 651  if (Ops.size() != 1 || Ops[0] != 1)
 652  return nullptr;
 653 
 654  unsigned LoadOpc;
 655  switch (MI.getOpcode()) {
 656  default:
 // sext.w/zext.w/zext.b have no dedicated opcode; recognize their ADDI/
 // ANDI/shift-pair encodings via the RISCV::is* helpers.
 657  if (RISCV::isSEXT_W(MI)) {
 658  LoadOpc = RISCV::LW;
 659  break;
 660  }
 661  if (RISCV::isZEXT_W(MI)) {
 662  LoadOpc = RISCV::LWU;
 663  break;
 664  }
 665  if (RISCV::isZEXT_B(MI)) {
 666  LoadOpc = RISCV::LBU;
 667  break;
 668  }
 669  return nullptr;
 670  case RISCV::SEXT_H:
 671  LoadOpc = RISCV::LH;
 672  break;
 673  case RISCV::SEXT_B:
 674  LoadOpc = RISCV::LB;
 675  break;
 676  case RISCV::ZEXT_H_RV32:
 677  case RISCV::ZEXT_H_RV64:
 678  LoadOpc = RISCV::LHU;
 679  break;
 680  }
 681 
 686 
 687  Register DstReg = MI.getOperand(0).getReg();
 688  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
 689  DstReg)
 691  .addImm(0)
 692  .addMemOperand(MMO);
 693 }
694 
// movImm (opening of the signature lost in extraction): materialize the
// 64-bit constant Val into DstReg by emitting the instruction sequence
// computed by RISCVMatInt::generateInstSeq. The first instruction reads X0;
// each subsequent one chains off DstReg.
// NOTE(review): doxygen lines 695-696 (signature) and 704 (Seq declaration)
// were lost in extraction; code below is kept verbatim.
 697  const DebugLoc &DL, Register DstReg, uint64_t Val,
 698  MachineInstr::MIFlag Flag) const {
 699  Register SrcReg = RISCV::X0;
 700 
 // RV32 can only materialize sign-extended 32-bit values.
 701  if (!STI.is64Bit() && !isInt<32>(Val))
 702  report_fatal_error("Should only materialize 32-bit constants for RV32");
 703 
 705  RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
 706  assert(!Seq.empty());
 707 
 // Emit one MachineInstr per RISCVMatInt step, shaping operands by the
 // step's operand kind (imm-only, reg+x0, reg+reg, or reg+imm).
 708  for (RISCVMatInt::Inst &Inst : Seq) {
 709  switch (Inst.getOpndKind()) {
 710  case RISCVMatInt::Imm:
 711  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
 712  .addImm(Inst.Imm)
 713  .setMIFlag(Flag);
 714  break;
 715  case RISCVMatInt::RegX0:
 716  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
 717  .addReg(SrcReg, RegState::Kill)
 718  .addReg(RISCV::X0)
 719  .setMIFlag(Flag);
 720  break;
 721  case RISCVMatInt::RegReg:
 722  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
 723  .addReg(SrcReg, RegState::Kill)
 724  .addReg(SrcReg, RegState::Kill)
 725  .setMIFlag(Flag);
 726  break;
 727  case RISCVMatInt::RegImm:
 728  BuildMI(MBB, MBBI, DL, get(Inst.Opc), DstReg)
 729  .addReg(SrcReg, RegState::Kill)
 730  .addImm(Inst.Imm)
 731  .setMIFlag(Flag);
 732  break;
 733  }
 734 
 735  // Only the first instruction has X0 as its source.
 736  SrcReg = DstReg;
 737  }
 738 }
739 
// getCondFromBranchOpc (signature line lost in extraction): map a conditional
// branch opcode to its RISCVCC condition code; COND_INVALID for anything else.
 741  switch (Opc) {
 742  default:
 743  return RISCVCC::COND_INVALID;
 744  case RISCV::BEQ:
 745  return RISCVCC::COND_EQ;
 746  case RISCV::BNE:
 747  return RISCVCC::COND_NE;
 748  case RISCV::BLT:
 749  return RISCVCC::COND_LT;
 750  case RISCV::BGE:
 751  return RISCVCC::COND_GE;
 752  case RISCV::BLTU:
 753  return RISCVCC::COND_LTU;
 754  case RISCV::BGEU:
 755  return RISCVCC::COND_GEU;
 756  }
 757 }
758 
 759 // The contents of values added to Cond are not examined outside of
 760 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
 761 // push BranchOpcode, Reg1, Reg2.
// parseCondBranch (signature line lost in extraction): decompose a
// conditional branch MachineInstr into its target block and the
// {condition-code, reg, reg} triple stored in Cond.
 764  // Block ends with fall-through condbranch.
 765  assert(LastInst.getDesc().isConditionalBranch() &&
 766  "Unknown conditional branch");
 767  Target = LastInst.getOperand(2).getMBB();
 768  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
 769  Cond.push_back(MachineOperand::CreateImm(CC));
 770  Cond.push_back(LastInst.getOperand(0));
 771  Cond.push_back(LastInst.getOperand(1));
 772 }
773 
// getBrCond (signature line lost in extraction): inverse of
// getCondFromBranchOpc — return the MCInstrDesc of the branch instruction
// implementing the given condition code.
 775  switch (CC) {
 776  default:
 777  llvm_unreachable("Unknown condition code!");
 778  case RISCVCC::COND_EQ:
 779  return get(RISCV::BEQ);
 780  case RISCVCC::COND_NE:
 781  return get(RISCV::BNE);
 782  case RISCVCC::COND_LT:
 783  return get(RISCV::BLT);
 784  case RISCVCC::COND_GE:
 785  return get(RISCV::BGE);
 786  case RISCVCC::COND_LTU:
 787  return get(RISCV::BLTU);
 788  case RISCVCC::COND_GEU:
 789  return get(RISCV::BGEU);
 790  }
 791 }
792 
// getOppositeBranchCondition (signature line lost in extraction): return the
// logical negation of a condition code (EQ<->NE, LT<->GE, LTU<->GEU).
 794  switch (CC) {
 795  default:
 796  llvm_unreachable("Unrecognized conditional branch");
 797  case RISCVCC::COND_EQ:
 798  return RISCVCC::COND_NE;
 799  case RISCVCC::COND_NE:
 800  return RISCVCC::COND_EQ;
 801  case RISCVCC::COND_LT:
 802  return RISCVCC::COND_GE;
 803  case RISCVCC::COND_GE:
 804  return RISCVCC::COND_LT;
 805  case RISCVCC::COND_LTU:
 806  return RISCVCC::COND_GEU;
 807  case RISCVCC::COND_GEU:
 808  return RISCVCC::COND_LTU;
 809  }
 810 }
811 
// analyzeBranch (opening of the signature lost in extraction): implement the
// standard TargetInstrInfo contract — return false with TBB/FBB/Cond filled
// in when the block's terminators are an analyzable pattern (fallthrough,
// single uncond, single cond, or cond+uncond); return true when the block is
// not analyzable (indirect branch or >2 terminators).
// NOTE(review): doxygen lines 821, 858, 864 (iterator init and the
// TBB-assignment bodies of the single-branch cases) were lost in extraction;
// code below is kept verbatim.
 817  TBB = FBB = nullptr;
 818  Cond.clear();
 819 
 820  // If the block has no terminators, it just falls into the block after it.
 822  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
 823  return false;
 824 
 825  // Count the number of terminators and find the first unconditional or
 826  // indirect branch.
 827  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
 828  int NumTerminators = 0;
 829  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
 830  J++) {
 831  NumTerminators++;
 832  if (J->getDesc().isUnconditionalBranch() ||
 833  J->getDesc().isIndirectBranch()) {
 834  FirstUncondOrIndirectBr = J.getReverse();
 835  }
 836  }
 837 
 838  // If AllowModify is true, we can erase any terminators after
 839  // FirstUncondOrIndirectBR.
 840  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
 841  while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
 842  std::next(FirstUncondOrIndirectBr)->eraseFromParent();
 843  NumTerminators--;
 844  }
 845  I = FirstUncondOrIndirectBr;
 846  }
 847 
 848  // We can't handle blocks that end in an indirect branch.
 849  if (I->getDesc().isIndirectBranch())
 850  return true;
 851 
 852  // We can't handle blocks with more than 2 terminators.
 853  if (NumTerminators > 2)
 854  return true;
 855 
 856  // Handle a single unconditional branch.
 857  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
 859  return false;
 860  }
 861 
 862  // Handle a single conditional branch.
 863  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
 865  return false;
 866  }
 867 
 868  // Handle a conditional branch followed by an unconditional branch.
 869  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
 870  I->getDesc().isUnconditionalBranch()) {
 871  parseCondBranch(*std::prev(I), TBB, Cond);
 872  FBB = getBranchDestBlock(*I);
 873  return false;
 874  }
 875 
 876  // Otherwise, we can't handle this.
 877  return true;
 878 }
879 
// removeBranch (opening of the signature lost in extraction): erase up to two
// trailing branch terminators (an unconditional and/or a conditional branch)
// from MBB, returning how many were removed and optionally accumulating
// their encoded size into *BytesRemoved.
// NOTE(review): doxygen line 884 (the getLastNonDebugInstr iterator init) was
// lost in extraction; code below is kept verbatim.
 881  int *BytesRemoved) const {
 882  if (BytesRemoved)
 883  *BytesRemoved = 0;
 885  if (I == MBB.end())
 886  return 0;
 887 
 888  if (!I->getDesc().isUnconditionalBranch() &&
 889  !I->getDesc().isConditionalBranch())
 890  return 0;
 891 
 892  // Remove the branch.
 893  if (BytesRemoved)
 894  *BytesRemoved += getInstSizeInBytes(*I);
 895  I->eraseFromParent();
 896 
 897  I = MBB.end();
 898 
 899  if (I == MBB.begin())
 900  return 1;
 901  --I;
 902  if (!I->getDesc().isConditionalBranch())
 903  return 1;
 904 
 905  // Remove the branch.
 906  if (BytesRemoved)
 907  *BytesRemoved += getInstSizeInBytes(*I);
 908  I->eraseFromParent();
 909  return 2;
 910 }
911 
 912 // Inserts a branch into the end of the specific MachineBasicBlock, returning
 913 // the number of instructions inserted.
// insertBranch (opening of the signature lost in extraction): Cond is either
// empty (unconditional PseudoBR to TBB) or the 3-operand
// {cc, reg, reg} form produced by parseCondBranch; a non-null FBB adds a
// second, unconditional branch. *BytesAdded (if provided) accumulates the
// encoded size of what was emitted.
 916  ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
 917  if (BytesAdded)
 918  *BytesAdded = 0;
 919 
 920  // Shouldn't be a fall through.
 921  assert(TBB && "insertBranch must not be told to insert a fallthrough");
 922  assert((Cond.size() == 3 || Cond.size() == 0) &&
 923  "RISCV branch conditions have two components!");
 924 
 925  // Unconditional branch.
 926  if (Cond.empty()) {
 927  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
 928  if (BytesAdded)
 929  *BytesAdded += getInstSizeInBytes(MI);
 930  return 1;
 931  }
 932 
 933  // Either a one or two-way conditional branch.
 934  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
 935  MachineInstr &CondMI =
 936  *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
 937  if (BytesAdded)
 938  *BytesAdded += getInstSizeInBytes(CondMI);
 939 
 940  // One-way conditional branch.
 941  if (!FBB)
 942  return 1;
 943 
 944  // Two-way conditional branch.
 945  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
 946  if (BytesAdded)
 947  *BytesAdded += getInstSizeInBytes(MI);
 948  return 2;
 949 }
950 
// insertIndirectBranch (opening of the signature lost in extraction): emit a
// long-range branch (PseudoJump = auipc+jalr via MO_CALL) into the new block
// MBB. A scratch GPR is needed for the auipc result; it is normally scavenged,
// and when none is free, s11 (x27) is spilled around the jump with RestoreBB
// reloading it at the target side.
// NOTE(review): doxygen lines 964-966 (MRI/TRI locals), 969 (the fatal-error
// call head) and 995 (spill frame-index creation) were lost in extraction;
// code below is kept verbatim.
 956  assert(RS && "RegScavenger required for long branching");
 957  assert(MBB.empty() &&
 958  "new block should be inserted for expanding unconditional branch");
 959  assert(MBB.pred_size() == 1);
 960  assert(RestoreBB.empty() &&
 961  "restore block should be inserted for restoring clobbered registers");
 962 
 963  MachineFunction *MF = MBB.getParent();
 967 
 968  if (!isInt<32>(BrOffset))
 970  "Branch offsets outside of the signed 32-bit range not supported");
 971 
 972  // FIXME: A virtual register must be used initially, as the register
 973  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
 974  // uses the same workaround).
 975  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
 976  auto II = MBB.end();
 977  // We may also update the jump target to RestoreBB later.
 978  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
 979  .addReg(ScratchReg, RegState::Define | RegState::Dead)
 980  .addMBB(&DestBB, RISCVII::MO_CALL);
 981 
 982  RS->enterBasicBlockEnd(MBB);
 983  Register TmpGPR =
 984  RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
 985  /*RestoreAfter=*/false, /*SpAdj=*/0,
 986  /*AllowSpill=*/false);
 987  if (TmpGPR != RISCV::NoRegister)
 988  RS->setRegUsed(TmpGPR);
 989  else {
 990  // The case when there is no scavenged register needs special handling.
 991 
 992  // Pick s11 because it doesn't make a difference.
 993  TmpGPR = RISCV::X27;
 994 
 996  if (FrameIndex == -1)
 997  report_fatal_error("underestimated function size");
 998 
 999  storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
 1000  &RISCV::GPRRegClass, TRI);
 1001  TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
 1002  /*SpAdj=*/0, /*FIOperandNum=*/1);
 1003 
 // Redirect the jump through RestoreBB so the reload runs before DestBB.
 1004  MI.getOperand(1).setMBB(&RestoreBB);
 1005 
 1006  loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
 1007  &RISCV::GPRRegClass, TRI);
 1008  TRI->eliminateFrameIndex(RestoreBB.back(),
 1009  /*SpAdj=*/0, /*FIOperandNum=*/1);
 1010  }
 1011 
 1012  MRI.replaceRegWith(ScratchReg, TmpGPR);
 1013  MRI.clearVirtRegs();
 1014 }
1015 
// reverseBranchCondition (signature lines lost in extraction): negate the
// condition code stored in Cond[0] in place; returns false (always succeeds).
 1018  assert((Cond.size() == 3) && "Invalid branch condition!");
 1019  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
 1020  Cond[0].setImm(getOppositeBranchCondition(CC));
 1021  return false;
 1022 }
1023 
// getBranchDestBlock (signature lines lost in extraction): return the target
// MachineBasicBlock of a branch, taken from its last explicit operand.
 1026  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
 1027  // The branch target is always the last operand.
 1028  int NumOp = MI.getNumExplicitOperands();
 1029  return MI.getOperand(NumOp - 1).getMBB();
 1030 }
1031 
// isBranchOffsetInRange (opening of the signature lost in extraction): report
// whether BrOffset fits the immediate range of the given branch opcode —
// 13-bit for conditional branches, 21-bit for JAL/PseudoBR, and the
// auipc+jalr 32-bit reach for PseudoJump.
 1033  int64_t BrOffset) const {
 1034  unsigned XLen = STI.getXLen();
 1035  // Ideally we could determine the supported branch offset from the
 1036  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
 1037  // PseudoBR.
 1038  switch (BranchOp) {
 1039  default:
 1040  llvm_unreachable("Unexpected opcode!");
 1041  case RISCV::BEQ:
 1042  case RISCV::BNE:
 1043  case RISCV::BLT:
 1044  case RISCV::BGE:
 1045  case RISCV::BLTU:
 1046  case RISCV::BGEU:
 1047  return isIntN(13, BrOffset);
 1048  case RISCV::JAL:
 1049  case RISCV::PseudoBR:
 1050  return isIntN(21, BrOffset);
 1051  case RISCV::PseudoJump:
 // +0x800 accounts for auipc's rounding of the hi20 part.
 1052  return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
 1053  }
 1054 }
1055 
// Compute the encoded size in bytes of MI: 0 for meta instructions, a parsed
// estimate for inline asm, 2 when the compressor can shrink it, otherwise the
// size recorded in the MCInstrDesc.
// NOTE(review): signature line lost in this extraction - confirm upstream.
 1057  if (MI.isMetaInstruction())
 1058  return 0;
 1059 
 1060  unsigned Opcode = MI.getOpcode();
 1061 
 1062  if (Opcode == TargetOpcode::INLINEASM ||
 1063  Opcode == TargetOpcode::INLINEASM_BR) {
 1064  const MachineFunction &MF = *MI.getParent()->getParent();
 1065  const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
 // Inline asm size is estimated by scanning its string with the asm info.
 1066  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
 1067  *TM.getMCAsmInfo());
 1068  }
 1069 
 // Only query compressibility when MI is attached to a function (the
 // subtarget is needed for the check).
 1070  if (MI.getParent() && MI.getParent()->getParent()) {
 1071  const auto MF = MI.getMF();
 1072  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
 1073  const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
 1074  const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
 1075  const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
 1076  if (isCompressibleInst(MI, &ST, MRI, STI))
 1077  return 2;
 1078  }
 1079  return get(Opcode).getSize();
 1080 }
1081 
// Treat canonical FP moves (fsgnj rd, rs, rs) and ADDI/ORI/XORI with a zero
// source register or zero immediate as move-cheap; otherwise defer to the
// instruction's AsCheapAsAMove flag.
// NOTE(review): signature line lost in this extraction - confirm upstream.
 1083  const unsigned Opcode = MI.getOpcode();
 1084  switch (Opcode) {
 1085  default:
 1086  break;
 1087  case RISCV::FSGNJ_D:
 1088  case RISCV::FSGNJ_S:
 1089  case RISCV::FSGNJ_H:
 1090  // The canonical floating-point move is fsgnj rd, rs, rs.
 1091  return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
 1092  MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
 1093  case RISCV::ADDI:
 1094  case RISCV::ORI:
 1095  case RISCV::XORI:
 1096  return (MI.getOperand(1).isReg() &&
 1097  MI.getOperand(1).getReg() == RISCV::X0) ||
 1098  (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
 1099  }
 1100  return MI.isAsCheapAsAMove();
 1101 }
1102 
// Recognize register-copy-like instructions (plain moves, ADDI rd, rs, 0,
// and fsgnj rd, rs, rs) and return their dest/source operand pair, or None.
// NOTE(review): signature line lost in this extraction - confirm upstream.
 1105  if (MI.isMoveReg())
 1106  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
 1107  switch (MI.getOpcode()) {
 1108  default:
 1109  break;
 1110  case RISCV::ADDI:
 1111  // Operand 1 can be a frameindex but callers expect registers
 1112  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
 1113  MI.getOperand(2).getImm() == 0)
 1114  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
 1115  break;
 1116  case RISCV::FSGNJ_D:
 1117  case RISCV::FSGNJ_S:
 1118  case RISCV::FSGNJ_H:
 1119  // The canonical floating-point move is fsgnj rd, rs, rs.
 1120  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
 1121  MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
 1122  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
 1123  break;
 1124  }
 1125  return None;
 1126 }
1127 
// Machine verifier hook: validate each RISCV immediate operand against the
// range its operand type declares, then check the vector-pseudo TSFlags
// invariants (tied merge operand, VL operand kind/class, SEW validity,
// policy value). Returns false and sets ErrInfo on the first violation.
// NOTE(review): this extraction dropped many `case RISCVOp::OPERAND_*:`
// labels (the bodies below beginning at original lines 1155/1158/1161/1167/
// 1173/... lack their labels) and the `if (Policy ...)` condition before
// original line 1258 - restore these from the upstream file before editing.
 1129  StringRef &ErrInfo) const {
 1130  MCInstrDesc const &Desc = MI.getDesc();
 1131 
 1132  for (auto &OI : enumerate(Desc.operands())) {
 1133  unsigned OpType = OI.value().OperandType;
 1134  if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
 1135  OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
 1136  const MachineOperand &MO = MI.getOperand(OI.index());
 1137  if (MO.isImm()) {
 1138  int64_t Imm = MO.getImm();
 1139  bool Ok;
 1140  switch (OpType) {
 1141  default:
 1142  llvm_unreachable("Unexpected operand type");
 1143 
 1144  // clang-format off
 1145 #define CASE_OPERAND_UIMM(NUM) \
 1146  case RISCVOp::OPERAND_UIMM##NUM: \
 1147  Ok = isUInt<NUM>(Imm); \
 1148  break;
 1155  Ok = isShiftedUInt<5, 2>(Imm);
 1156  break;
 1158  Ok = isShiftedUInt<6, 2>(Imm);
 1159  break;
 1161  Ok = isShiftedUInt<5, 3>(Imm);
 1162  break;
 1163  CASE_OPERAND_UIMM(12)
 1164  CASE_OPERAND_UIMM(20)
 1165  // clang-format on
 1167  Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
 1168  break;
 1169  case RISCVOp::OPERAND_ZERO:
 1170  Ok = Imm == 0;
 1171  break;
 1173  Ok = isInt<5>(Imm);
 1174  break;
 1176  Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
 1177  break;
 1179  Ok = isInt<6>(Imm);
 1180  break;
 1182  Ok = Imm != 0 && isInt<6>(Imm);
 1183  break;
 1185  Ok = isUInt<10>(Imm);
 1186  break;
 1188  Ok = isUInt<11>(Imm);
 1189  break;
 1191  Ok = isInt<12>(Imm);
 1192  break;
 1194  Ok = isShiftedInt<7, 5>(Imm);
 1195  break;
 1197  Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
 1198  break;
 1200  Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
 1201  Ok = Ok && Imm != 0;
 1202  break;
 1204  Ok = STI.is64Bit() ? isUInt<5>(Imm) : isUInt<4>(Imm);
 1205  break;
 1207  Ok = Imm >= 0 && Imm <= 10;
 1208  break;
 1209  }
 1210  if (!Ok) {
 1211  ErrInfo = "Invalid immediate";
 1212  return false;
 1213  }
 1214  }
 1215  }
 1216  }
 1217 
 // Vector-pseudo invariants encoded in the instruction's TSFlags.
 1218  const uint64_t TSFlags = Desc.TSFlags;
 1219  if (RISCVII::hasMergeOp(TSFlags)) {
 1220  unsigned OpIdx = RISCVII::getMergeOpNum(Desc);
 // The merge operand must be the one tied to def operand 0.
 1221  if (MI.findTiedOperandIdx(0) != OpIdx) {
 1222  ErrInfo = "Merge op improperly tied";
 1223  return false;
 1224  }
 1225  }
 1226  if (RISCVII::hasVLOp(TSFlags)) {
 1227  const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
 1228  if (!Op.isImm() && !Op.isReg()) {
 1229  ErrInfo = "Invalid operand type for VL operand";
 1230  return false;
 1231  }
 // A register VL must live in (a subclass of) GPR.
 1232  if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
 1233  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
 1234  auto *RC = MRI.getRegClass(Op.getReg());
 1235  if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
 1236  ErrInfo = "Invalid register class for VL operand";
 1237  return false;
 1238  }
 1239  }
 1240  }
 1241  if (RISCVII::hasSEWOp(TSFlags)) {
 1242  unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
 1243  uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
 1244  if (Log2SEW > 31) {
 1245  ErrInfo = "Unexpected SEW value";
 1246  return false;
 1247  }
 // Log2SEW == 0 encodes SEW 8 here (mask instructions use 0).
 1248  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
 1249  if (!RISCVVType::isValidSEW(SEW)) {
 1250  ErrInfo = "Unexpected SEW value";
 1251  return false;
 1252  }
 1253  }
 1254  if (RISCVII::hasVecPolicyOp(TSFlags)) {
 1255  unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
 1256  uint64_t Policy = MI.getOperand(OpIdx).getImm();
 1258  ErrInfo = "Invalid Policy Value";
 1259  return false;
 1260  }
 1261  }
 1262 
 1263  return true;
 1264 }
1265 
 1266 // Return true on success, populating the base register operand, the byte
 1267 // offset, and the memory width (the size of memory being loaded/stored).
// Only the standard base+offset form (reg operand 1, imm operand 2, one
// memoperand) is recognized; anything else returns false.
// NOTE(review): the signature line (original 1268) was lost in this
// extraction - confirm against upstream.
 1269  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
 1270  unsigned &Width, const TargetRegisterInfo *TRI) const {
 1271  if (!LdSt.mayLoadOrStore())
 1272  return false;
 1273 
 1274  // Here we assume the standard RISC-V ISA, which uses a base+offset
 1275  // addressing mode. You'll need to relax these conditions to support custom
 1276  // load/stores instructions.
 1277  if (LdSt.getNumExplicitOperands() != 3)
 1278  return false;
 1279  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
 1280  return false;
 1281 
 1282  if (!LdSt.hasOneMemOperand())
 1283  return false;
 1284 
 1285  Width = (*LdSt.memoperands_begin())->getSize();
 1286  BaseReg = &LdSt.getOperand(1);
 1287  Offset = LdSt.getOperand(2).getImm();
 1288  return true;
 1289 }
1290 
// Return true when MIa and MIb provably access disjoint memory: same base
// register operand and non-overlapping [offset, offset+width) ranges.
// NOTE(review): this extraction dropped the continuation of the side-effect
// condition (original line 1297) and the TRI initialization (original line
// 1305) - restore from upstream before editing.
 1292  const MachineInstr &MIa, const MachineInstr &MIb) const {
 1293  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
 1294  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
 1295 
 1296  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
 1298  return false;
 1299 
 1300  // Retrieve the base register, offset from the base register and width. Width
 1301  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
 1302  // base registers are identical, and the offset of a lower memory access +
 1303  // the width doesn't overlap the offset of a higher memory access,
 1304  // then the memory accesses are different.
 1306  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
 1307  int64_t OffsetA = 0, OffsetB = 0;
 1308  unsigned int WidthA = 0, WidthB = 0;
 1309  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
 1310  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
 1311  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
 1312  int LowOffset = std::min(OffsetA, OffsetB);
 1313  int HighOffset = std::max(OffsetA, OffsetB);
 1314  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
 1315  if (LowOffset + LowWidth <= HighOffset)
 1316  return true;
 1317  }
 1318  }
 1319  return false;
 1320 }
1321 
// Split a machine-operand target-flags word into its direct part (bits under
// MO_DIRECT_FLAG_MASK) and the remaining bitmask part.
// NOTE(review): the signature line (original 1323) was lost in this
// extraction - confirm against upstream.
 1322 std::pair<unsigned, unsigned>
 1324  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
 1325  return std::make_pair(TF & Mask, TF & ~Mask);
 1326 }
1327 
// Table mapping each direct operand target flag to the name used when
// (de)serializing MIR; returned as an ArrayRef over function-local storage.
// NOTE(review): signature lines lost in this extraction - confirm upstream.
 1330  using namespace RISCVII;
 1331  static const std::pair<unsigned, const char *> TargetFlags[] = {
 1332  {MO_CALL, "riscv-call"},
 1333  {MO_PLT, "riscv-plt"},
 1334  {MO_LO, "riscv-lo"},
 1335  {MO_HI, "riscv-hi"},
 1336  {MO_PCREL_LO, "riscv-pcrel-lo"},
 1337  {MO_PCREL_HI, "riscv-pcrel-hi"},
 1338  {MO_GOT_HI, "riscv-got-hi"},
 1339  {MO_TPREL_LO, "riscv-tprel-lo"},
 1340  {MO_TPREL_HI, "riscv-tprel-hi"},
 1341  {MO_TPREL_ADD, "riscv-tprel-add"},
 1342  {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
 1343  {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
 1344  return makeArrayRef(TargetFlags);
 1345 }
// Outliner hook: a function is safe to outline from unless the linker may
// deduplicate it (link-once ODR, unless explicitly allowed) or it carries an
// explicit section attribute.
// NOTE(review): the first signature line was lost in this extraction.
 1347  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
 1348  const Function &F = MF.getFunction();
 1349 
 1350  // Can F be deduplicated by the linker? If it can, don't outline from it.
 1351  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
 1352  return false;
 1353 
 1354  // Don't outline from functions with section markings; the program could
 1355  // expect that all the code is in the named section.
 1356  if (F.hasSection())
 1357  return false;
 1358 
 1359  // It's safe to outline from MF.
 1360  return true;
 1361 }
1362 
// Per-basic-block outlining safety check; defers real checking to
// getOutliningCandidateInfo.
// NOTE(review): the signature line and the return statement (original lines
// 1363 and 1366) were lost in this extraction - restore from upstream.
 1364  unsigned &Flags) const {
 1365  // More accurate safety checking is done in getOutliningCandidateInfo.
 1367 }
1368 
1369 // Enum values indicating how an outlined call should be constructed.
1372 };
1373 
// Only outline by default from functions optimized for minimum size.
// NOTE(review): the first signature line was lost in this extraction.
 1375  MachineFunction &MF) const {
 1376  return MF.getFunction().hasMinSize();
 1377 }
1378 
// Build the OutlinedFunction description for a repeated sequence: drop
// candidates where X5 (t0, used as the outlined-call return address) is not
// available, measure the sequence size, and account for the call (8 bytes)
// and frame return (4, or 2 with the C extension) overheads.
// NOTE(review): the signature line was lost in this extraction.
 1380  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
 1381 
 1382  // First we need to filter out candidates where the X5 register (IE t0) can't
 1383  // be used to setup the function call.
 1384  auto CannotInsertCall = [](outliner::Candidate &C) {
 1385  const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
 1386  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
 1387  };
 1388 
 1389  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
 1390 
 1391  // If the sequence doesn't have enough candidates left, then we're done.
 1392  if (RepeatedSequenceLocs.size() < 2)
 1393  return outliner::OutlinedFunction();
 1394 
 1395  unsigned SequenceSize = 0;
 1396 
 1397  auto I = RepeatedSequenceLocs[0].front();
 1398  auto E = std::next(RepeatedSequenceLocs[0].back());
 1399  for (; I != E; ++I)
 1400  SequenceSize += getInstSizeInBytes(*I);
 1401 
 1402  // call t0, function = 8 bytes.
 1403  unsigned CallOverhead = 8;
 1404  for (auto &C : RepeatedSequenceLocs)
 1405  C.setCallInfo(MachineOutlinerDefault, CallOverhead);
 1406 
 1407  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
 1408  unsigned FrameOverhead = 4;
 1409  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
 1410  .getFeatureBits()[RISCV::FeatureStdExtC])
 1411  FrameOverhead = 2;
 1412 
 1413  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
 1414  FrameOverhead, MachineOutlinerDefault);
 1415 }
1416 
// Classify an instruction for the machine outliner. Rejected (per the checks
// below): positions/CFI in unwinding functions, inline asm, terminators with
// successors, returns, X5 clobbers, and operands referencing MBBs / block
// addresses / constant-pool / jump-table entries or pcrel-lo in contexts
// where sections may be split.
// NOTE(review): this extraction dropped the `return outliner::InstrType::...`
// lines after several conditions (original lines 1434/1436/1441/1445/1450/
// 1456/1461/1468/1474/1476) and part of the TRI initializer (1423) - restore
// from upstream before editing.
 1419  unsigned Flags) const {
 1420  MachineInstr &MI = *MBBI;
 1421  MachineBasicBlock *MBB = MI.getParent();
 1422  const TargetRegisterInfo *TRI =
 1424  const auto &F = MI.getMF()->getFunction();
 1425 
 1426  // Positions generally can't safely be outlined.
 1427  if (MI.isPosition()) {
 1428  // We can manually strip out CFI instructions later.
 1429  if (MI.isCFIInstruction())
 1430  // If current function has exception handling code, we can't outline &
 1431  // strip these CFI instructions since it may break .eh_frame section
 1432  // needed in unwinding.
 1433  return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
 1435 
 1437  }
 1438 
 1439  // Don't trust the user to write safe inline assembly.
 1440  if (MI.isInlineAsm())
 1442 
 1443  // We can't outline branches to other basic blocks.
 1444  if (MI.isTerminator() && !MBB->succ_empty())
 1446 
 1447  // We need support for tail calls to outlined functions before return
 1448  // statements can be allowed.
 1449  if (MI.isReturn())
 1451 
 1452  // Don't allow modifying the X5 register which we use for return addresses for
 1453  // these outlined functions.
 1454  if (MI.modifiesRegister(RISCV::X5, TRI) ||
 1455  MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
 1457 
 1458  // Make sure the operands don't reference something unsafe.
 1459  for (const auto &MO : MI.operands()) {
 1460  if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
 1462 
 1463  // pcrel-hi and pcrel-lo can't put in separate sections, filter that out
 1464  // if any possible.
 1465  if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
 1466  (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
 1467  F.hasSection()))
 1469  }
 1470 
 1471  // Don't allow instructions which won't be materialized to impact outlining
 1472  // analysis.
 1473  if (MI.isMetaInstruction())
 1475 
 1477 }
1478 
// Finish an outlined function body: strip CFI instructions, mark X5 (the
// outlined-call return address) live-in, and append `jalr x0, x5, 0` as the
// return.
// NOTE(review): the first signature line was lost in this extraction.
 1481  const outliner::OutlinedFunction &OF) const {
 1482 
 1483  // Strip out any CFI instructions
 // Restart the scan after each removal since the iterator is invalidated.
 1484  bool Changed = true;
 1485  while (Changed) {
 1486  Changed = false;
 1487  auto I = MBB.begin();
 1488  auto E = MBB.end();
 1489  for (; I != E; ++I) {
 1490  if (I->isCFIInstruction()) {
 1491  I->removeFromParent();
 1492  Changed = true;
 1493  break;
 1494  }
 1495  }
 1496  }
 1497 
 1498  MBB.addLiveIn(RISCV::X5);
 1499 
 1500  // Add in a return instruction to the end of the outlined frame.
 1501  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
 1502  .addReg(RISCV::X0, RegState::Define)
 1503  .addReg(RISCV::X5)
 1504  .addImm(0));
 1505 }
1506 
// Insert a call to the outlined function at It, using PseudoCALLReg with X5
// as the link register; returns the iterator to the inserted call.
// NOTE(review): the first signature line was lost in this extraction.
 1509  MachineFunction &MF, outliner::Candidate &C) const {
 1510 
 1511  // Add in a call instruction to the outlined function at the given location.
 1512  It = MBB.insert(It,
 1513  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
 1514  .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
 1515  RISCVII::MO_CALL));
 1516  return It;
 1517 }
1518 
 1519 // MIR printer helper function to annotate Operands with a comment.
// Produces a human-readable comment for vsetvli/vsetivli VType immediates,
// SEW operands (printed as "eN"), and policy operands (printed as
// "ta/tu, ma/mu"); returns an empty string for anything else.
// NOTE(review): this extraction dropped the signature line (1520), the
// generic-comment call continuation (1525), the VType printing call (1546)
// and the policy assert condition (1556) - restore from upstream.
 1521  const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
 1522  const TargetRegisterInfo *TRI) const {
 1523  // Print a generic comment for this operand if there is one.
 1524  std::string GenericComment =
 1526  if (!GenericComment.empty())
 1527  return GenericComment;
 1528 
 1529  // If not, we must have an immediate operand.
 1530  if (!Op.isImm())
 1531  return std::string();
 1532 
 1533  std::string Comment;
 1534  raw_string_ostream OS(Comment);
 1535 
 1536  uint64_t TSFlags = MI.getDesc().TSFlags;
 1537 
 1538  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
 1539  // operand of vector codegen pseudos.
 1540  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
 1541  MI.getOpcode() == RISCV::PseudoVSETVLI ||
 1542  MI.getOpcode() == RISCV::PseudoVSETIVLI ||
 1543  MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
 1544  OpIdx == 2) {
 1545  unsigned Imm = MI.getOperand(OpIdx).getImm();
 1547  } else if (RISCVII::hasSEWOp(TSFlags) &&
 1548  OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
 1549  unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
 // Log2SEW == 0 encodes SEW 8 (mask instructions).
 1550  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
 1551  assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
 1552  OS << "e" << SEW;
 1553  } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
 1554  OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
 1555  unsigned Policy = MI.getOperand(OpIdx).getImm();
 1557  "Invalid Policy Value");
 1558  OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
 1559  << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
 1560  }
 1561 
 1562  OS.flush();
 1563  return Comment;
 1564 }
1565 
1566 // clang-format off
1567 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
1568  RISCV::PseudoV##OP##_##TYPE##_##LMUL
1569 
1570 #define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
1571  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
1572  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
1573  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
1574  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
1575 
1576 #define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
1577  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
1578  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
1579 
1580 #define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
1581  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
1582  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
1583 
1584 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
1585  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
1586  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
1587 
1588 #define CASE_VFMA_SPLATS(OP) \
1589  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
1590  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
1591  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
1592 // clang-format on
1593 
// Decide which two source operands of a vector FMA pseudo may be commuted.
// The "ACC/SAC" forms only allow swapping operands 1 and 3 (with an opcode
// change performed later); the "ADD/SUB" VV forms additionally allow
// commuting the two multiplicands.
// NOTE(review): this extraction dropped the signature's first line and the
// FMSUB/FNMADD/FNMSUB VV case lines (original 1635-1637) - restore from
// upstream before editing.
 1595  unsigned &SrcOpIdx1,
 1596  unsigned &SrcOpIdx2) const {
 1597  const MCInstrDesc &Desc = MI.getDesc();
 1598  if (!Desc.isCommutable())
 1599  return false;
 1600 
 1601  switch (MI.getOpcode()) {
 1602  case CASE_VFMA_SPLATS(FMADD):
 1603  case CASE_VFMA_SPLATS(FMSUB):
 1604  case CASE_VFMA_SPLATS(FMACC):
 1605  case CASE_VFMA_SPLATS(FMSAC):
 1606  case CASE_VFMA_SPLATS(FNMADD):
 1607  case CASE_VFMA_SPLATS(FNMSUB):
 1608  case CASE_VFMA_SPLATS(FNMACC):
 1609  case CASE_VFMA_SPLATS(FNMSAC):
 1610  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
 1611  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
 1612  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
 1613  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
 1614  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
 1615  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
 1616  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
 1617  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
 1618  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
 1619  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
 1620  // If the tail policy is undisturbed we can't commute.
 1621  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
 1622  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
 1623  return false;
 1624 
 1625  // For these instructions we can only swap operand 1 and operand 3 by
 1626  // changing the opcode.
 1627  unsigned CommutableOpIdx1 = 1;
 1628  unsigned CommutableOpIdx2 = 3;
 1629  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
 1630  CommutableOpIdx2))
 1631  return false;
 1632  return true;
 1633  }
 1634  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
 1638  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
 1639  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
 1640  // If the tail policy is undisturbed we can't commute.
 1641  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
 1642  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
 1643  return false;
 1644 
 1645  // For these instructions we have more freedom. We can commute with the
 1646  // other multiplicand or with the addend/subtrahend/minuend.
 1647 
 1648  // Any fixed operand must be from source 1, 2 or 3.
 1649  if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
 1650  return false;
 1651  if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
 1652  return false;
 1653 
 1654  // It both ops are fixed one must be the tied source.
 1655  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
 1656  SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
 1657  return false;
 1658 
 1659  // Look for two different register operands assumed to be commutable
 1660  // regardless of the FMA opcode. The FMA opcode is adjusted later if
 1661  // needed.
 1662  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
 1663  SrcOpIdx2 == CommuteAnyOperandIndex) {
 1664  // At least one of operands to be commuted is not specified and
 1665  // this method is free to choose appropriate commutable operands.
 1666  unsigned CommutableOpIdx1 = SrcOpIdx1;
 1667  if (SrcOpIdx1 == SrcOpIdx2) {
 1668  // Both of operands are not fixed. Set one of commutable
 1669  // operands to the tied source.
 1670  CommutableOpIdx1 = 1;
 1671  } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
 1672  // Only one of the operands is not fixed.
 1673  CommutableOpIdx1 = SrcOpIdx2;
 1674  }
 1675 
 1676  // CommutableOpIdx1 is well defined now. Let's choose another commutable
 1677  // operand and assign its index to CommutableOpIdx2.
 1678  unsigned CommutableOpIdx2;
 1679  if (CommutableOpIdx1 != 1) {
 1680  // If we haven't already used the tied source, we must use it now.
 1681  CommutableOpIdx2 = 1;
 1682  } else {
 1683  Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
 1684 
 1685  // The commuted operands should have different registers.
 1686  // Otherwise, the commute transformation does not change anything and
 1687  // is useless. We use this as a hint to make our decision.
 1688  if (Op1Reg != MI.getOperand(2).getReg())
 1689  CommutableOpIdx2 = 2;
 1690  else
 1691  CommutableOpIdx2 = 3;
 1692  }
 1693 
 1694  // Assign the found pair of commutable indices to SrcOpIdx1 and
 1695  // SrcOpIdx2 to return those values.
 1696  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
 1697  CommutableOpIdx2))
 1698  return false;
 1699  }
 1700 
 1701  return true;
 1702  }
 1703  }
 1704 
 1705  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
 1706 }
1707 
1708 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
1709  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
1710  Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
1711  break;
1712 
1713 #define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
1714  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
1715  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
1716  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
1717  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
1718 
1719 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
1720  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
1721  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
1722 
1723 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
1724  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
1725  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
1726 
1727 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
1728  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
1729  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
1730 
1731 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
1732  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
1733  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
1734  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
1735 
// Commute the requested operands of a vector FMA pseudo, switching the
// opcode between its ACC/SAC and ADD/SUB forms when the swap involves
// operand 1 (the tied dest) and operand 3 (the addend), then delegating the
// actual operand swap to the generic implementation.
// NOTE(review): this extraction dropped the signature's first line and
// several CASE_VFMA_CHANGE_OPCODE_* lines (original 1775-1780, 1782-1784,
// 1799-1801, 1813-1815) - restore from upstream before editing.
 1737  bool NewMI,
 1738  unsigned OpIdx1,
 1739  unsigned OpIdx2) const {
 // When NewMI is set, mutate a cloned instruction instead of MI in place.
 1740  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
 1741  if (NewMI)
 1742  return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
 1743  return MI;
 1744  };
 1745 
 1746  switch (MI.getOpcode()) {
 1747  case CASE_VFMA_SPLATS(FMACC):
 1748  case CASE_VFMA_SPLATS(FMADD):
 1749  case CASE_VFMA_SPLATS(FMSAC):
 1750  case CASE_VFMA_SPLATS(FMSUB):
 1751  case CASE_VFMA_SPLATS(FNMACC):
 1752  case CASE_VFMA_SPLATS(FNMADD):
 1753  case CASE_VFMA_SPLATS(FNMSAC):
 1754  case CASE_VFMA_SPLATS(FNMSUB):
 1755  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
 1756  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
 1757  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
 1758  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
 1759  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
 1760  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
 1761  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
 1762  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
 1763  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
 1764  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
 1765  // It only make sense to toggle these between clobbering the
 1766  // addend/subtrahend/minuend one of the multiplicands.
 1767  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
 1768  assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
 1769  unsigned Opc;
 1770  switch (MI.getOpcode()) {
 1771  default:
 1772  llvm_unreachable("Unexpected opcode");
 1773  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
 1774  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
 1781  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
 1785  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
 1786  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
 1787  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
 1788  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
 1789  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
 1790  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
 1791  }
 1792 
 1793  auto &WorkingMI = cloneIfNew(MI);
 1794  WorkingMI.setDesc(get(Opc));
 1795  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
 1796  OpIdx1, OpIdx2);
 1797  }
 1798  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
 1802  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
 1803  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
 1804  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
 1805  // If one of the operands, is the addend we need to change opcode.
 1806  // Otherwise we're just swapping 2 of the multiplicands.
 1807  if (OpIdx1 == 3 || OpIdx2 == 3) {
 1808  unsigned Opc;
 1809  switch (MI.getOpcode()) {
 1810  default:
 1811  llvm_unreachable("Unexpected opcode");
 1812  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
 1816  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
 1817  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
 1818  }
 1819 
 1820  auto &WorkingMI = cloneIfNew(MI);
 1821  WorkingMI.setDesc(get(Opc));
 1822  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
 1823  OpIdx1, OpIdx2);
 1824  }
 1825  // Let the default code handle it.
 1826  break;
 1827  }
 1828  }
 1829 
 1830  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
 1831 }
1832 
1833 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1834 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1835 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1836 #undef CASE_VFMA_SPLATS
1837 #undef CASE_VFMA_OPCODE_LMULS
1838 #undef CASE_VFMA_OPCODE_COMMON
1839 
1840 // clang-format off
1841 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
1842  RISCV::PseudoV##OP##_##LMUL##_TIED
1843 
1844 #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
1845  CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
1846  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
1847  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
1848  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
1849  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
1850 
1851 #define CASE_WIDEOP_OPCODE_LMULS(OP) \
1852  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
1853  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
1854 // clang-format on
1855 
1856 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
1857  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
1858  NewOpc = RISCV::PseudoV##OP##_##LMUL; \
1859  break;
1860 
1861 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
1862  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
1863  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
1864  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
1865  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
1866  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
1867 
1868 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
1869  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
1870  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
1871 
// Convert a tied widening-op pseudo (V{F}W{ADD,SUB}{U}_WV ..._TIED) to its
// untied three-address form, rebuilding the instruction with the same five
// operands and updating LiveVariables / LiveIntervals bookkeeping. Returns
// the new instruction, or nullptr when no conversion applies.
// NOTE(review): this extraction dropped the signature's first line, the
// CASE_WIDEOP_CHANGE_OPCODE_* lines mapping to NewOpc (original 1895-1900),
// and the segment lookup before the early-clobber fixup (original 1930) -
// restore from upstream before editing.
 1873  LiveVariables *LV,
 1874  LiveIntervals *LIS) const {
 1875  switch (MI.getOpcode()) {
 1876  default:
 1877  break;
 1878  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
 1879  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
 1880  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
 1881  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
 1882  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
 1883  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
 1884  // If the tail policy is undisturbed we can't convert.
 1885  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
 1886  MI.getNumExplicitOperands() == 6);
 1887  if ((MI.getOperand(5).getImm() & 1) == 0)
 1888  return nullptr;
 1889 
 1890  // clang-format off
 1891  unsigned NewOpc;
 1892  switch (MI.getOpcode()) {
 1893  default:
 1894  llvm_unreachable("Unexpected opcode");
 1901  }
 1902  // clang-format on
 1903 
 1904  MachineBasicBlock &MBB = *MI.getParent();
 1905  MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
 1906  .add(MI.getOperand(0))
 1907  .add(MI.getOperand(1))
 1908  .add(MI.getOperand(2))
 1909  .add(MI.getOperand(3))
 1910  .add(MI.getOperand(4));
 1911  MIB.copyImplicitOps(MI);
 1912 
 // Transfer kill flags from the old instruction to the replacement.
 1913  if (LV) {
 1914  unsigned NumOps = MI.getNumOperands();
 1915  for (unsigned I = 1; I < NumOps; ++I) {
 1916  MachineOperand &Op = MI.getOperand(I);
 1917  if (Op.isReg() && Op.isKill())
 1918  LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
 1919  }
 1920  }
 1921 
 1922  if (LIS) {
 1923  SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
 1924 
 1925  if (MI.getOperand(0).isEarlyClobber()) {
 1926  // Use operand 1 was tied to early-clobber def operand 0, so its live
 1927  // interval could have ended at an early-clobber slot. Now they are not
 1928  // tied we need to update it to the normal register slot.
 1929  LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
 1931  if (S->end == Idx.getRegSlot(true))
 1932  S->end = Idx.getRegSlot();
 1933  }
 1934  }
 1935 
 1936  return MIB;
 1937  }
 1938  }
 1939 
 1940  return nullptr;
 1941 }
1942 
1943 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1944 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1945 #undef CASE_WIDEOP_OPCODE_LMULS
1946 #undef CASE_WIDEOP_OPCODE_COMMON
1947 
// Materialize Amount/8 * VLENB into a fresh virtual GPR and return it.
// Strength-reduces the multiply: plain shift for powers of two, Zba
// sh{1,2,3}add(+slli) for 3/5/9 * 2^k factors, shift+add / shift+sub for
// 2^k +/- 1, and a full MUL otherwise (diagnosing a missing M/Zmmul).
// NOTE(review): this extraction dropped the signature's first lines
// (original 1948-1950), the MRI/MF initializations (1958, 2028 context),
// and the MUL's second source operand line (2034) - restore from upstream
// before editing.
 1951  const DebugLoc &DL,
 1952  int64_t Amount,
 1953  MachineInstr::MIFlag Flag) const {
 1954  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
 1955  assert(Amount % 8 == 0 &&
 1956  "Reserve the stack by the multiple of one vector size.");
 1957 
 1959  int64_t NumOfVReg = Amount / 8;
 1960 
 1961  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
 1962  BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), VL)
 1963  .setMIFlag(Flag);
 1964  assert(isInt<32>(NumOfVReg) &&
 1965  "Expect the number of vector registers within 32-bits.");
 1966  if (isPowerOf2_32(NumOfVReg)) {
 1967  uint32_t ShiftAmount = Log2_32(NumOfVReg);
 1968  if (ShiftAmount == 0)
 1969  return VL;
 1970  BuildMI(MBB, II, DL, get(RISCV::SLLI), VL)
 1971  .addReg(VL, RegState::Kill)
 1972  .addImm(ShiftAmount)
 1973  .setMIFlag(Flag);
 1974  } else if (STI.hasStdExtZba() &&
 1975  ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
 1976  (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
 1977  (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
 1978  // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
 1979  unsigned Opc;
 1980  uint32_t ShiftAmount;
 1981  if (NumOfVReg % 9 == 0) {
 1982  Opc = RISCV::SH3ADD;
 1983  ShiftAmount = Log2_64(NumOfVReg / 9);
 1984  } else if (NumOfVReg % 5 == 0) {
 1985  Opc = RISCV::SH2ADD;
 1986  ShiftAmount = Log2_64(NumOfVReg / 5);
 1987  } else if (NumOfVReg % 3 == 0) {
 1988  Opc = RISCV::SH1ADD;
 1989  ShiftAmount = Log2_64(NumOfVReg / 3);
 1990  } else {
 1991  llvm_unreachable("Unexpected number of vregs");
 1992  }
 1993  if (ShiftAmount)
 1994  BuildMI(MBB, II, DL, get(RISCV::SLLI), VL)
 1995  .addReg(VL, RegState::Kill)
 1996  .addImm(ShiftAmount)
 1997  .setMIFlag(Flag);
 1998  BuildMI(MBB, II, DL, get(Opc), VL)
 1999  .addReg(VL, RegState::Kill)
 2000  .addReg(VL)
 2001  .setMIFlag(Flag);
 2002  } else if (isPowerOf2_32(NumOfVReg - 1)) {
 // NumOfVReg = 2^k + 1: VL*2^k + VL.
 2003  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
 2004  uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
 2005  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
 2006  .addReg(VL)
 2007  .addImm(ShiftAmount)
 2008  .setMIFlag(Flag);
 2009  BuildMI(MBB, II, DL, get(RISCV::ADD), VL)
 2010  .addReg(ScaledRegister, RegState::Kill)
 2011  .addReg(VL, RegState::Kill)
 2012  .setMIFlag(Flag);
 2013  } else if (isPowerOf2_32(NumOfVReg + 1)) {
 // NumOfVReg = 2^k - 1: VL*2^k - VL.
 2014  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
 2015  uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
 2016  BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
 2017  .addReg(VL)
 2018  .addImm(ShiftAmount)
 2019  .setMIFlag(Flag);
 2020  BuildMI(MBB, II, DL, get(RISCV::SUB), VL)
 2021  .addReg(ScaledRegister, RegState::Kill)
 2022  .addReg(VL, RegState::Kill)
 2023  .setMIFlag(Flag);
 2024  } else {
 2025  Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
 2026  movImm(MBB, II, DL, N, NumOfVReg, Flag);
 2027  if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
 2029  MF.getFunction(),
 2030  "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
 2031  "offset."});
 2032  BuildMI(MBB, II, DL, get(RISCV::MUL), VL)
 2033  .addReg(VL, RegState::Kill)
 2035  .setMIFlag(Flag);
 2036  }
 2037 
 2038  return VL;
 2039 }
2040 
 2041 // Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
// NOTE(review): the function signature line (original 2042) was lost in this
// extraction - confirm against upstream (RISCV::isSEXT_W).
 2043  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
 2044  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
 2045 }
2046 
 2047 // Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
// NOTE(review): the function signature line (original 2048) was lost in this
// extraction - confirm against upstream (RISCV::isZEXT_W).
 2049  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
 2050  MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
 2051 }
2052 
 2053 // Returns true if this is the zext.b pattern, andi rd, rs1, 255.
// NOTE(review): the function signature line (original 2054) was lost in this
// extraction - confirm against upstream (RISCV::isZEXT_B).
 2055  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
 2056  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
 2057 }
2058 
2059 static bool isRVVWholeLoadStore(unsigned Opcode) {
2060  switch (Opcode) {
2061  default:
2062  return false;
2063  case RISCV::VS1R_V:
2064  case RISCV::VS2R_V:
2065  case RISCV::VS4R_V:
2066  case RISCV::VS8R_V:
2067  case RISCV::VL1RE8_V:
2068  case RISCV::VL2RE8_V:
2069  case RISCV::VL4RE8_V:
2070  case RISCV::VL8RE8_V:
2071  case RISCV::VL1RE16_V:
2072  case RISCV::VL2RE16_V:
2073  case RISCV::VL4RE16_V:
2074  case RISCV::VL8RE16_V:
2075  case RISCV::VL1RE32_V:
2076  case RISCV::VL2RE32_V:
2077  case RISCV::VL4RE32_V:
2078  case RISCV::VL8RE32_V:
2079  case RISCV::VL1RE64_V:
2080  case RISCV::VL2RE64_V:
2081  case RISCV::VL4RE64_V:
2082  case RISCV::VL8RE64_V:
2083  return true;
2084  }
2085 }
2086 
 2088 // RVV lacks any support for immediate addressing for stack addresses, so be
 2089 // conservative.
// Returns true when MI is an RVV pseudo, a whole-register load/store, or a
// Zvlsseg spill/reload pseudo - i.e. anything needing conservative stack
// addressing.
// NOTE(review): the function signature line (original 2087) was lost in this
// extraction - confirm against upstream (RISCV::isRVVSpill).
 2090  unsigned Opcode = MI.getOpcode();
 2091  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
 2092  !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
 2093  return false;
 2094  return true;
 2095 }
2096 
// Map a Zvlsseg spill/reload pseudo to its (NF, LMUL) pair - the number of
// register fields and the per-field register group size - or None when the
// opcode is not such a pseudo.
// NOTE(review): the function signature lines (original 2097-2098) were lost
// in this extraction - confirm against upstream (RISCV::isRVVSpillForZvlsseg).
 2099  switch (Opcode) {
 2100  default:
 2101  return None;
 2102  case RISCV::PseudoVSPILL2_M1:
 2103  case RISCV::PseudoVRELOAD2_M1:
 2104  return std::make_pair(2u, 1u);
 2105  case RISCV::PseudoVSPILL2_M2:
 2106  case RISCV::PseudoVRELOAD2_M2:
 2107  return std::make_pair(2u, 2u);
 2108  case RISCV::PseudoVSPILL2_M4:
 2109  case RISCV::PseudoVRELOAD2_M4:
 2110  return std::make_pair(2u, 4u);
 2111  case RISCV::PseudoVSPILL3_M1:
 2112  case RISCV::PseudoVRELOAD3_M1:
 2113  return std::make_pair(3u, 1u);
 2114  case RISCV::PseudoVSPILL3_M2:
 2115  case RISCV::PseudoVRELOAD3_M2:
 2116  return std::make_pair(3u, 2u);
 2117  case RISCV::PseudoVSPILL4_M1:
 2118  case RISCV::PseudoVRELOAD4_M1:
 2119  return std::make_pair(4u, 1u);
 2120  case RISCV::PseudoVSPILL4_M2:
 2121  case RISCV::PseudoVRELOAD4_M2:
 2122  return std::make_pair(4u, 2u);
 2123  case RISCV::PseudoVSPILL5_M1:
 2124  case RISCV::PseudoVRELOAD5_M1:
 2125  return std::make_pair(5u, 1u);
 2126  case RISCV::PseudoVSPILL6_M1:
 2127  case RISCV::PseudoVRELOAD6_M1:
 2128  return std::make_pair(6u, 1u);
 2129  case RISCV::PseudoVSPILL7_M1:
 2130  case RISCV::PseudoVRELOAD7_M1:
 2131  return std::make_pair(7u, 1u);
 2132  case RISCV::PseudoVSPILL8_M1:
 2133  case RISCV::PseudoVRELOAD8_M1:
 2134  return std::make_pair(8u, 1u);
 2135  }
 2136 }
2137 
// Predicate over MI: two explicit defs, modifies VL, and is not inline asm.
// NOTE(review): the function signature line (original 2138) was lost in this
// extraction; the body matches the fault-only-first-load check in upstream
// RISCVInstrInfo.cpp - confirm the function name before relying on this.
 2139  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
 2140  !MI.isInlineAsm();
 2141 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:109
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:28
llvm::RISCVII::getSEWOpNum
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:188
llvm::RISCVII::isRVVWideningReduction
static bool isRVVWideningReduction(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:163
llvm::RISCVInstrInfo::reverseBranchCondition
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition: RISCVInstrInfo.cpp:1016
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:108
llvm::RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition: RISCVInstrInfo.cpp:1329
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm::RISCVInstrInfo::shouldOutlineFromFunctionByDefault
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Definition: RISCVInstrInfo.cpp:1374
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
llvm::MachineInstrBuilder::copyImplicitOps
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
Definition: MachineInstrBuilder.h:321
llvm::HexagonMCInstrInfo::getDesc
const MCInstrDesc & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
Definition: HexagonMCInstrInfo.cpp:255
llvm::RISCVInstrInfo::RISCVInstrInfo
RISCVInstrInfo(RISCVSubtarget &STI)
Definition: RISCVInstrInfo.cpp:54
llvm::MCRegisterInfo::getName
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
Definition: MCRegisterInfo.h:485
llvm::RISCVInstrInfo::getBrCond
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
Definition: RISCVInstrInfo.cpp:774
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
PreferWholeRegisterMove
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
llvm::RISCVCC::COND_GEU
@ COND_GEU
Definition: RISCVInstrInfo.h:36
llvm::RISCVOp::OPERAND_SIMM6
@ OPERAND_SIMM6
Definition: RISCVBaseInfo.h:240
llvm::RISCVOp::OPERAND_SIMM12
@ OPERAND_SIMM12
Definition: RISCVBaseInfo.h:243
llvm::RISCVCC::getOppositeBranchCondition
CondCode getOppositeBranchCondition(CondCode)
Definition: RISCVInstrInfo.cpp:793
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:1009
llvm::RISCVCC::COND_INVALID
@ COND_INVALID
Definition: RISCVInstrInfo.h:37
llvm::RISCVOp::OPERAND_SIMM5_PLUS1
@ OPERAND_SIMM5_PLUS1
Definition: RISCVBaseInfo.h:239
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::MachineInstr::mayLoadOrStore
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:1079
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:713
llvm::RISCVII::MO_CALL
@ MO_CALL
Definition: RISCVBaseInfo.h:205
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::Function
Definition: Function.h:60
llvm::RISCVInstrInfo::getOutliningType
outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition: RISCVInstrInfo.cpp:1418
llvm::MachineInstr::memoperands_begin
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:731
llvm::RISCVOp::OPERAND_LAST_RISCV_IMM
@ OPERAND_LAST_RISCV_IMM
Definition: RISCVBaseInfo.h:252
llvm::RegScavenger::scavengeRegisterBackwards
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Definition: RegisterScavenging.cpp:585
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:628
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::AArch64SysReg::lookupSysRegByName
const SysReg * lookupSysRegByName(StringRef)
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:145
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1182
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN
@ OPERAND_UIMMLOG2XLEN
Definition: RISCVBaseInfo.h:246
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:454
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:2068
ErrorHandling.h
llvm::VirtRegMap
Definition: VirtRegMap.h:33
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1802
llvm::LiveRange::Segment
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:176
MCInstBuilder.h
llvm::IRSimilarity::Invisible
@ Invisible
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:125
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::RISCVII::getVecPolicyOpNum
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:197
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:112
llvm::RISCVVType::isValidSEW
static bool isValidSEW(unsigned SEW)
Definition: RISCVBaseInfo.h:411
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:237
llvm::MCRegisterInfo::getEncodingValue
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
Definition: MCRegisterInfo.h:553
llvm::RISCVII::hasSEWOp
static bool hasSEWOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:151
llvm::MipsII::MO_TPREL_HI
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:320
CASE_OPERAND_UIMM
#define CASE_OPERAND_UIMM(NUM)
llvm::X86ISD::FNMADD
@ FNMADD
Definition: X86ISelLowering.h:552
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:513
llvm::outliner::InstrType
InstrType
Represents how an instruction should be mapped by the outliner.
Definition: MachineOutliner.h:33
llvm::RISCVVType::isTailAgnostic
static bool isTailAgnostic(unsigned VType)
Definition: RISCVBaseInfo.h:452
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:127
llvm::M68kII::MO_PLT
@ MO_PLT
On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol name from ...
Definition: M68kBaseInfo.h:114
llvm::PPCISD::FNMSUB
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
Definition: PPCISelLowering.h:170
llvm::RISCVInstrInfo::insertIndirectBranch
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
Definition: RISCVInstrInfo.cpp:951
llvm::RISCVInstrInfo::insertOutlinedCall
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
Definition: RISCVInstrInfo.cpp:1507
llvm::RISCVOp::OPERAND_UIMM8_LSB000
@ OPERAND_UIMM8_LSB000
Definition: RISCVBaseInfo.h:235
llvm::RISCVInstrInfo::STI
const RISCVSubtarget & STI
Definition: RISCVInstrInfo.h:186
llvm::Optional
Definition: APInt.h:33
llvm::RISCVII::MO_DIRECT_FLAG_MASK
@ MO_DIRECT_FLAG_MASK
Definition: RISCVBaseInfo.h:221
llvm::RISCVInstrInfo::foldMemoryOperandImpl
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
Definition: RISCVInstrInfo.cpp:637
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
STLExtras.h
llvm::MachineBasicBlock::back
MachineInstr & back()
Definition: MachineBasicBlock.h:285
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
llvm::RISCVInstrInfo::getBranchDestBlock
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1025
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:459
llvm::outliner::OutlinedFunction
The information necessary to create an outlined function for some class of candidate.
Definition: MachineOutliner.h:214
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:159
RISCVMatInt.h
llvm::RISCVOp::OPERAND_SIMM6_NONZERO
@ OPERAND_SIMM6_NONZERO
Definition: RISCVBaseInfo.h:241
llvm::RISCVInstrInfo::isLoadFromStackSlot
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:67
llvm::RISCVVType::getSEW
static unsigned getSEW(unsigned VType)
Definition: RISCVBaseInfo.h:447
llvm::RISCVInstrInfo::isBranchOffsetInRange
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition: RISCVInstrInfo.cpp:1032
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
llvm::RISCV::isZEXT_W
bool isZEXT_W(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2048
RISCVGenInstrInfo
llvm::RISCVInstrInfo::convertToThreeAddress
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Definition: RISCVInstrInfo.cpp:1872
llvm::MachineInstr::hasOneMemOperand
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:746
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::MachineInstr::hasOrderedMemoryRef
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Definition: MachineInstr.cpp:1363
MachineRegisterInfo.h
llvm::RISCV::isFaultFirstLoad
bool isFaultFirstLoad(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2138
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1025
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:197
CASE_VFMA_OPCODE_LMULS_MF4
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1580
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:111
llvm::MachineBasicBlock::pred_size
unsigned pred_size() const
Definition: MachineBasicBlock.h:365
llvm::RISCVII::TAIL_AGNOSTIC
@ TAIL_AGNOSTIC
Definition: RISCVBaseInfo.h:120
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:666
llvm::MCInstrDesc::TSFlags
uint64_t TSFlags
Definition: MCInstrDesc.h:205
llvm::Log2_64
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:554
llvm::TargetInstrInfo::commuteInstructionImpl
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
Definition: TargetInstrInfo.cpp:165
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::RISCVCC::COND_LT
@ COND_LT
Definition: RISCVInstrInfo.h:33
llvm::RISCVVPseudosTable
Definition: RISCVInstrInfo.cpp:45
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:782
llvm::RISCVSubtarget::hasStdExtZmmul
bool hasStdExtZmmul() const
Definition: RISCVSubtarget.h:195
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::RISCVII::MO_GOT_HI
@ MO_GOT_HI
Definition: RISCVBaseInfo.h:211
llvm::RISCVOp::OPERAND_RVKRNUM
@ OPERAND_RVKRNUM
Definition: RISCVBaseInfo.h:251
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:546
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:754
parseCondBranch
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
Definition: RISCVInstrInfo.cpp:762
llvm::RISCVInstrInfo::findCommutedOpIndices
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition: RISCVInstrInfo.cpp:1594
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:526
llvm::IRSimilarity::Illegal
@ Illegal
Definition: IRSimilarityIdentifier.h:76
llvm::RISCVInstrInfo::analyzeBranch
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition: RISCVInstrInfo.cpp:812
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:46
TBB
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
Definition: RISCVRedundantCopyElimination.cpp:76
LiveVariables.h
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:548
llvm::LiveVariables::replaceKillInstruction
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
Definition: LiveVariables.cpp:752
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::RISCVII::hasVLOp
static bool hasVLOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:155
llvm::RISCVCC::COND_LTU
@ COND_LTU
Definition: RISCVInstrInfo.h:35
llvm::MCInstrDesc::isCommutable
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:478
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:147
llvm::LoongArchII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: LoongArchBaseInfo.h:33
llvm::RISCVInstrInfo::decomposeMachineOperandsTargetFlags
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition: RISCVInstrInfo.cpp:1323
llvm::MachineBasicBlock::rend
reverse_iterator rend()
Definition: MachineBasicBlock.h:315
getOppositeBranchCondition
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
Definition: ARCInstrInfo.cpp:102
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
llvm::RegScavenger::enterBasicBlockEnd
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Definition: RegisterScavenging.cpp:87
llvm::raw_ostream::flush
void flush()
Definition: raw_ostream.h:185
llvm::RISCVInstrInfo::removeBranch
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition: RISCVInstrInfo.cpp:880
llvm::RISCVInstrInfo::storeRegToStackSlot
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:451
llvm::MCRegisterInfo::isSubRegisterEq
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
Definition: MCRegisterInfo.h:568
llvm::LiveInterval
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
llvm::RISCV::isZEXT_B
bool isZEXT_B(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2054
llvm::RISCVInstrInfo::getNop
MCInst getNop() const override
Definition: RISCVInstrInfo.cpp:58
llvm::TargetInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
Definition: TargetInstrInfo.cpp:1424
llvm::SlotIndex
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:82
llvm::RISCVOp::OPERAND_VTYPEI11
@ OPERAND_VTYPEI11
Definition: RISCVBaseInfo.h:250
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:428
llvm::None
const NoneType None
Definition: None.h:24
llvm::RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
Definition: RISCVBaseInfo.h:242
llvm::RISCVInstrInfo::areMemAccessesTriviallyDisjoint
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition: RISCVInstrInfo.cpp:1291
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1723
llvm::MachineRegisterInfo::getRegClass
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
Definition: MachineRegisterInfo.h:647
MachineOutlinerConstructionID
MachineOutlinerConstructionID
Definition: RISCVInstrInfo.cpp:1370
llvm::RISCVOp::OPERAND_UIMM_SHFL
@ OPERAND_UIMM_SHFL
Definition: RISCVBaseInfo.h:248
llvm::TargetInstrInfo::createMIROperandComment
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
Definition: TargetInstrInfo.cpp:1347
llvm::RISCVII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: RISCVBaseInfo.h:209
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:656
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:152
llvm::DataLayout::isBigEndian
bool isBigEndian() const
Definition: DataLayout.h:245
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
llvm::cl::opt< bool >
forwardCopyWillClobberTuple
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg, unsigned NumRegs)
Definition: RISCVInstrInfo.cpp:118
llvm::RISCVII::MO_TLS_GOT_HI
@ MO_TLS_GOT_HI
Definition: RISCVBaseInfo.h:215
llvm::MachineInstrBundleIterator::getReverse
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Definition: MachineInstrBundleIterator.h:283
llvm::RISCVVType::decodeVLMUL
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
Definition: RISCVBaseInfo.cpp:147
llvm::LiveIntervals::ReplaceMachineInstrInMaps
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Definition: LiveIntervals.h:274
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:76
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:320
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::MachineFrameInfo::getObjectSize
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Definition: MachineFrameInfo.h:469
LiveIntervals.h
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RISCVII::getVLOpNum
static unsigned getVLOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:177
llvm::RISCVII::getMergeOpNum
static unsigned getMergeOpNum(const MCInstrDesc &Desc)
Definition: RISCVBaseInfo.h:171
llvm::RISCVInstrInfo::isFunctionSafeToOutlineFrom
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition: RISCVInstrInfo.cpp:1346
llvm::outliner::Candidate
An individual sequence of instructions to be replaced with a call to an outlined function.
Definition: MachineOutliner.h:37
llvm::RISCVCC::COND_EQ
@ COND_EQ
Definition: RISCVInstrInfo.h:31
MemoryLocation.h
llvm::RISCVInstrInfo::getMemOperandWithOffsetWidth
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
Definition: RISCVInstrInfo.cpp:1268
llvm::RISCVInstrInfo::isMBBSafeToOutlineFrom
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition: RISCVInstrInfo.cpp:1363
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::MachineFrameInfo::getObjectAlign
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
Definition: MachineFrameInfo.h:483
llvm::TargetRegisterInfo::eliminateFrameIndex
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const =0
This method must be overriden to eliminate abstract frame indices from instructions which may use the...
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:439
llvm::TargetStackID::ScalableVector
@ ScalableVector
Definition: TargetFrameLowering.h:30
llvm::MCInstBuilder
Definition: MCInstBuilder.h:21
llvm::RISCVMachineFunctionInfo
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
Definition: RISCVMachineFunctionInfo.h:47
llvm::RISCVII::MO_TPREL_ADD
@ MO_TPREL_ADD
Definition: RISCVBaseInfo.h:214
llvm::MachineBasicBlock::getLastNonDebugInstr
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:263
MachineFunctionPass.h
isConvertibleToVMV_V_V
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
Definition: RISCVInstrInfo.cpp:123
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::X86ISD::FMSUB
@ FMSUB
Definition: X86ISelLowering.h:553
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:567
llvm::LoongArchII::MO_PCREL_HI
@ MO_PCREL_HI
Definition: LoongArchBaseInfo.h:32
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::LoongArchII::MO_CALL
@ MO_CALL
Definition: LoongArchBaseInfo.h:30
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:672
llvm::AArch64SysReg::SysReg::Encoding
unsigned Encoding
Definition: AArch64BaseInfo.h:667
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:261
llvm::MachineInstrBuilder::addMemOperand
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Definition: MachineInstrBuilder.h:202
llvm::RISCVMatInt::RegX0
@ RegX0
Definition: RISCVMatInt.h:25
llvm::RISCVInstrInfo::createMIROperandComment
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:1520
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::RISCVInstrInfo::isCopyInstrImpl
Optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1104
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
CASE_VFMA_SPLATS
#define CASE_VFMA_SPLATS(OP)
Definition: RISCVInstrInfo.cpp:1588
RISCV.h
llvm::MachineInstr::MIFlag
MIFlag
Definition: MachineInstr.h:82
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:164
llvm::SlotIndex::getRegSlot
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:259
llvm::LiveIntervals::getInterval
LiveInterval & getInterval(Register Reg)
Definition: LiveIntervals.h:112
llvm::RISCVII::hasMergeOp
static bool hasMergeOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:147
llvm::MachineFunction
Definition: MachineFunction.h:257
CASE_VFMA_OPCODE_LMULS
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1584
llvm::RISCVOp::OPERAND_SIMM5
@ OPERAND_SIMM5
Definition: RISCVBaseInfo.h:238
llvm::MipsII::MO_TPREL_LO
@ MO_TPREL_LO
Definition: MipsBaseInfo.h:74
llvm::MachineBasicBlock::succ_empty
bool succ_empty() const
Definition: MachineBasicBlock.h:384
llvm::ArrayRef< unsigned >
llvm::MachineFrameInfo::setStackID
void setStackID(int ObjectIdx, uint8_t ID)
Definition: MachineFrameInfo.h:728
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:561
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:46
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
CASE_WIDEOP_OPCODE_LMULS
#define CASE_WIDEOP_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1851
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:137
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:516
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:139
llvm::MCInstBuilder::addImm
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Definition: MCInstBuilder.h:37
llvm::RISCVInstrInfo::movImm
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:695
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:110
uint32_t
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:838
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
CC
auto CC
Definition: RISCVRedundantCopyElimination.cpp:79
llvm::RISCVInstrInfo::buildOutlinedFrame
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition: RISCVInstrInfo.cpp:1479
llvm::RISCVInstrInfo::getVLENFactoredAmount
Register getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:1948
llvm::RISCVInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition: RISCVInstrInfo.cpp:914
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
getCondFromBranchOpc
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
Definition: RISCVInstrInfo.cpp:740
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:719
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineBasicBlock::addLiveIn
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
Definition: MachineBasicBlock.h:404
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:378
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
CASE_WIDEOP_CHANGE_OPCODE_LMULS
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1868
llvm::LLVMContext::diagnose
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Definition: LLVMContext.cpp:248
llvm::RISCVVType::printVType
void printVType(unsigned VType, raw_ostream &OS)
Definition: RISCVBaseInfo.cpp:163
llvm::RISCVVType::getVLMUL
static RISCVII::VLMUL getVLMUL(unsigned VType)
Definition: RISCVBaseInfo.h:423
llvm::RISCVOp::OPERAND_UIMM8_LSB00
@ OPERAND_UIMM8_LSB00
Definition: RISCVBaseInfo.h:234
llvm::RISCVMachineFunctionInfo::getBranchRelaxationScratchFrameIndex
int getBranchRelaxationScratchFrameIndex() const
Definition: RISCVMachineFunctionInfo.h:90
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:622
llvm::RISCVInstrInfo::getOutliningCandidateInfo
outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
Definition: RISCVInstrInfo.cpp:1379
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:280
llvm::RISCVII::MO_TLS_GD_HI
@ MO_TLS_GD_HI
Definition: RISCVBaseInfo.h:216
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:652
llvm::DestSourcePair
Definition: TargetInstrInfo.h:69
get
Should compile to something r4 addze r3 instead we get
Definition: README.txt:24
CASE_WIDEOP_OPCODE_LMULS_MF4
#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1844
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:348
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1326
llvm::RISCVOp::OPERAND_VTYPEI10
@ OPERAND_VTYPEI10
Definition: RISCVBaseInfo.h:249
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_UIMMLOG2XLEN_NONZERO
Definition: RISCVBaseInfo.h:247
llvm::MachineInstr::hasUnmodeledSideEffects
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
Definition: MachineInstr.cpp:1437
llvm::ISD::INLINEASM_BR
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1028
llvm::RegScavenger::setRegUsed
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Definition: RegisterScavenging.cpp:51
llvm::RISCV::isRVVSpill
bool isRVVSpill(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2087
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:207
RISCVInstrInfo.h
llvm::LiveIntervals
Definition: LiveIntervals.h:53
llvm::RISCVInstrInfo::isStoreToStackSlot
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:94
llvm::RISCVCC::COND_GE
@ COND_GE
Definition: RISCVInstrInfo.h:34
llvm::MachineRegisterInfo::clearVirtRegs
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
Definition: MachineRegisterInfo.cpp:200
llvm::RISCVMatInt::RegReg
@ RegReg
Definition: RISCVMatInt.h:24
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:322
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:136
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:439
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:475
llvm::RISCVInstrInfo::isAsCheapAsAMove
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1082
llvm::RISCVInstrInfo::commuteInstructionImpl
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition: RISCVInstrInfo.cpp:1736
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
RISCVSubtarget.h
llvm::RISCVInstrInfo::loadRegFromStackSlot
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:545
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:546
llvm::RISCVInstrInfo::copyPhysReg
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
Definition: RISCVInstrInfo.cpp:255
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:105
MachineOutlinerDefault
@ MachineOutlinerDefault
Definition: RISCVInstrInfo.cpp:1371
llvm::RISCVCC::CondCode
CondCode
Definition: RISCVInstrInfo.h:30
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:215
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:357
CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1861
SmallVector.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1018
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:305
MachineInstrBuilder.h
llvm::RISCVMatInt::RegImm
@ RegImm
Definition: RISCVMatInt.h:22
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::TargetInstrInfo::findCommutedOpIndices
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
Definition: TargetInstrInfo.cpp:294
llvm::RISCVOp::OPERAND_ZERO
@ OPERAND_ZERO
Definition: RISCVBaseInfo.h:237
N
#define N
llvm::RISCVInstrInfo::verifyInstruction
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition: RISCVInstrInfo.cpp:1128
RISCVMachineFunctionInfo.h
llvm::LiveRange::getSegmentContaining
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
Definition: LiveInterval.h:408
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::MachineFunction::getDataLayout
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Definition: MachineFunction.cpp:285
llvm::MachineBasicBlock::empty
bool empty() const
Definition: MachineBasicBlock.h:277
llvm::MCInstBuilder::addReg
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
Definition: MCInstBuilder.h:31
CASE_VFMA_CHANGE_OPCODE_LMULS
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1727
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:108
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::RISCVII::MASK_AGNOSTIC
@ MASK_AGNOSTIC
Definition: RISCVBaseInfo.h:121
llvm::RISCV::isRVVSpillForZvlsseg
Optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:2098
llvm::Function::hasMinSize
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:661
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1134
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
isRVVWholeLoadStore
static bool isRVVWholeLoadStore(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:2059
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::RISCVOp::OPERAND_SIMM12_LSB00000
@ OPERAND_SIMM12_LSB00000
Definition: RISCVBaseInfo.h:244
llvm::RISCVOp::OPERAND_UIMM7_LSB00
@ OPERAND_UIMM7_LSB00
Definition: RISCVBaseInfo.h:233
llvm::LiveVariables
Definition: LiveVariables.h:47
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::cl::desc
Definition: CommandLine.h:412
RegisterScavenging.h
llvm::RISCVSubtarget::hasStdExtM
bool hasStdExtM() const
Definition: RISCVSubtarget.h:156
CASE_VFMA_CHANGE_OPCODE_SPLATS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition: RISCVInstrInfo.cpp:1731
llvm::MachineInstrBundleIterator< const MachineInstr >
llvm::isPowerOf2_64
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:464
llvm::RISCV::isSEXT_W
bool isSEXT_W(const MachineInstr &MI)
Definition: RISCVInstrInfo.cpp:2042
TargetRegistry.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:76
llvm::AVRII::MO_LO
@ MO_LO
On a symbol operand, this represents the lo part.
Definition: AVRInstrInfo.h:52
llvm::RISCVOp::OPERAND_FIRST_RISCV_IMM
@ OPERAND_FIRST_RISCV_IMM
Definition: RISCVBaseInfo.h:227
llvm::MCInstrDesc::operands
iterator_range< const_opInfo_iterator > operands() const
Definition: MCInstrDesc.h:237
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:307
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:314
llvm::RISCVInstrInfo::getInstSizeInBytes
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1056
llvm::MachineOperand::isIdenticalTo
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
Definition: MachineOperand.cpp:287
llvm::AVRII::MO_HI
@ MO_HI
On a symbol operand, this represents the hi part.
Definition: AVRInstrInfo.h:55
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::RISCVCC::COND_NE
@ COND_NE
Definition: RISCVInstrInfo.h:32
RISCVTargetMachine.h