LLVM  14.0.0git
RISCVInstrInfo.cpp
Go to the documentation of this file.
1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
15 #include "RISCV.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/MC/MCInstBuilder.h"
29 #include "llvm/MC/TargetRegistry.h"
31 
32 using namespace llvm;
33 
34 #define GEN_CHECK_COMPRESS_INSTR
35 #include "RISCVGenCompressInstEmitter.inc"
36 
37 #define GET_INSTRINFO_CTOR_DTOR
38 #define GET_INSTRINFO_NAMED_OPS
39 #include "RISCVGenInstrInfo.inc"
40 
42  "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
43  cl::desc("Prefer whole register move for vector registers."));
44 
45 namespace llvm {
46 namespace RISCVVPseudosTable {
47 
48 using namespace RISCV;
49 
50 #define GET_RISCVVPseudosTable_IMPL
51 #include "RISCVGenSearchableTables.inc"
52 
53 } // namespace RISCVVPseudosTable
54 } // namespace llvm
55 
57  : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
58  STI(STI) {}
59 
61  if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
62  return MCInstBuilder(RISCV::C_NOP);
63  return MCInstBuilder(RISCV::ADDI)
64  .addReg(RISCV::X0)
65  .addReg(RISCV::X0)
66  .addImm(0);
67 }
68 
70  int &FrameIndex) const {
71  switch (MI.getOpcode()) {
72  default:
73  return 0;
74  case RISCV::LB:
75  case RISCV::LBU:
76  case RISCV::LH:
77  case RISCV::LHU:
78  case RISCV::FLH:
79  case RISCV::LW:
80  case RISCV::FLW:
81  case RISCV::LWU:
82  case RISCV::LD:
83  case RISCV::FLD:
84  break;
85  }
86 
87  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
88  MI.getOperand(2).getImm() == 0) {
89  FrameIndex = MI.getOperand(1).getIndex();
90  return MI.getOperand(0).getReg();
91  }
92 
93  return 0;
94 }
95 
97  int &FrameIndex) const {
98  switch (MI.getOpcode()) {
99  default:
100  return 0;
101  case RISCV::SB:
102  case RISCV::SH:
103  case RISCV::SW:
104  case RISCV::FSH:
105  case RISCV::FSW:
106  case RISCV::SD:
107  case RISCV::FSD:
108  break;
109  }
110 
111  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
112  MI.getOperand(2).getImm() == 0) {
113  FrameIndex = MI.getOperand(1).getIndex();
114  return MI.getOperand(0).getReg();
115  }
116 
117  return 0;
118 }
119 
/// Return true if copying a tuple of NumRegs consecutive registers from
/// SrcReg to DstReg in forward (low-to-high) order would overwrite part of
/// the source tuple before it has been read, i.e. the destination encoding
/// lies strictly inside (SrcReg, SrcReg + NumRegs). Callers use this to
/// decide whether a per-register tuple copy must run backwards instead.
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
124 
// Decide whether a whole-register vector COPY can instead be lowered to
// vmv.v.v: scan backwards from the COPY for its producing (defining)
// instruction, checking that any intervening vsetvli keeps VL/VTYPE
// compatible (same LMUL as the COPY's register class, same SEW as the first
// vsetvli seen, `vsetvli x0, x0, vtype` form only, tail-agnostic).
// NOTE(review): this extraction is missing original lines 127-128 (likely
// the MachineBasicBlock::const_iterator MBBI / DefMBBI parameters -- confirm
// against upstream) and line 130 (the condition that guards the otherwise
// unconditional-looking `return false;` below).
125 static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
126  const MachineBasicBlock &MBB,
129  RISCVII::VLMUL LMul) {
131  return false;
132 
133  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
134  "Unexpected COPY instruction.");
135  Register SrcReg = MBBI->getOperand(1).getReg();
136  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
137 
138  bool FoundDef = false;
139  bool FirstVSetVLI = false;
140  unsigned FirstSEW = 0;
 // Walk backwards from the COPY towards the start of the block, looking for
 // the instruction that defines the COPY source register.
141  while (MBBI != MBB.begin()) {
142  --MBBI;
143  if (MBBI->isMetaInstruction())
144  continue;
145 
146  if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
147  MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
148  MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
149  // There is a vsetvli between COPY and source define instruction.
150  // vy = def_vop ... (producing instruction)
151  // ...
152  // vsetvli
153  // ...
154  // vx = COPY vy
155  if (!FoundDef) {
156  if (!FirstVSetVLI) {
157  FirstVSetVLI = true;
158  unsigned FirstVType = MBBI->getOperand(2).getImm();
159  RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
160  FirstSEW = RISCVVType::getSEW(FirstVType);
161  // The first encountered vsetvli must have the same lmul as the
162  // register class of COPY.
163  if (FirstLMul != LMul)
164  return false;
165  }
166  // Only permit `vsetvli x0, x0, vtype` between COPY and the source
167  // define instruction.
168  if (MBBI->getOperand(0).getReg() != RISCV::X0)
169  return false;
170  if (MBBI->getOperand(1).isImm())
171  return false;
172  if (MBBI->getOperand(1).getReg() != RISCV::X0)
173  return false;
174  continue;
175  }
176 
177  // MBBI is the first vsetvli before the producing instruction.
178  unsigned VType = MBBI->getOperand(2).getImm();
179  // If there is a vsetvli between COPY and the producing instruction.
180  if (FirstVSetVLI) {
181  // If SEW is different, return false.
182  if (RISCVVType::getSEW(VType) != FirstSEW)
183  return false;
184  }
185 
186  // If the vsetvli is tail undisturbed, keep the whole register move.
187  if (!RISCVVType::isTailAgnostic(VType))
188  return false;
189 
190  // The checking is conservative. We only have register classes for
191  // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
192  // for fractional LMUL operations. However, we could not use the vsetvli
193  // lmul for widening operations. The result of widening operation is
194  // 2 x LMUL.
195  return LMul == RISCVVType::getVLMUL(VType);
196  } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
 // Inline asm and calls could change VL/VTYPE in ways we cannot track;
 // conservatively keep the whole-register move.
197  return false;
198  } else if (MBBI->getNumDefs()) {
199  // Check all the instructions which will change VL.
200  // For example, vleff has implicit def VL.
201  if (MBBI->modifiesRegister(RISCV::VL))
202  return false;
203 
204  // Only converting whole register copies to vmv.v.v when the defining
205  // value appears in the explicit operands.
206  for (const MachineOperand &MO : MBBI->explicit_operands()) {
207  if (!MO.isReg() || !MO.isDef())
208  continue;
209  if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
210  // We only permit the source of COPY has the same LMUL as the defined
211  // operand.
212  // There are cases we need to keep the whole register copy if the LMUL
213  // is different.
214  // For example,
215  // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
216  // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
217  // # The COPY may be created by vlmul_trunc intrinsic.
218  // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
219  //
220  // After widening, the valid value will be 4 x e32 elements. If we
221  // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
222  // FIXME: The COPY of subregister of Zvlsseg register will not be able
223  // to convert to vmv.v.[v|i] under the constraint.
224  if (MO.getReg() != SrcReg)
225  return false;
226 
227  // In widening reduction instructions with LMUL_1 input vector case,
228  // only checking the LMUL is insufficient due to reduction result is
229  // always LMUL_1.
230  // For example,
231  // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
232  // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
233  // $v26 = COPY killed renamable $v8
234  // After widening, The valid value will be 1 x e16 elements. If we
235  // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
236  uint64_t TSFlags = MBBI->getDesc().TSFlags;
237  if (RISCVII::isRVVWideningReduction(TSFlags))
238  return false;
239 
240  // Found the definition.
241  FoundDef = true;
242  DefMBBI = MBBI;
243  // If the producing instruction does not depend on vsetvli, do not
244  // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
245  if (!RISCVII::hasSEWOp(TSFlags))
246  return false;
247  break;
248  }
249  }
250  }
251  }
252 
253  return false;
254 }
255 
258  const DebugLoc &DL, MCRegister DstReg,
259  MCRegister SrcReg, bool KillSrc) const {
260  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
261  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
262  .addReg(SrcReg, getKillRegState(KillSrc))
263  .addImm(0);
264  return;
265  }
266 
267  // FPR->FPR copies and VR->VR copies.
268  unsigned Opc;
269  bool IsScalableVector = true;
270  unsigned NF = 1;
272  unsigned SubRegIdx = RISCV::sub_vrm1_0;
273  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
274  Opc = RISCV::FSGNJ_H;
275  IsScalableVector = false;
276  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
277  Opc = RISCV::FSGNJ_S;
278  IsScalableVector = false;
279  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
280  Opc = RISCV::FSGNJ_D;
281  IsScalableVector = false;
282  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
283  Opc = RISCV::PseudoVMV1R_V;
284  LMul = RISCVII::LMUL_1;
285  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
286  Opc = RISCV::PseudoVMV2R_V;
287  LMul = RISCVII::LMUL_2;
288  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
289  Opc = RISCV::PseudoVMV4R_V;
290  LMul = RISCVII::LMUL_4;
291  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
292  Opc = RISCV::PseudoVMV8R_V;
293  LMul = RISCVII::LMUL_8;
294  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
295  Opc = RISCV::PseudoVMV1R_V;
296  SubRegIdx = RISCV::sub_vrm1_0;
297  NF = 2;
298  LMul = RISCVII::LMUL_1;
299  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
300  Opc = RISCV::PseudoVMV2R_V;
301  SubRegIdx = RISCV::sub_vrm2_0;
302  NF = 2;
303  LMul = RISCVII::LMUL_2;
304  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
305  Opc = RISCV::PseudoVMV4R_V;
306  SubRegIdx = RISCV::sub_vrm4_0;
307  NF = 2;
308  LMul = RISCVII::LMUL_4;
309  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
310  Opc = RISCV::PseudoVMV1R_V;
311  SubRegIdx = RISCV::sub_vrm1_0;
312  NF = 3;
313  LMul = RISCVII::LMUL_1;
314  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
315  Opc = RISCV::PseudoVMV2R_V;
316  SubRegIdx = RISCV::sub_vrm2_0;
317  NF = 3;
318  LMul = RISCVII::LMUL_2;
319  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
320  Opc = RISCV::PseudoVMV1R_V;
321  SubRegIdx = RISCV::sub_vrm1_0;
322  NF = 4;
323  LMul = RISCVII::LMUL_1;
324  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
325  Opc = RISCV::PseudoVMV2R_V;
326  SubRegIdx = RISCV::sub_vrm2_0;
327  NF = 4;
328  LMul = RISCVII::LMUL_2;
329  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
330  Opc = RISCV::PseudoVMV1R_V;
331  SubRegIdx = RISCV::sub_vrm1_0;
332  NF = 5;
333  LMul = RISCVII::LMUL_1;
334  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
335  Opc = RISCV::PseudoVMV1R_V;
336  SubRegIdx = RISCV::sub_vrm1_0;
337  NF = 6;
338  LMul = RISCVII::LMUL_1;
339  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
340  Opc = RISCV::PseudoVMV1R_V;
341  SubRegIdx = RISCV::sub_vrm1_0;
342  NF = 7;
343  LMul = RISCVII::LMUL_1;
344  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
345  Opc = RISCV::PseudoVMV1R_V;
346  SubRegIdx = RISCV::sub_vrm1_0;
347  NF = 8;
348  LMul = RISCVII::LMUL_1;
349  } else {
350  llvm_unreachable("Impossible reg-to-reg copy");
351  }
352 
353  if (IsScalableVector) {
354  bool UseVMV_V_V = false;
356  unsigned DefExplicitOpNum;
357  unsigned VIOpc;
358  if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
359  UseVMV_V_V = true;
360  DefExplicitOpNum = DefMBBI->getNumExplicitOperands();
361  // We only need to handle LMUL = 1/2/4/8 here because we only define
362  // vector register classes for LMUL = 1/2/4/8.
363  switch (LMul) {
364  default:
365  llvm_unreachable("Impossible LMUL for vector register copy.");
366  case RISCVII::LMUL_1:
367  Opc = RISCV::PseudoVMV_V_V_M1;
368  VIOpc = RISCV::PseudoVMV_V_I_M1;
369  break;
370  case RISCVII::LMUL_2:
371  Opc = RISCV::PseudoVMV_V_V_M2;
372  VIOpc = RISCV::PseudoVMV_V_I_M2;
373  break;
374  case RISCVII::LMUL_4:
375  Opc = RISCV::PseudoVMV_V_V_M4;
376  VIOpc = RISCV::PseudoVMV_V_I_M4;
377  break;
378  case RISCVII::LMUL_8:
379  Opc = RISCV::PseudoVMV_V_V_M8;
380  VIOpc = RISCV::PseudoVMV_V_I_M8;
381  break;
382  }
383  }
384 
385  bool UseVMV_V_I = false;
386  if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
387  UseVMV_V_I = true;
388  Opc = VIOpc;
389  }
390 
391  if (NF == 1) {
392  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
393  if (UseVMV_V_I)
394  MIB = MIB.add(DefMBBI->getOperand(1));
395  else
396  MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
397  if (UseVMV_V_V) {
398  // The last two arguments of vector instructions are
399  // AVL, SEW. We also need to append the implicit-use vl and vtype.
400  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
401  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
402  MIB.addReg(RISCV::VL, RegState::Implicit);
403  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
404  }
405  } else {
407 
408  int I = 0, End = NF, Incr = 1;
409  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
410  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
411  unsigned LMulVal;
412  bool Fractional;
413  std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
414  assert(!Fractional && "It is impossible be fractional lmul here.");
415  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
416  I = NF - 1;
417  End = -1;
418  Incr = -1;
419  }
420 
421  for (; I != End; I += Incr) {
422  auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
423  TRI->getSubReg(DstReg, SubRegIdx + I));
424  if (UseVMV_V_I)
425  MIB = MIB.add(DefMBBI->getOperand(1));
426  else
427  MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
428  getKillRegState(KillSrc));
429  if (UseVMV_V_V) {
430  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
431  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
432  MIB.addReg(RISCV::VL, RegState::Implicit);
433  MIB.addReg(RISCV::VTYPE, RegState::Implicit);
434  }
435  }
436  }
437  } else {
438  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
439  .addReg(SrcReg, getKillRegState(KillSrc))
440  .addReg(SrcReg, getKillRegState(KillSrc));
441  }
442 }
443 
446  Register SrcReg, bool IsKill, int FI,
447  const TargetRegisterClass *RC,
448  const TargetRegisterInfo *TRI) const {
449  DebugLoc DL;
450  if (I != MBB.end())
451  DL = I->getDebugLoc();
452 
453  MachineFunction *MF = MBB.getParent();
454  MachineFrameInfo &MFI = MF->getFrameInfo();
455 
456  unsigned Opcode;
457  bool IsScalableVector = true;
458  bool IsZvlsseg = true;
459  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
460  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
461  RISCV::SW : RISCV::SD;
462  IsScalableVector = false;
463  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
464  Opcode = RISCV::FSH;
465  IsScalableVector = false;
466  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
467  Opcode = RISCV::FSW;
468  IsScalableVector = false;
469  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
470  Opcode = RISCV::FSD;
471  IsScalableVector = false;
472  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
473  Opcode = RISCV::PseudoVSPILL_M1;
474  IsZvlsseg = false;
475  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
476  Opcode = RISCV::PseudoVSPILL_M2;
477  IsZvlsseg = false;
478  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
479  Opcode = RISCV::PseudoVSPILL_M4;
480  IsZvlsseg = false;
481  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
482  Opcode = RISCV::PseudoVSPILL_M8;
483  IsZvlsseg = false;
484  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
485  Opcode = RISCV::PseudoVSPILL2_M1;
486  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
487  Opcode = RISCV::PseudoVSPILL2_M2;
488  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
489  Opcode = RISCV::PseudoVSPILL2_M4;
490  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
491  Opcode = RISCV::PseudoVSPILL3_M1;
492  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
493  Opcode = RISCV::PseudoVSPILL3_M2;
494  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
495  Opcode = RISCV::PseudoVSPILL4_M1;
496  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
497  Opcode = RISCV::PseudoVSPILL4_M2;
498  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
499  Opcode = RISCV::PseudoVSPILL5_M1;
500  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
501  Opcode = RISCV::PseudoVSPILL6_M1;
502  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
503  Opcode = RISCV::PseudoVSPILL7_M1;
504  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
505  Opcode = RISCV::PseudoVSPILL8_M1;
506  else
507  llvm_unreachable("Can't store this register to stack slot");
508 
509  if (IsScalableVector) {
513 
515  auto MIB = BuildMI(MBB, I, DL, get(Opcode))
516  .addReg(SrcReg, getKillRegState(IsKill))
517  .addFrameIndex(FI)
518  .addMemOperand(MMO);
519  if (IsZvlsseg) {
520  // For spilling/reloading Zvlsseg registers, append the dummy field for
521  // the scaled vector length. The argument will be used when expanding
522  // these pseudo instructions.
523  MIB.addReg(RISCV::X0);
524  }
525  } else {
528  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
529 
530  BuildMI(MBB, I, DL, get(Opcode))
531  .addReg(SrcReg, getKillRegState(IsKill))
532  .addFrameIndex(FI)
533  .addImm(0)
534  .addMemOperand(MMO);
535  }
536 }
537 
540  Register DstReg, int FI,
541  const TargetRegisterClass *RC,
542  const TargetRegisterInfo *TRI) const {
543  DebugLoc DL;
544  if (I != MBB.end())
545  DL = I->getDebugLoc();
546 
547  MachineFunction *MF = MBB.getParent();
548  MachineFrameInfo &MFI = MF->getFrameInfo();
549 
550  unsigned Opcode;
551  bool IsScalableVector = true;
552  bool IsZvlsseg = true;
553  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
554  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
555  RISCV::LW : RISCV::LD;
556  IsScalableVector = false;
557  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
558  Opcode = RISCV::FLH;
559  IsScalableVector = false;
560  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
561  Opcode = RISCV::FLW;
562  IsScalableVector = false;
563  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
564  Opcode = RISCV::FLD;
565  IsScalableVector = false;
566  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
567  Opcode = RISCV::PseudoVRELOAD_M1;
568  IsZvlsseg = false;
569  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
570  Opcode = RISCV::PseudoVRELOAD_M2;
571  IsZvlsseg = false;
572  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
573  Opcode = RISCV::PseudoVRELOAD_M4;
574  IsZvlsseg = false;
575  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
576  Opcode = RISCV::PseudoVRELOAD_M8;
577  IsZvlsseg = false;
578  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
579  Opcode = RISCV::PseudoVRELOAD2_M1;
580  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
581  Opcode = RISCV::PseudoVRELOAD2_M2;
582  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
583  Opcode = RISCV::PseudoVRELOAD2_M4;
584  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
585  Opcode = RISCV::PseudoVRELOAD3_M1;
586  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
587  Opcode = RISCV::PseudoVRELOAD3_M2;
588  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
589  Opcode = RISCV::PseudoVRELOAD4_M1;
590  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
591  Opcode = RISCV::PseudoVRELOAD4_M2;
592  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
593  Opcode = RISCV::PseudoVRELOAD5_M1;
594  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
595  Opcode = RISCV::PseudoVRELOAD6_M1;
596  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
597  Opcode = RISCV::PseudoVRELOAD7_M1;
598  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
599  Opcode = RISCV::PseudoVRELOAD8_M1;
600  else
601  llvm_unreachable("Can't load this register from stack slot");
602 
603  if (IsScalableVector) {
607 
609  auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
610  .addFrameIndex(FI)
611  .addMemOperand(MMO);
612  if (IsZvlsseg) {
613  // For spilling/reloading Zvlsseg registers, append the dummy field for
614  // the scaled vector length. The argument will be used when expanding
615  // these pseudo instructions.
616  MIB.addReg(RISCV::X0);
617  }
618  } else {
621  MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
622 
623  BuildMI(MBB, I, DL, get(Opcode), DstReg)
624  .addFrameIndex(FI)
625  .addImm(0)
626  .addMemOperand(MMO);
627  }
628 }
629 
632  const DebugLoc &DL, Register DstReg, uint64_t Val,
633  MachineInstr::MIFlag Flag) const {
634  MachineFunction *MF = MBB.getParent();
636  Register SrcReg = RISCV::X0;
637  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
638  unsigned Num = 0;
639 
640  if (!STI.is64Bit() && !isInt<32>(Val))
641  report_fatal_error("Should only materialize 32-bit constants for RV32");
642 
644  RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
645  assert(!Seq.empty());
646 
647  for (RISCVMatInt::Inst &Inst : Seq) {
648  // Write the final result to DstReg if it's the last instruction in the Seq.
649  // Otherwise, write the result to the temp register.
650  if (++Num == Seq.size())
651  Result = DstReg;
652 
653  if (Inst.Opc == RISCV::LUI) {
654  BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
655  .addImm(Inst.Imm)
656  .setMIFlag(Flag);
657  } else if (Inst.Opc == RISCV::ADDUW) {
658  BuildMI(MBB, MBBI, DL, get(RISCV::ADDUW), Result)
659  .addReg(SrcReg, RegState::Kill)
660  .addReg(RISCV::X0)
661  .setMIFlag(Flag);
662  } else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD ||
663  Inst.Opc == RISCV::SH3ADD) {
664  BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
665  .addReg(SrcReg, RegState::Kill)
666  .addReg(SrcReg, RegState::Kill)
667  .setMIFlag(Flag);
668  } else {
669  BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
670  .addReg(SrcReg, RegState::Kill)
671  .addImm(Inst.Imm)
672  .setMIFlag(Flag);
673  }
674  // Only the first instruction has X0 as its source.
675  SrcReg = Result;
676  }
677 }
678 
680  switch (Opc) {
681  default:
682  return RISCVCC::COND_INVALID;
683  case RISCV::BEQ:
684  return RISCVCC::COND_EQ;
685  case RISCV::BNE:
686  return RISCVCC::COND_NE;
687  case RISCV::BLT:
688  return RISCVCC::COND_LT;
689  case RISCV::BGE:
690  return RISCVCC::COND_GE;
691  case RISCV::BLTU:
692  return RISCVCC::COND_LTU;
693  case RISCV::BGEU:
694  return RISCVCC::COND_GEU;
695  }
696 }
697 
698 // The contents of values added to Cond are not examined outside of
699 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
700 // push BranchOpcode, Reg1, Reg2.
703  // Block ends with fall-through condbranch.
704  assert(LastInst.getDesc().isConditionalBranch() &&
705  "Unknown conditional branch");
706  Target = LastInst.getOperand(2).getMBB();
707  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
708  Cond.push_back(MachineOperand::CreateImm(CC));
709  Cond.push_back(LastInst.getOperand(0));
710  Cond.push_back(LastInst.getOperand(1));
711 }
712 
714  switch (CC) {
715  default:
716  llvm_unreachable("Unknown condition code!");
717  case RISCVCC::COND_EQ:
718  return get(RISCV::BEQ);
719  case RISCVCC::COND_NE:
720  return get(RISCV::BNE);
721  case RISCVCC::COND_LT:
722  return get(RISCV::BLT);
723  case RISCVCC::COND_GE:
724  return get(RISCV::BGE);
725  case RISCVCC::COND_LTU:
726  return get(RISCV::BLTU);
727  case RISCVCC::COND_GEU:
728  return get(RISCV::BGEU);
729  }
730 }
731 
733  switch (CC) {
734  default:
735  llvm_unreachable("Unrecognized conditional branch");
736  case RISCVCC::COND_EQ:
737  return RISCVCC::COND_NE;
738  case RISCVCC::COND_NE:
739  return RISCVCC::COND_EQ;
740  case RISCVCC::COND_LT:
741  return RISCVCC::COND_GE;
742  case RISCVCC::COND_GE:
743  return RISCVCC::COND_LT;
744  case RISCVCC::COND_LTU:
745  return RISCVCC::COND_GEU;
746  case RISCVCC::COND_GEU:
747  return RISCVCC::COND_LTU;
748  }
749 }
750 
752  MachineBasicBlock *&TBB,
753  MachineBasicBlock *&FBB,
755  bool AllowModify) const {
756  TBB = FBB = nullptr;
757  Cond.clear();
758 
759  // If the block has no terminators, it just falls into the block after it.
761  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
762  return false;
763 
764  // Count the number of terminators and find the first unconditional or
765  // indirect branch.
766  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
767  int NumTerminators = 0;
768  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
769  J++) {
770  NumTerminators++;
771  if (J->getDesc().isUnconditionalBranch() ||
772  J->getDesc().isIndirectBranch()) {
773  FirstUncondOrIndirectBr = J.getReverse();
774  }
775  }
776 
777  // If AllowModify is true, we can erase any terminators after
778  // FirstUncondOrIndirectBR.
779  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
780  while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
781  std::next(FirstUncondOrIndirectBr)->eraseFromParent();
782  NumTerminators--;
783  }
784  I = FirstUncondOrIndirectBr;
785  }
786 
787  // We can't handle blocks that end in an indirect branch.
788  if (I->getDesc().isIndirectBranch())
789  return true;
790 
791  // We can't handle blocks with more than 2 terminators.
792  if (NumTerminators > 2)
793  return true;
794 
795  // Handle a single unconditional branch.
796  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
797  TBB = getBranchDestBlock(*I);
798  return false;
799  }
800 
801  // Handle a single conditional branch.
802  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
803  parseCondBranch(*I, TBB, Cond);
804  return false;
805  }
806 
807  // Handle a conditional branch followed by an unconditional branch.
808  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
809  I->getDesc().isUnconditionalBranch()) {
810  parseCondBranch(*std::prev(I), TBB, Cond);
811  FBB = getBranchDestBlock(*I);
812  return false;
813  }
814 
815  // Otherwise, we can't handle this.
816  return true;
817 }
818 
820  int *BytesRemoved) const {
821  if (BytesRemoved)
822  *BytesRemoved = 0;
824  if (I == MBB.end())
825  return 0;
826 
827  if (!I->getDesc().isUnconditionalBranch() &&
828  !I->getDesc().isConditionalBranch())
829  return 0;
830 
831  // Remove the branch.
832  if (BytesRemoved)
833  *BytesRemoved += getInstSizeInBytes(*I);
834  I->eraseFromParent();
835 
836  I = MBB.end();
837 
838  if (I == MBB.begin())
839  return 1;
840  --I;
841  if (!I->getDesc().isConditionalBranch())
842  return 1;
843 
844  // Remove the branch.
845  if (BytesRemoved)
846  *BytesRemoved += getInstSizeInBytes(*I);
847  I->eraseFromParent();
848  return 2;
849 }
850 
851 // Inserts a branch into the end of the specific MachineBasicBlock, returning
852 // the number of instructions inserted.
855  ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
856  if (BytesAdded)
857  *BytesAdded = 0;
858 
859  // Shouldn't be a fall through.
860  assert(TBB && "insertBranch must not be told to insert a fallthrough");
861  assert((Cond.size() == 3 || Cond.size() == 0) &&
862  "RISCV branch conditions have two components!");
863 
864  // Unconditional branch.
865  if (Cond.empty()) {
866  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
867  if (BytesAdded)
868  *BytesAdded += getInstSizeInBytes(MI);
869  return 1;
870  }
871 
872  // Either a one or two-way conditional branch.
873  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
874  MachineInstr &CondMI =
875  *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
876  if (BytesAdded)
877  *BytesAdded += getInstSizeInBytes(CondMI);
878 
879  // One-way conditional branch.
880  if (!FBB)
881  return 1;
882 
883  // Two-way conditional branch.
884  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
885  if (BytesAdded)
886  *BytesAdded += getInstSizeInBytes(MI);
887  return 2;
888 }
889 
891  MachineBasicBlock &DestBB,
892  MachineBasicBlock &RestoreBB,
893  const DebugLoc &DL, int64_t BrOffset,
894  RegScavenger *RS) const {
895  assert(RS && "RegScavenger required for long branching");
896  assert(MBB.empty() &&
897  "new block should be inserted for expanding unconditional branch");
898  assert(MBB.pred_size() == 1);
899 
900  MachineFunction *MF = MBB.getParent();
902 
903  if (!isInt<32>(BrOffset))
905  "Branch offsets outside of the signed 32-bit range not supported");
906 
907  // FIXME: A virtual register must be used initially, as the register
908  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
909  // uses the same workaround).
910  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
911  auto II = MBB.end();
912 
913  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
914  .addReg(ScratchReg, RegState::Define | RegState::Dead)
915  .addMBB(&DestBB, RISCVII::MO_CALL);
916 
917  RS->enterBasicBlockEnd(MBB);
918  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
919  MI.getIterator(), false, 0);
920  // TODO: The case when there is no scavenged register needs special handling.
921  assert(Scav != RISCV::NoRegister && "No register is scavenged!");
922  MRI.replaceRegWith(ScratchReg, Scav);
923  MRI.clearVirtRegs();
924  RS->setRegUsed(Scav);
925 }
926 
929  assert((Cond.size() == 3) && "Invalid branch condition!");
930  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
931  Cond[0].setImm(getOppositeBranchCondition(CC));
932  return false;
933 }
934 
937  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
938  // The branch target is always the last operand.
939  int NumOp = MI.getNumExplicitOperands();
940  return MI.getOperand(NumOp - 1).getMBB();
941 }
942 
944  int64_t BrOffset) const {
945  unsigned XLen = STI.getXLen();
946  // Ideally we could determine the supported branch offset from the
947  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
948  // PseudoBR.
949  switch (BranchOp) {
950  default:
951  llvm_unreachable("Unexpected opcode!");
952  case RISCV::BEQ:
953  case RISCV::BNE:
954  case RISCV::BLT:
955  case RISCV::BGE:
956  case RISCV::BLTU:
957  case RISCV::BGEU:
958  return isIntN(13, BrOffset);
959  case RISCV::JAL:
960  case RISCV::PseudoBR:
961  return isIntN(21, BrOffset);
962  case RISCV::PseudoJump:
963  return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
964  }
965 }
966 
968  unsigned Opcode = MI.getOpcode();
969 
970  switch (Opcode) {
971  default: {
972  if (MI.getParent() && MI.getParent()->getParent()) {
973  const auto MF = MI.getMF();
974  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
975  const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
976  const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
977  const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
978  if (isCompressibleInst(MI, &ST, MRI, STI))
979  return 2;
980  }
981  return get(Opcode).getSize();
982  }
984  case TargetOpcode::IMPLICIT_DEF:
985  case TargetOpcode::KILL:
986  case TargetOpcode::DBG_VALUE:
987  return 0;
988  // These values are determined based on RISCVExpandAtomicPseudoInsts,
989  // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
990  // pseudos are expanded.
991  case RISCV::PseudoCALLReg:
992  case RISCV::PseudoCALL:
993  case RISCV::PseudoJump:
994  case RISCV::PseudoTAIL:
995  case RISCV::PseudoLLA:
996  case RISCV::PseudoLA:
997  case RISCV::PseudoLA_TLS_IE:
998  case RISCV::PseudoLA_TLS_GD:
999  return 8;
1000  case RISCV::PseudoAtomicLoadNand32:
1001  case RISCV::PseudoAtomicLoadNand64:
1002  return 20;
1003  case RISCV::PseudoMaskedAtomicSwap32:
1004  case RISCV::PseudoMaskedAtomicLoadAdd32:
1005  case RISCV::PseudoMaskedAtomicLoadSub32:
1006  return 28;
1007  case RISCV::PseudoMaskedAtomicLoadNand32:
1008  return 32;
1009  case RISCV::PseudoMaskedAtomicLoadMax32:
1010  case RISCV::PseudoMaskedAtomicLoadMin32:
1011  return 44;
1012  case RISCV::PseudoMaskedAtomicLoadUMax32:
1013  case RISCV::PseudoMaskedAtomicLoadUMin32:
1014  return 36;
1015  case RISCV::PseudoCmpXchg32:
1016  case RISCV::PseudoCmpXchg64:
1017  return 16;
1018  case RISCV::PseudoMaskedCmpXchg32:
1019  return 32;
1022  const MachineFunction &MF = *MI.getParent()->getParent();
1023  const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
1024  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
1025  *TM.getMCAsmInfo());
1026  }
1027  case RISCV::PseudoVSPILL2_M1:
1028  case RISCV::PseudoVSPILL2_M2:
1029  case RISCV::PseudoVSPILL2_M4:
1030  case RISCV::PseudoVSPILL3_M1:
1031  case RISCV::PseudoVSPILL3_M2:
1032  case RISCV::PseudoVSPILL4_M1:
1033  case RISCV::PseudoVSPILL4_M2:
1034  case RISCV::PseudoVSPILL5_M1:
1035  case RISCV::PseudoVSPILL6_M1:
1036  case RISCV::PseudoVSPILL7_M1:
1037  case RISCV::PseudoVSPILL8_M1:
1038  case RISCV::PseudoVRELOAD2_M1:
1039  case RISCV::PseudoVRELOAD2_M2:
1040  case RISCV::PseudoVRELOAD2_M4:
1041  case RISCV::PseudoVRELOAD3_M1:
1042  case RISCV::PseudoVRELOAD3_M2:
1043  case RISCV::PseudoVRELOAD4_M1:
1044  case RISCV::PseudoVRELOAD4_M2:
1045  case RISCV::PseudoVRELOAD5_M1:
1046  case RISCV::PseudoVRELOAD6_M1:
1047  case RISCV::PseudoVRELOAD7_M1:
1048  case RISCV::PseudoVRELOAD8_M1: {
1049  // The values are determined based on expandVSPILL and expandVRELOAD that
1050  // expand the pseudos depending on NF.
1051  unsigned NF = isRVVSpillForZvlsseg(Opcode)->first;
1052  return 4 * (2 * NF - 1);
1053  }
1054  }
1055 }
1056 
1058  const unsigned Opcode = MI.getOpcode();
1059  switch (Opcode) {
1060  default:
1061  break;
1062  case RISCV::FSGNJ_D:
1063  case RISCV::FSGNJ_S:
1064  case RISCV::FSGNJ_H:
1065  // The canonical floating-point move is fsgnj rd, rs, rs.
1066  return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1067  MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
1068  case RISCV::ADDI:
1069  case RISCV::ORI:
1070  case RISCV::XORI:
1071  return (MI.getOperand(1).isReg() &&
1072  MI.getOperand(1).getReg() == RISCV::X0) ||
1073  (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1074  }
1075  return MI.isAsCheapAsAMove();
1076 }
1077 
1080  if (MI.isMoveReg())
1081  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1082  switch (MI.getOpcode()) {
1083  default:
1084  break;
1085  case RISCV::ADDI:
1086  // Operand 1 can be a frameindex but callers expect registers
1087  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1088  MI.getOperand(2).getImm() == 0)
1089  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1090  break;
1091  case RISCV::FSGNJ_D:
1092  case RISCV::FSGNJ_S:
1093  case RISCV::FSGNJ_H:
1094  // The canonical floating-point move is fsgnj rd, rs, rs.
1095  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1096  MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1097  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1098  break;
1099  }
1100  return None;
1101 }
1102 
1104  StringRef &ErrInfo) const {
1105  const MCInstrInfo *MCII = STI.getInstrInfo();
1106  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
1107 
1108  for (auto &OI : enumerate(Desc.operands())) {
1109  unsigned OpType = OI.value().OperandType;
1110  if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
1111  OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
1112  const MachineOperand &MO = MI.getOperand(OI.index());
1113  if (MO.isImm()) {
1114  int64_t Imm = MO.getImm();
1115  bool Ok;
1116  switch (OpType) {
1117  default:
1118  llvm_unreachable("Unexpected operand type");
1120  Ok = isUInt<2>(Imm);
1121  break;
1123  Ok = isUInt<3>(Imm);
1124  break;
1126  Ok = isUInt<4>(Imm);
1127  break;
1129  Ok = isUInt<5>(Imm);
1130  break;
1132  Ok = isUInt<7>(Imm);
1133  break;
1135  Ok = isUInt<12>(Imm);
1136  break;
1138  Ok = isInt<12>(Imm);
1139  break;
1141  Ok = isUInt<20>(Imm);
1142  break;
1144  if (STI.getTargetTriple().isArch64Bit())
1145  Ok = isUInt<6>(Imm);
1146  else
1147  Ok = isUInt<5>(Imm);
1148  break;
1149  }
1150  if (!Ok) {
1151  ErrInfo = "Invalid immediate";
1152  return false;
1153  }
1154  }
1155  }
1156  }
1157 
1158  return true;
1159 }
1160 
1161 // Return true if get the base operand, byte offset of an instruction and the
1162 // memory width. Width is the size of memory that is being loaded/stored.
1164  const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
1165  unsigned &Width, const TargetRegisterInfo *TRI) const {
1166  if (!LdSt.mayLoadOrStore())
1167  return false;
1168 
1169  // Here we assume the standard RISC-V ISA, which uses a base+offset
1170  // addressing mode. You'll need to relax these conditions to support custom
1171  // load/stores instructions.
1172  if (LdSt.getNumExplicitOperands() != 3)
1173  return false;
1174  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
1175  return false;
1176 
1177  if (!LdSt.hasOneMemOperand())
1178  return false;
1179 
1180  Width = (*LdSt.memoperands_begin())->getSize();
1181  BaseReg = &LdSt.getOperand(1);
1182  Offset = LdSt.getOperand(2).getImm();
1183  return true;
1184 }
1185 
1187  const MachineInstr &MIa, const MachineInstr &MIb) const {
1188  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
1189  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
1190 
1191  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1193  return false;
1194 
1195  // Retrieve the base register, offset from the base register and width. Width
1196  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
1197  // base registers are identical, and the offset of a lower memory access +
1198  // the width doesn't overlap the offset of a higher memory access,
1199  // then the memory accesses are different.
1201  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
1202  int64_t OffsetA = 0, OffsetB = 0;
1203  unsigned int WidthA = 0, WidthB = 0;
1204  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
1205  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
1206  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
1207  int LowOffset = std::min(OffsetA, OffsetB);
1208  int HighOffset = std::max(OffsetA, OffsetB);
1209  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1210  if (LowOffset + LowWidth <= HighOffset)
1211  return true;
1212  }
1213  }
1214  return false;
1215 }
1216 
1217 std::pair<unsigned, unsigned>
1219  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1220  return std::make_pair(TF & Mask, TF & ~Mask);
1221 }
1222 
1225  using namespace RISCVII;
1226  static const std::pair<unsigned, const char *> TargetFlags[] = {
1227  {MO_CALL, "riscv-call"},
1228  {MO_PLT, "riscv-plt"},
1229  {MO_LO, "riscv-lo"},
1230  {MO_HI, "riscv-hi"},
1231  {MO_PCREL_LO, "riscv-pcrel-lo"},
1232  {MO_PCREL_HI, "riscv-pcrel-hi"},
1233  {MO_GOT_HI, "riscv-got-hi"},
1234  {MO_TPREL_LO, "riscv-tprel-lo"},
1235  {MO_TPREL_HI, "riscv-tprel-hi"},
1236  {MO_TPREL_ADD, "riscv-tprel-add"},
1237  {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1238  {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1239  return makeArrayRef(TargetFlags);
1240 }
1242  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1243  const Function &F = MF.getFunction();
1244 
1245  // Can F be deduplicated by the linker? If it can, don't outline from it.
1246  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1247  return false;
1248 
1249  // Don't outline from functions with section markings; the program could
1250  // expect that all the code is in the named section.
1251  if (F.hasSection())
1252  return false;
1253 
1254  // It's safe to outline from MF.
1255  return true;
1256 }
1257 
1259  unsigned &Flags) const {
1260  // More accurate safety checking is done in getOutliningCandidateInfo.
1262 }
1263 
1264 // Enum values indicating how an outlined call should be constructed.
1267 };
1268 
1270  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1271 
1272  // First we need to filter out candidates where the X5 register (IE t0) can't
1273  // be used to setup the function call.
1274  auto CannotInsertCall = [](outliner::Candidate &C) {
1275  const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1276 
1277  C.initLRU(*TRI);
1278  LiveRegUnits LRU = C.LRU;
1279  return !LRU.available(RISCV::X5);
1280  };
1281 
1282  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1283 
1284  // If the sequence doesn't have enough candidates left, then we're done.
1285  if (RepeatedSequenceLocs.size() < 2)
1286  return outliner::OutlinedFunction();
1287 
1288  unsigned SequenceSize = 0;
1289 
1290  auto I = RepeatedSequenceLocs[0].front();
1291  auto E = std::next(RepeatedSequenceLocs[0].back());
1292  for (; I != E; ++I)
1293  SequenceSize += getInstSizeInBytes(*I);
1294 
1295  // call t0, function = 8 bytes.
1296  unsigned CallOverhead = 8;
1297  for (auto &C : RepeatedSequenceLocs)
1298  C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1299 
1300  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1301  unsigned FrameOverhead = 4;
1302  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1303  .getFeatureBits()[RISCV::FeatureStdExtC])
1304  FrameOverhead = 2;
1305 
1306  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1307  FrameOverhead, MachineOutlinerDefault);
1308 }
1309 
1312  unsigned Flags) const {
1313  MachineInstr &MI = *MBBI;
1314  MachineBasicBlock *MBB = MI.getParent();
1315  const TargetRegisterInfo *TRI =
1317 
1318  // Positions generally can't safely be outlined.
1319  if (MI.isPosition()) {
1320  // We can manually strip out CFI instructions later.
1321  if (MI.isCFIInstruction())
1323 
1325  }
1326 
1327  // Don't trust the user to write safe inline assembly.
1328  if (MI.isInlineAsm())
1330 
1331  // We can't outline branches to other basic blocks.
1332  if (MI.isTerminator() && !MBB->succ_empty())
1334 
1335  // We need support for tail calls to outlined functions before return
1336  // statements can be allowed.
1337  if (MI.isReturn())
1339 
1340  // Don't allow modifying the X5 register which we use for return addresses for
1341  // these outlined functions.
1342  if (MI.modifiesRegister(RISCV::X5, TRI) ||
1343  MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
1345 
1346  // Make sure the operands don't reference something unsafe.
1347  for (const auto &MO : MI.operands())
1348  if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
1350 
1351  // Don't allow instructions which won't be materialized to impact outlining
1352  // analysis.
1353  if (MI.isMetaInstruction())
1355 
1357 }
1358 
1361  const outliner::OutlinedFunction &OF) const {
1362 
1363  // Strip out any CFI instructions
1364  bool Changed = true;
1365  while (Changed) {
1366  Changed = false;
1367  auto I = MBB.begin();
1368  auto E = MBB.end();
1369  for (; I != E; ++I) {
1370  if (I->isCFIInstruction()) {
1371  I->removeFromParent();
1372  Changed = true;
1373  break;
1374  }
1375  }
1376  }
1377 
1378  MBB.addLiveIn(RISCV::X5);
1379 
1380  // Add in a return instruction to the end of the outlined frame.
1381  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1382  .addReg(RISCV::X0, RegState::Define)
1383  .addReg(RISCV::X5)
1384  .addImm(0));
1385 }
1386 
1389  MachineFunction &MF, const outliner::Candidate &C) const {
1390 
1391  // Add in a call instruction to the outlined function at the given location.
1392  It = MBB.insert(It,
1393  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1394  .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1395  RISCVII::MO_CALL));
1396  return It;
1397 }
1398 
// NOTE(review): the leading numerals on each line are artifacts of the HTML
// extraction of this file; the upstream source has none.
//
// These macros expand to chained `case` labels covering every LMUL variant of
// a vector FMA pseudo instruction, so a single `case CASE_VFMA_...(OP, TYPE):`
// in a switch matches all register-group multipliers at once. Each wider macro
// prepends one more LMUL case and delegates to the next-narrower macro.
1399 // clang-format off
// Expands to the opcode name for one specific (OP, TYPE, LMUL) pseudo.
1400 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
1401  RISCV::PseudoV##OP##_##TYPE##_##LMUL
1402 
// Case labels for the integral LMULs: M1, M2, M4, M8.
1403 #define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
1404  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
1405  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
1406  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
1407  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
1408 
// Adds the fractional MF2 case on top of the M1..M8 set.
1409 #define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
1410  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
1411  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
1412 
// Adds the fractional MF4 case on top of MF2..M8.
1413 #define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
1414  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
1415  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
1416 
// Full LMUL coverage: MF8 plus everything from MF4 upward.
1417 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
1418  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
1419  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
1420 
// Scalar-splat (.vf) variants: the element width bounds the legal LMUL range,
// so VF16 starts at MF4, VF32 at MF2, and VF64 at M1.
1421 #define CASE_VFMA_SPLATS(OP) \
1422  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
1423  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
1424  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
1425 // clang-format on
1426 
1428  unsigned &SrcOpIdx1,
1429  unsigned &SrcOpIdx2) const {
1430  const MCInstrDesc &Desc = MI.getDesc();
1431  if (!Desc.isCommutable())
1432  return false;
1433 
1434  switch (MI.getOpcode()) {
1435  case CASE_VFMA_SPLATS(FMADD):
1436  case CASE_VFMA_SPLATS(FMSUB):
1437  case CASE_VFMA_SPLATS(FMACC):
1438  case CASE_VFMA_SPLATS(FMSAC):
1439  case CASE_VFMA_SPLATS(FNMADD):
1440  case CASE_VFMA_SPLATS(FNMSUB):
1441  case CASE_VFMA_SPLATS(FNMACC):
1442  case CASE_VFMA_SPLATS(FNMSAC):
1443  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1444  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1445  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1446  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1447  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1448  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1449  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1450  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1451  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1452  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1453  // If the tail policy is undisturbed we can't commute.
1454  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1455  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1456  return false;
1457 
1458  // For these instructions we can only swap operand 1 and operand 3 by
1459  // changing the opcode.
1460  unsigned CommutableOpIdx1 = 1;
1461  unsigned CommutableOpIdx2 = 3;
1462  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1463  CommutableOpIdx2))
1464  return false;
1465  return true;
1466  }
1467  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1471  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1472  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1473  // If the tail policy is undisturbed we can't commute.
1474  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1475  if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1476  return false;
1477 
1478  // For these instructions we have more freedom. We can commute with the
1479  // other multiplicand or with the addend/subtrahend/minuend.
1480 
1481  // Any fixed operand must be from source 1, 2 or 3.
1482  if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
1483  return false;
1484  if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
1485  return false;
1486 
1487  // It both ops are fixed one must be the tied source.
1488  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1489  SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
1490  return false;
1491 
1492  // Look for two different register operands assumed to be commutable
1493  // regardless of the FMA opcode. The FMA opcode is adjusted later if
1494  // needed.
1495  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1496  SrcOpIdx2 == CommuteAnyOperandIndex) {
1497  // At least one of operands to be commuted is not specified and
1498  // this method is free to choose appropriate commutable operands.
1499  unsigned CommutableOpIdx1 = SrcOpIdx1;
1500  if (SrcOpIdx1 == SrcOpIdx2) {
1501  // Both of operands are not fixed. Set one of commutable
1502  // operands to the tied source.
1503  CommutableOpIdx1 = 1;
1504  } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
1505  // Only one of the operands is not fixed.
1506  CommutableOpIdx1 = SrcOpIdx2;
1507  }
1508 
1509  // CommutableOpIdx1 is well defined now. Let's choose another commutable
1510  // operand and assign its index to CommutableOpIdx2.
1511  unsigned CommutableOpIdx2;
1512  if (CommutableOpIdx1 != 1) {
1513  // If we haven't already used the tied source, we must use it now.
1514  CommutableOpIdx2 = 1;
1515  } else {
1516  Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
1517 
1518  // The commuted operands should have different registers.
1519  // Otherwise, the commute transformation does not change anything and
1520  // is useless. We use this as a hint to make our decision.
1521  if (Op1Reg != MI.getOperand(2).getReg())
1522  CommutableOpIdx2 = 2;
1523  else
1524  CommutableOpIdx2 = 3;
1525  }
1526 
1527  // Assign the found pair of commutable indices to SrcOpIdx1 and
1528  // SrcOpIdx2 to return those values.
1529  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1530  CommutableOpIdx2))
1531  return false;
1532  }
1533 
1534  return true;
1535  }
1536  }
1537 
1538  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
1539 }
1540 
// NOTE(review): the leading numerals on each line are artifacts of the HTML
// extraction of this file; the upstream source has none.
//
// Counterparts of the CASE_VFMA_OPCODE_* macros above: instead of producing
// bare case labels, each COMMON expansion produces a full `case` arm that
// rewrites a local `Opc` variable from the OLDOP pseudo to the matching NEWOP
// pseudo at the same (TYPE, LMUL), then breaks. Used when commuting operands
// requires toggling e.g. a *MACC form to the corresponding *MADD form.
1541 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
1542  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
1543  Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
1544  break;
1545 
// Rewrite arms for the integral LMULs M1..M8.
1546 #define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
1547  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
1548  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
1549  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
1550  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
1551 
// Adds the MF2 arm on top of M1..M8.
1552 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
1553  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
1554  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
1555 
// Adds the MF4 arm on top of MF2..M8.
1556 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
1557  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
1558  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
1559 
// Full LMUL coverage: MF8 plus everything from MF4 upward.
1560 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
1561  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
1562  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
1563 
// Scalar-splat (.vf) variants, mirroring CASE_VFMA_SPLATS: the element width
// bounds the legal LMUL range (VF16 from MF4, VF32 from MF2, VF64 from M1).
1564 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
1565  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
1566  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
1567  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
1568 
1570  bool NewMI,
1571  unsigned OpIdx1,
1572  unsigned OpIdx2) const {
1573  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1574  if (NewMI)
1575  return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1576  return MI;
1577  };
1578 
1579  switch (MI.getOpcode()) {
1580  case CASE_VFMA_SPLATS(FMACC):
1581  case CASE_VFMA_SPLATS(FMADD):
1582  case CASE_VFMA_SPLATS(FMSAC):
1583  case CASE_VFMA_SPLATS(FMSUB):
1584  case CASE_VFMA_SPLATS(FNMACC):
1585  case CASE_VFMA_SPLATS(FNMADD):
1586  case CASE_VFMA_SPLATS(FNMSAC):
1587  case CASE_VFMA_SPLATS(FNMSUB):
1588  case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
1589  case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
1590  case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
1591  case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
1592  case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1593  case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1594  case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1595  case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1596  case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1597  case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1598  // It only make sense to toggle these between clobbering the
1599  // addend/subtrahend/minuend one of the multiplicands.
1600  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1601  assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
1602  unsigned Opc;
1603  switch (MI.getOpcode()) {
1604  default:
1605  llvm_unreachable("Unexpected opcode");
1606  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
1607  CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
1614  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
1618  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
1619  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
1620  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
1621  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
1622  CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
1623  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
1624  }
1625 
1626  auto &WorkingMI = cloneIfNew(MI);
1627  WorkingMI.setDesc(get(Opc));
1628  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1629  OpIdx1, OpIdx2);
1630  }
1631  case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
1635  case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1636  case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1637  assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1638  // If one of the operands, is the addend we need to change opcode.
1639  // Otherwise we're just swapping 2 of the multiplicands.
1640  if (OpIdx1 == 3 || OpIdx2 == 3) {
1641  unsigned Opc;
1642  switch (MI.getOpcode()) {
1643  default:
1644  llvm_unreachable("Unexpected opcode");
1645  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
1649  CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
1650  CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
1651  }
1652 
1653  auto &WorkingMI = cloneIfNew(MI);
1654  WorkingMI.setDesc(get(Opc));
1655  return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1656  OpIdx1, OpIdx2);
1657  }
1658  // Let the default code handle it.
1659  break;
1660  }
1661  }
1662 
1663  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1664 }
1665 
1666 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1667 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1668 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1669 #undef CASE_VFMA_SPLATS
1670 #undef CASE_VFMA_OPCODE_LMULS
1671 #undef CASE_VFMA_OPCODE_COMMON
1672 
// NOTE(review): the leading numerals on each line are artifacts of the HTML
// extraction of this file; the upstream source has none.
//
// Case-label macros for the tied widening-op pseudos (vwadd.wv and friends):
// one `case CASE_WIDEOP_OPCODE_...(OP):` matches the _TIED pseudo across every
// LMUL variant. Note the LMULS_MF4 list stops at M4 — widening ops double the
// result EEW/EMUL, so an M8 _TIED form does not exist.
1673 // clang-format off
// Expands to the _TIED pseudo opcode for one specific (OP, LMUL).
1674 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
1675  RISCV::PseudoV##OP##_##LMUL##_TIED
1676 
// Case labels for MF4 through M4 (no M8; see note above).
1677 #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
1678  CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
1679  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
1680  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
1681  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
1682  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
1683 
// Full coverage: MF8 plus the MF4..M4 set.
1684 #define CASE_WIDEOP_OPCODE_LMULS(OP) \
1685  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
1686  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
1687 // clang-format on
1688 
// NOTE(review): the leading numerals on each line are artifacts of the HTML
// extraction of this file; the upstream source has none.
//
// Counterparts of the CASE_WIDEOP_OPCODE_* macros: each COMMON expansion is a
// full `case` arm that rewrites a local `NewOpc` variable from the _TIED
// pseudo to the untied pseudo of the same OP and LMUL, then breaks. Used when
// converting a tied widening op to its three-address (untied) form.
1689 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
1690  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
1691  NewOpc = RISCV::PseudoV##OP##_##LMUL; \
1692  break;
1693 
// Rewrite arms for MF4 through M4 (widening ops have no M8 _TIED form).
1694 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
1695  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
1696  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
1697  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
1698  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
1699  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
1700 
// Full coverage: MF8 plus the MF4..M4 set.
1701 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
1702  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
1703  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
1704 
1706  LiveVariables *LV,
1707  LiveIntervals *LIS) const {
1708  switch (MI.getOpcode()) {
1709  default:
1710  break;
1711  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
1712  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
1713  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
1714  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
1715  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
1716  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
1717  // clang-format off
1718  unsigned NewOpc;
1719  switch (MI.getOpcode()) {
1720  default:
1721  llvm_unreachable("Unexpected opcode");
1728  }
1729  //clang-format on
1730 
1731  MachineBasicBlock &MBB = *MI.getParent();
1732  MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
1733  .add(MI.getOperand(0))
1734  .add(MI.getOperand(1))
1735  .add(MI.getOperand(2))
1736  .add(MI.getOperand(3))
1737  .add(MI.getOperand(4));
1738  MIB.copyImplicitOps(MI);
1739 
1740  if (LV) {
1741  unsigned NumOps = MI.getNumOperands();
1742  for (unsigned I = 1; I < NumOps; ++I) {
1743  MachineOperand &Op = MI.getOperand(I);
1744  if (Op.isReg() && Op.isKill())
1745  LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1746  }
1747  }
1748 
1749  if (LIS) {
1750  SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1751 
1752  if (MI.getOperand(0).isEarlyClobber()) {
1753  // Use operand 1 was tied to early-clobber def operand 0, so its live
1754  // interval could have ended at an early-clobber slot. Now they are not
1755  // tied we need to update it to the normal register slot.
1756  LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
1758  if (S->end == Idx.getRegSlot(true))
1759  S->end = Idx.getRegSlot();
1760  }
1761  }
1762 
1763  return MIB;
1764  }
1765  }
1766 
1767  return nullptr;
1768 }
1769 
1770 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1771 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1772 #undef CASE_WIDEOP_OPCODE_LMULS
1773 #undef CASE_WIDEOP_OPCODE_COMMON
1774 
1778  const DebugLoc &DL,
1779  int64_t Amount,
1780  MachineInstr::MIFlag Flag) const {
1781  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
1782  assert(Amount % 8 == 0 &&
1783  "Reserve the stack by the multiple of one vector size.");
1784 
1786  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
1787  int64_t NumOfVReg = Amount / 8;
1788 
1789  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1790  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL)
1791  .setMIFlag(Flag);
1792  assert(isInt<32>(NumOfVReg) &&
1793  "Expect the number of vector registers within 32-bits.");
1794  if (isPowerOf2_32(NumOfVReg)) {
1795  uint32_t ShiftAmount = Log2_32(NumOfVReg);
1796  if (ShiftAmount == 0)
1797  return VL;
1798  BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
1799  .addReg(VL, RegState::Kill)
1800  .addImm(ShiftAmount)
1801  .setMIFlag(Flag);
1802  } else if (isPowerOf2_32(NumOfVReg - 1)) {
1803  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1804  uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
1805  BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
1806  .addReg(VL)
1807  .addImm(ShiftAmount)
1808  .setMIFlag(Flag);
1809  BuildMI(MBB, II, DL, TII->get(RISCV::ADD), VL)
1810  .addReg(ScaledRegister, RegState::Kill)
1811  .addReg(VL, RegState::Kill)
1812  .setMIFlag(Flag);
1813  } else if (isPowerOf2_32(NumOfVReg + 1)) {
1814  Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1815  uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
1816  BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
1817  .addReg(VL)
1818  .addImm(ShiftAmount)
1819  .setMIFlag(Flag);
1820  BuildMI(MBB, II, DL, TII->get(RISCV::SUB), VL)
1821  .addReg(ScaledRegister, RegState::Kill)
1822  .addReg(VL, RegState::Kill)
1823  .setMIFlag(Flag);
1824  } else {
1825  Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1826  if (!isInt<12>(NumOfVReg))
1827  movImm(MBB, II, DL, N, NumOfVReg);
1828  else {
1829  BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), N)
1830  .addReg(RISCV::X0)
1831  .addImm(NumOfVReg)
1832  .setMIFlag(Flag);
1833  }
1834  if (!MF.getSubtarget<RISCVSubtarget>().hasStdExtM())
1836  MF.getFunction(),
1837  "M-extension must be enabled to calculate the vscaled size/offset."});
1838  BuildMI(MBB, II, DL, TII->get(RISCV::MUL), VL)
1839  .addReg(VL, RegState::Kill)
1841  .setMIFlag(Flag);
1842  }
1843 
1844  return VL;
1845 }
1846 
1847 static bool isRVVWholeLoadStore(unsigned Opcode) {
1848  switch (Opcode) {
1849  default:
1850  return false;
1851  case RISCV::VS1R_V:
1852  case RISCV::VS2R_V:
1853  case RISCV::VS4R_V:
1854  case RISCV::VS8R_V:
1855  case RISCV::VL1RE8_V:
1856  case RISCV::VL2RE8_V:
1857  case RISCV::VL4RE8_V:
1858  case RISCV::VL8RE8_V:
1859  case RISCV::VL1RE16_V:
1860  case RISCV::VL2RE16_V:
1861  case RISCV::VL4RE16_V:
1862  case RISCV::VL8RE16_V:
1863  case RISCV::VL1RE32_V:
1864  case RISCV::VL2RE32_V:
1865  case RISCV::VL4RE32_V:
1866  case RISCV::VL8RE32_V:
1867  case RISCV::VL1RE64_V:
1868  case RISCV::VL2RE64_V:
1869  case RISCV::VL4RE64_V:
1870  case RISCV::VL8RE64_V:
1871  return true;
1872  }
1873 }
1874 
1875 bool RISCVInstrInfo::isRVVSpill(const MachineInstr &MI, bool CheckFIs) const {
1876  // RVV lacks any support for immediate addressing for stack addresses, so be
1877  // conservative.
1878  unsigned Opcode = MI.getOpcode();
1879  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
1880  !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
1881  return false;
1882  return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
1883  return MO.isFI();
1884  });
1885 }
1886 
1889  switch (Opcode) {
1890  default:
1891  return None;
1892  case RISCV::PseudoVSPILL2_M1:
1893  case RISCV::PseudoVRELOAD2_M1:
1894  return std::make_pair(2u, 1u);
1895  case RISCV::PseudoVSPILL2_M2:
1896  case RISCV::PseudoVRELOAD2_M2:
1897  return std::make_pair(2u, 2u);
1898  case RISCV::PseudoVSPILL2_M4:
1899  case RISCV::PseudoVRELOAD2_M4:
1900  return std::make_pair(2u, 4u);
1901  case RISCV::PseudoVSPILL3_M1:
1902  case RISCV::PseudoVRELOAD3_M1:
1903  return std::make_pair(3u, 1u);
1904  case RISCV::PseudoVSPILL3_M2:
1905  case RISCV::PseudoVRELOAD3_M2:
1906  return std::make_pair(3u, 2u);
1907  case RISCV::PseudoVSPILL4_M1:
1908  case RISCV::PseudoVRELOAD4_M1:
1909  return std::make_pair(4u, 1u);
1910  case RISCV::PseudoVSPILL4_M2:
1911  case RISCV::PseudoVRELOAD4_M2:
1912  return std::make_pair(4u, 2u);
1913  case RISCV::PseudoVSPILL5_M1:
1914  case RISCV::PseudoVRELOAD5_M1:
1915  return std::make_pair(5u, 1u);
1916  case RISCV::PseudoVSPILL6_M1:
1917  case RISCV::PseudoVRELOAD6_M1:
1918  return std::make_pair(6u, 1u);
1919  case RISCV::PseudoVSPILL7_M1:
1920  case RISCV::PseudoVRELOAD7_M1:
1921  return std::make_pair(7u, 1u);
1922  case RISCV::PseudoVSPILL8_M1:
1923  case RISCV::PseudoVRELOAD8_M1:
1924  return std::make_pair(8u, 1u);
1925  }
1926 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:101
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:20
llvm::RISCVII::isRVVWideningReduction
static bool isRVVWideningReduction(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:155
llvm::RISCVInstrInfo::reverseBranchCondition
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Definition: RISCVInstrInfo.cpp:927
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:105
llvm::RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Definition: RISCVInstrInfo.cpp:1224
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:23
llvm::MachineInstrBuilder::copyImplicitOps
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
Definition: MachineInstrBuilder.h:315
llvm::HexagonMCInstrInfo::getDesc
const MCInstrDesc & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
Definition: HexagonMCInstrInfo.cpp:255
llvm::RISCVInstrInfo::RISCVInstrInfo
RISCVInstrInfo(RISCVSubtarget &STI)
Definition: RISCVInstrInfo.cpp:56
llvm::RISCVInstrInfo::getBrCond
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
Definition: RISCVInstrInfo.cpp:713
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
PreferWholeRegisterMove
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
llvm::RISCVCC::COND_GEU
@ COND_GEU
Definition: RISCVInstrInfo.h:36
llvm::RISCVII::MO_TLS_GOT_HI
@ MO_TLS_GOT_HI
Definition: RISCVBaseInfo.h:172
llvm::RISCVOp::OPERAND_SIMM12
@ OPERAND_SIMM12
Definition: RISCVBaseInfo.h:191
llvm::RISCVCC::getOppositeBranchCondition
CondCode getOppositeBranchCondition(CondCode)
Definition: RISCVInstrInfo.cpp:732
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:158
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:1004
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
llvm::RISCVCC::COND_INVALID
@ COND_INVALID
Definition: RISCVInstrInfo.h:37
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::MachineInstr::mayLoadOrStore
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:1028
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:709
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::Function
Definition: Function.h:62
llvm::RISCVInstrInfo::getOutliningType
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
Definition: RISCVInstrInfo.cpp:1311
llvm::MachineInstr::memoperands_begin
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:708
llvm::RISCVOp::OPERAND_LAST_RISCV_IMM
@ OPERAND_LAST_RISCV_IMM
Definition: RISCVBaseInfo.h:194
llvm::RegScavenger::scavengeRegisterBackwards
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Definition: RegisterScavenging.cpp:587
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:137
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1177
llvm::RISCVOp::OPERAND_UIMMLOG2XLEN
@ OPERAND_UIMMLOG2XLEN
Definition: RISCVBaseInfo.h:193
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:435
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:2080
ErrorHandling.h
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1830
llvm::LiveRange::Segment
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
llvm::ISD::EH_LABEL
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:992
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:164
MCInstBuilder.h
llvm::IRSimilarity::Invisible
@ Invisible
Definition: IRSimilarityIdentifier.h:75
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::LiveRegUnits::available
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
Definition: LiveRegUnits.h:116
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:124
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:104
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:233
llvm::MCRegisterInfo::getEncodingValue
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
Definition: MCRegisterInfo.h:553
llvm::RISCVII::hasSEWOp
static bool hasSEWOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:143
llvm::MipsII::MO_TPREL_HI
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from.
Definition: MipsBaseInfo.h:73
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:321
llvm::X86ISD::FNMADD
@ FNMADD
Definition: X86ISelLowering.h:555
llvm::MachineInstr::getDesc
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:486
llvm::outliner::InstrType
InstrType
Represents how an instruction should be mapped by the outliner.
Definition: MachineOutliner.h:34
llvm::RISCVVType::isTailAgnostic
static bool isTailAgnostic(unsigned VType)
Definition: RISCVBaseInfo.h:384
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:128
llvm::M68kII::MO_PLT
@ MO_PLT
On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol name from ...
Definition: M68kBaseInfo.h:114
llvm::PPCISD::FNMSUB
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
Definition: PPCISelLowering.h:170
llvm::RISCVInstrInfo::insertIndirectBranch
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
Definition: RISCVInstrInfo.cpp:890
llvm::RISCVInstrInfo::STI
const RISCVSubtarget & STI
Definition: RISCVInstrInfo.h:181
llvm::Optional
Definition: APInt.h:33
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:80
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
STLExtras.h
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
llvm::RISCVInstrInfo::getBranchDestBlock
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:936
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::outliner::OutlinedFunction
The information necessary to create an outlined function for some class of candidate.
Definition: MachineOutliner.h:164
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:151
RISCVMatInt.h
llvm::RISCVInstrInfo::isLoadFromStackSlot
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:69
llvm::RISCVVType::getSEW
static unsigned getSEW(unsigned VType)
Definition: RISCVBaseInfo.h:379
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVInstrInfo::isBranchOffsetInRange
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
Definition: RISCVInstrInfo.cpp:943
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1564
RISCVGenInstrInfo
llvm::RISCVInstrInfo::convertToThreeAddress
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Definition: RISCVInstrInfo.cpp:1705
llvm::MachineInstr::hasOneMemOperand
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:723
llvm::RISCVOp::OPERAND_UIMM7
@ OPERAND_UIMM7
Definition: RISCVBaseInfo.h:189
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::MachineInstr::hasOrderedMemoryRef
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Definition: MachineInstr.cpp:1359
MachineRegisterInfo.h
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:984
llvm::RISCVInstrInfo::isRVVSpillForZvlsseg
Optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode) const
Definition: RISCVInstrInfo.cpp:1888
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:157
CASE_VFMA_OPCODE_LMULS_MF4
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1413
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:103
llvm::MachineBasicBlock::pred_size
unsigned pred_size() const
Definition: MachineBasicBlock.h:332
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:651
llvm::TargetInstrInfo::commuteInstructionImpl
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
Definition: TargetInstrInfo.cpp:167
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::RISCVCC::COND_LT
@ COND_LT
Definition: RISCVInstrInfo.h:33
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:773
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:537
parseCondBranch
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
Definition: RISCVInstrInfo.cpp:701
llvm::RISCVInstrInfo::findCommutedOpIndices
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Definition: RISCVInstrInfo.cpp:1427
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:499
llvm::IRSimilarity::Illegal
@ Illegal
Definition: IRSimilarityIdentifier.h:75
llvm::RISCVInstrInfo::analyzeBranch
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Definition: RISCVInstrInfo.cpp:751
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:46
LiveVariables.h
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:127
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
llvm::LiveVariables::replaceKillInstruction
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
Definition: LiveVariables.cpp:752
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:195
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:49
llvm::RISCVCC::COND_LTU
@ COND_LTU
Definition: RISCVInstrInfo.h:35
llvm::RISCVOp::OPERAND_UIMM5
@ OPERAND_UIMM5
Definition: RISCVBaseInfo.h:188
llvm::MCInstrDesc::isCommutable
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:472
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:146
llvm::RISCVInstrInfo::decomposeMachineOperandsTargetFlags
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Definition: RISCVInstrInfo.cpp:1218
llvm::MachineBasicBlock::rend
reverse_iterator rend()
Definition: MachineBasicBlock.h:282
getOppositeBranchCondition
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
Definition: ARCInstrInfo.cpp:102
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
llvm::RISCVInstrInfo::isRVVSpill
bool isRVVSpill(const MachineInstr &MI, bool CheckFIs) const
Definition: RISCVInstrInfo.cpp:1875
llvm::RegScavenger::enterBasicBlockEnd
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Definition: RegisterScavenging.cpp:89
llvm::RISCVOp::OPERAND_UIMM12
@ OPERAND_UIMM12
Definition: RISCVBaseInfo.h:190
llvm::RISCVInstrInfo::insertOutlinedCall
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, const outliner::Candidate &C) const override
Definition: RISCVInstrInfo.cpp:1387
llvm::RISCVInstrInfo::removeBranch
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Definition: RISCVInstrInfo.cpp:819
llvm::RISCVOp::OPERAND_UIMM2
@ OPERAND_UIMM2
Definition: RISCVBaseInfo.h:185
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:214
llvm::RISCVInstrInfo::storeRegToStackSlot
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:444
llvm::MCRegisterInfo::isSubRegisterEq
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
Definition: MCRegisterInfo.h:568
llvm::RISCVSubtarget::getInstrInfo
const RISCVInstrInfo * getInstrInfo() const override
Definition: RISCVSubtarget.h:119
llvm::LiveInterval
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:680
llvm::LiveRegUnits
A set of register units used to track register liveness.
Definition: LiveRegUnits.h:30
llvm::RISCVInstrInfo::getNop
MCInst getNop() const override
Definition: RISCVInstrInfo.cpp:60
llvm::TargetInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
Definition: TargetInstrInfo.cpp:1422
llvm::SlotIndex
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:83
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:460
llvm::None
const NoneType None
Definition: None.h:23
llvm::RISCVInstrInfo::areMemAccessesTriviallyDisjoint
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Definition: RISCVInstrInfo.cpp:1186
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1556
MachineOutlinerConstructionID
MachineOutlinerConstructionID
Definition: RISCVInstrInfo.cpp:1265
llvm::RISCVII::MO_TLS_GD_HI
@ MO_TLS_GD_HI
Definition: RISCVBaseInfo.h:173
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:641
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:152
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
llvm::cl::opt< bool >
forwardCopyWillClobberTuple
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg, unsigned NumRegs)
Definition: RISCVInstrInfo.cpp:120
llvm::RISCVII::MO_GOT_HI
@ MO_GOT_HI
Definition: RISCVBaseInfo.h:168
llvm::RISCVOp::OPERAND_UIMM3
@ OPERAND_UIMM3
Definition: RISCVBaseInfo.h:186
llvm::MachineInstrBundleIterator::getReverse
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Definition: MachineInstrBundleIterator.h:283
llvm::RISCVVType::decodeVLMUL
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
Definition: RISCVBaseInfo.cpp:143
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::LiveIntervals::ReplaceMachineInstrInMaps
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Definition: LiveIntervals.h:280
llvm::IRSimilarity::Legal
@ Legal
Definition: IRSimilarityIdentifier.h:75
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:321
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::MachineFrameInfo::getObjectSize
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Definition: MachineFrameInfo.h:453
LiveIntervals.h
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RISCVInstrInfo::isFunctionSafeToOutlineFrom
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
Definition: RISCVInstrInfo.cpp:1241
llvm::outliner::Candidate
An individual sequence of instructions to be replaced with a call to an outlined function.
Definition: MachineOutliner.h:38
llvm::RISCVOp::OPERAND_UIMM20
@ OPERAND_UIMM20
Definition: RISCVBaseInfo.h:192
llvm::RISCVCC::COND_EQ
@ COND_EQ
Definition: RISCVInstrInfo.h:31
MemoryLocation.h
llvm::RISCVInstrInfo::getMemOperandWithOffsetWidth
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
Definition: RISCVInstrInfo.cpp:1163
llvm::RISCVInstrInfo::isMBBSafeToOutlineFrom
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
Definition: RISCVInstrInfo.cpp:1258
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::MachineFrameInfo::getObjectAlign
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
Definition: MachineFrameInfo.h:467
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
llvm::TargetStackID::ScalableVector
@ ScalableVector
Definition: TargetFrameLowering.h:30
llvm::MCInstBuilder
Definition: MCInstBuilder.h:21
llvm::MachineBasicBlock::getLastNonDebugInstr
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Definition: MachineBasicBlock.cpp:266
MachineFunctionPass.h
isConvertibleToVMV_V_V
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
Definition: RISCVInstrInfo.cpp:125
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::X86ISD::FMSUB
@ FMSUB
Definition: X86ISelLowering.h:556
llvm::MachineFunction::getName
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Definition: MachineFunction.cpp:546
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:657
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:229
llvm::MachineInstrBuilder::addMemOperand
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Definition: MachineInstrBuilder.h:202
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::RISCVInstrInfo::isCopyInstrImpl
Optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1079
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
CASE_VFMA_SPLATS
#define CASE_VFMA_SPLATS(OP)
Definition: RISCVInstrInfo.cpp:1421
RISCV.h
llvm::MachineInstr::MIFlag
MIFlag
Definition: MachineInstr.h:80
llvm::SlotIndex::getRegSlot
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:254
llvm::LiveIntervals::getInterval
LiveInterval & getInterval(Register Reg)
Definition: LiveIntervals.h:114
llvm::MachineFunction
Definition: MachineFunction.h:241
CASE_VFMA_OPCODE_LMULS
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
Definition: RISCVInstrInfo.cpp:1417
llvm::MipsII::MO_TPREL_LO
@ MO_TPREL_LO
Definition: MipsBaseInfo.h:74
llvm::MachineBasicBlock::succ_empty
bool succ_empty() const
Definition: MachineBasicBlock.h:351
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineFrameInfo::setStackID
void setStackID(int ObjectIdx, uint8_t ID)
Definition: MachineFrameInfo.h:704
llvm::RISCVInstrInfo
Definition: RISCVInstrInfo.h:44
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:552
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1656
CASE_WIDEOP_OPCODE_LMULS
#define CASE_WIDEOP_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1684
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:179
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:57
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:489
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:120
llvm::MCInstBuilder::addImm
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Definition: MCInstBuilder.h:37
llvm::RISCVInstrInfo::movImm
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:630
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:102
uint32_t
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:836
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::RISCVInstrInfo::buildOutlinedFrame
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Definition: RISCVInstrInfo.cpp:1359
llvm::RISCVInstrInfo::getVLENFactoredAmount
Register getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Definition: RISCVInstrInfo.cpp:1775
llvm::RISCVInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
Definition: RISCVInstrInfo.cpp:853
llvm::RISCVII::MO_PCREL_LO
@ MO_PCREL_LO
Definition: RISCVBaseInfo.h:166
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
getCondFromBranchOpc
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
Definition: RISCVInstrInfo.cpp:679
llvm::RISCVOp::OPERAND_UIMM4
@ OPERAND_UIMM4
Definition: RISCVBaseInfo.h:187
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:777
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:135
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineBasicBlock::addLiveIn
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
Definition: MachineBasicBlock.h:371
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:380
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
CASE_WIDEOP_CHANGE_OPCODE_LMULS
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
Definition: RISCVInstrInfo.cpp:1701
llvm::LLVMContext::diagnose
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Definition: LLVMContext.cpp:228
llvm::RISCVVType::getVLMUL
static RISCVII::VLMUL getVLMUL(unsigned VType)
Definition: RISCVBaseInfo.h:366
llvm::MCInstrInfo
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:25
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:607
llvm::RISCVInstrInfo::getOutliningCandidateInfo
outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
Definition: RISCVInstrInfo.cpp:1269
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:276
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:637
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:46
llvm::DestSourcePair
Definition: TargetInstrInfo.h:68
get
Should compile to something r4 addze r3 instead we get
Definition: README.txt:24
CASE_WIDEOP_OPCODE_LMULS_MF4
#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1677
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:325
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1311
llvm::RISCVII::MO_CALL
@ MO_CALL
Definition: RISCVBaseInfo.h:162
llvm::MachineInstr::hasUnmodeledSideEffects
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
Definition: MachineInstr.cpp:1440
llvm::ISD::INLINEASM_BR
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:987
llvm::RISCVII::MO_DIRECT_FLAG_MASK
@ MO_DIRECT_FLAG_MASK
Definition: RISCVBaseInfo.h:178
llvm::RegScavenger::setRegUsed
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Definition: RegisterScavenging.cpp:53
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:163
RISCVInstrInfo.h
llvm::LiveIntervals
Definition: LiveIntervals.h:54
llvm::RISCVInstrInfo::isStoreToStackSlot
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Definition: RISCVInstrInfo.cpp:96
llvm::RISCVCC::COND_GE
@ COND_GE
Definition: RISCVInstrInfo.h:34
llvm::MachineRegisterInfo::clearVirtRegs
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
Definition: MachineRegisterInfo.cpp:202
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:323
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:137
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:416
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:474
llvm::RISCVInstrInfo::isAsCheapAsAMove
bool isAsCheapAsAMove(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:1057
llvm::RISCVInstrInfo::commuteInstructionImpl
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Definition: RISCVInstrInfo.cpp:1569
RISCVSubtarget.h
llvm::RISCVInstrInfo::loadRegFromStackSlot
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Definition: RISCVInstrInfo.cpp:538
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:508
llvm::RISCVInstrInfo::copyPhysReg
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
Definition: RISCVInstrInfo.cpp:256
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:107
MachineOutlinerDefault
@ MachineOutlinerDefault
Definition: RISCVInstrInfo.cpp:1266
llvm::RISCVCC::CondCode
CondCode
Definition: RISCVInstrInfo.h:30
CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
Definition: RISCVInstrInfo.cpp:1694
SmallVector.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1008
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:272
MachineInstrBuilder.h
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::TargetInstrInfo::findCommutedOpIndices
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
Definition: TargetInstrInfo.cpp:296
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
N
#define N
llvm::RISCVInstrInfo::verifyInstruction
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
Definition: RISCVInstrInfo.cpp:1103
RISCVMachineFunctionInfo.h
llvm::LiveRange::getSegmentContaining
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
Definition: LiveInterval.h:400
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::MachineBasicBlock::empty
bool empty() const
Definition: MachineBasicBlock.h:244
llvm::MCInstBuilder::addReg
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
Definition: MCInstBuilder.h:31
CASE_VFMA_CHANGE_OPCODE_LMULS
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
Definition: RISCVInstrInfo.cpp:1560
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:100
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1094
isRVVWholeLoadStore
static bool isRVVWholeLoadStore(unsigned Opcode)
Definition: RISCVInstrInfo.cpp:1847
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::LiveVariables
Definition: LiveVariables.h:46
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:62
llvm::RISCVII::MO_TPREL_ADD
@ MO_TPREL_ADD
Definition: RISCVBaseInfo.h:171
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::cl::desc
Definition: CommandLine.h:412
RegisterScavenging.h
llvm::RISCVSubtarget::hasStdExtM
bool hasStdExtM() const
Definition: RISCVSubtarget.h:137
CASE_VFMA_CHANGE_OPCODE_SPLATS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
Definition: RISCVInstrInfo.cpp:1564
llvm::MachineInstrBundleIterator< const MachineInstr >
TargetRegistry.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:75
llvm::AVRII::MO_LO
@ MO_LO
On a symbol operand, this represents the lo part.
Definition: AVRInstrInfo.h:52
llvm::RISCVOp::OPERAND_FIRST_RISCV_IMM
@ OPERAND_FIRST_RISCV_IMM
Definition: RISCVBaseInfo.h:184
llvm::MCInstrDesc::operands
iterator_range< const_opInfo_iterator > operands() const
Definition: MCInstrDesc.h:235
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:274
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:308
llvm::RISCVInstrInfo::getInstSizeInBytes
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
Definition: RISCVInstrInfo.cpp:967
llvm::RISCVII::MO_PCREL_HI
@ MO_PCREL_HI
Definition: RISCVBaseInfo.h:167
llvm::MachineOperand::isIdenticalTo
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
Definition: MachineOperand.cpp:287
llvm::AVRII::MO_HI
@ MO_HI
On a symbol operand, this represents the hi part.
Definition: AVRInstrInfo.h:55
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::RISCVCC::COND_NE
@ COND_NE
Definition: RISCVInstrInfo.h:32
RISCVTargetMachine.h