//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h -----*- C++ -*-//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains some helper functions which try to clean up artifacts
// such as G_TRUNCs/G_[ZSA]EXTs that were created during legalization to make
// the types match. This file also contains some combines of merges that happen
// at the end of the legalization.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H

#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "legalizer"
using namespace llvm::MIPatternMatch;

namespace llvm {

class LegalizationArtifactCombiner {
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  const LegalizerInfo &LI;

  static bool isArtifactCast(unsigned Opc) {
    switch (Opc) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
      return true;
    default:
      return false;
    }
  }

public:
  LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const LegalizerInfo &LI)
      : Builder(B), MRI(MRI), LI(LI) {}
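
  // Typical usage (illustrative sketch only; the Legalizer-pass plumbing
  // around it is assumed, not defined in this header):
  //   LegalizationArtifactCombiner ArtCombiner(Builder, MF.getRegInfo(), LI);
  //   SmallVector<MachineInstr *, 4> DeadInsts;
  //   if (ArtCombiner.tryCombineInstruction(MI, DeadInsts, WrapperObserver))
  //     ...; // MI was combined away; DeadInsts now awaits deletion.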

  bool tryCombineAnyExt(MachineInstr &MI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // aext(trunc x) -> aext/copy/trunc x
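    // Illustrative example (hypothetical MIR, not from this file):
    //   %1:_(s16) = G_TRUNC %0(s32)
    //   %2:_(s64) = G_ANYEXT %1(s16)
    // combines to
    //   %2:_(s64) = G_ANYEXT %0(s32)
    // (buildAnyExtOrTrunc instead emits a COPY or a G_TRUNC when the
    // destination is not wider than %0).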
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
      Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // aext([asz]ext x) -> [asz]ext x
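    // Illustrative example (hypothetical MIR): the outer extend adopts the
    // inner extend's opcode and source:
    //   %1:_(s32) = G_SEXT %0(s8)
    //   %2:_(s64) = G_ANYEXT %1(s32)
    // combines to
    //   %2:_(s64) = G_SEXT %0(s8)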
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)),
                                                    m_GZExt(m_Reg(ExtSrc)))))) {
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *ExtMI, DeadInsts);
      return true;
    }

    // Try to fold aext(g_constant) when the larger constant type is legal.
    // Can't use MIPattern because we don't have a specific constant in mind.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }

  bool tryCombineZExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs,
                      GISelObserverWrapper &Observer) {
    assert(MI.getOpcode() == TargetOpcode::G_ZEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // zext(trunc x) -> and (aext/copy/trunc x), mask
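    // Illustrative example (hypothetical MIR): the high bits are cleared
    // with an explicit mask instead:
    //   %1:_(s16) = G_TRUNC %0(s32)
    //   %2:_(s32) = G_ZEXT %1(s16)
    // combines to
    //   %3:_(s32) = G_CONSTANT i32 65535
    //   %2:_(s32) = G_AND %0, %3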
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
          isConstantUnsupported(DstTy))
        return false;
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
      LLT SrcTy = MRI.getType(SrcReg);
      APInt Mask = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
      auto MIBMask = Builder.buildConstant(
          DstTy, Mask.zext(DstTy.getScalarSizeInBits()));
      Builder.buildAnd(DstReg, Builder.buildAnyExtOrTrunc(DstTy, TruncSrc),
                       MIBMask);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // zext(zext x) -> (zext x)
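    // Illustrative example (hypothetical MIR): the outer G_ZEXT is rewritten
    // in place to read the inner zext's source:
    //   %1:_(s32) = G_ZEXT %0(s8)
    //   %2:_(s64) = G_ZEXT %1(s32)
    // combines to
    //   %2:_(s64) = G_ZEXT %0(s8)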
    Register ZextSrc;
    if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
      Observer.changingInstr(MI);
      MI.getOperand(1).setReg(ZextSrc);
      Observer.changedInstr(MI);
      UpdatedDefs.push_back(DstReg);
      markDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // Try to fold zext(g_constant) when the larger constant type is legal.
    // Can't use MIPattern because we don't have a specific constant in mind.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }

  bool tryCombineSExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_SEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // sext(trunc x) -> (sext_inreg (aext/copy/trunc x), c)
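    // Illustrative example (hypothetical MIR):
    //   %1:_(s16) = G_TRUNC %0(s32)
    //   %2:_(s32) = G_SEXT %1(s16)
    // combines to
    //   %2:_(s32) = G_SEXT_INREG %0, 16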
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_SEXT_INREG, {DstTy}}))
        return false;
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
      LLT SrcTy = MRI.getType(SrcReg);
      uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
      Builder.buildInstr(
          TargetOpcode::G_SEXT_INREG, {DstReg},
          {Builder.buildAnyExtOrTrunc(DstTy, TruncSrc), SizeInBits});
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // sext(zext x) -> (zext x)
    // sext(sext x) -> (sext x)
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GZExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)))))) {
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }

  bool tryCombineTrunc(MachineInstr &MI,
                       SmallVectorImpl<MachineInstr *> &DeadInsts,
                       SmallVectorImpl<Register> &UpdatedDefs,
                       GISelObserverWrapper &Observer) {
    assert(MI.getOpcode() == TargetOpcode::G_TRUNC);

    Builder.setInstr(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // Try to fold trunc(g_constant) when the smaller constant type is legal.
    // Can't use MIPattern because we don't have a specific constant in mind.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().trunc(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }

    // Try to fold trunc(merge) to directly use the source of the merge.
    // This gets rid of large, difficult-to-legalize merges.
    if (SrcMI->getOpcode() == TargetOpcode::G_MERGE_VALUES) {
      const Register MergeSrcReg = SrcMI->getOperand(1).getReg();
      const LLT MergeSrcTy = MRI.getType(MergeSrcReg);
      const LLT DstTy = MRI.getType(DstReg);

      // We can only fold if the types are scalar
      const unsigned DstSize = DstTy.getSizeInBits();
      const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();
      if (!DstTy.isScalar() || !MergeSrcTy.isScalar())
        return false;

      if (DstSize < MergeSrcSize) {
        // When the merge source is larger than the destination, we can just
        // truncate the merge source directly.
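        // Illustrative example (hypothetical MIR):
        //   %2:_(s128) = G_MERGE_VALUES %0(s64), %1(s64)
        //   %3:_(s32) = G_TRUNC %2(s128)
        // combines to
        //   %3:_(s32) = G_TRUNC %0(s64)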
        if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
          return false;

        LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
                          << MI);

        Builder.buildTrunc(DstReg, MergeSrcReg);
        UpdatedDefs.push_back(DstReg);
      } else if (DstSize == MergeSrcSize) {
        // If the sizes match we can simply try to replace the register
        LLVM_DEBUG(
            dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
                   << MI);
        replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
                              Observer);
      } else if (DstSize % MergeSrcSize == 0) {
        // If the trunc size is a multiple of the merge source size we can use
        // a smaller merge instead.
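        // Illustrative example (hypothetical MIR):
        //   %4:_(s128) = G_MERGE_VALUES %0(s32), %1(s32), %2(s32), %3(s32)
        //   %5:_(s64) = G_TRUNC %4(s128)
        // combines to
        //   %5:_(s64) = G_MERGE_VALUES %0, %1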
        if (isInstUnsupported(
                {TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
          return false;

        LLVM_DEBUG(
            dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
                   << MI);

        const unsigned NumSrcs = DstSize / MergeSrcSize;
        assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
               "trunc(merge) should require fewer inputs than merge");
        SmallVector<Register, 2> SrcRegs(NumSrcs);
        for (unsigned i = 0; i < NumSrcs; ++i)
          SrcRegs[i] = SrcMI->getOperand(i + 1).getReg();

        Builder.buildMerge(DstReg, SrcRegs);
        UpdatedDefs.push_back(DstReg);
      } else {
        // Unable to combine
        return false;
      }

      markInstAndDefDead(MI, *SrcMI, DeadInsts);
      return true;
    }

    // trunc(trunc) -> trunc
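    // Illustrative example (hypothetical MIR):
    //   %1:_(s32) = G_TRUNC %0(s64)
    //   %2:_(s16) = G_TRUNC %1(s32)
    // combines to
    //   %2:_(s16) = G_TRUNC %0(s64)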
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      // Always combine trunc(trunc) since the eventual resulting trunc must be
      // legal anyway as it must be legal for all outputs of the consumer type
      // set.
      LLVM_DEBUG(dbgs() << ".. Combine G_TRUNC(G_TRUNC): " << MI);

      Builder.buildTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(TruncSrc), DeadInsts);
      return true;
    }

    return false;
  }

  /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF).
  bool tryFoldImplicitDef(MachineInstr &MI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs) {
    unsigned Opcode = MI.getOpcode();
    assert(Opcode == TargetOpcode::G_ANYEXT || Opcode == TargetOpcode::G_ZEXT ||
           Opcode == TargetOpcode::G_SEXT);

    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                                           MI.getOperand(1).getReg(), MRI)) {
      Builder.setInstr(MI);
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstTy = MRI.getType(DstReg);

      if (Opcode == TargetOpcode::G_ANYEXT) {
        // G_ANYEXT (G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
        if (!isInstLegal({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
          return false;
        LLVM_DEBUG(dbgs() << ".. Combine G_ANYEXT(G_IMPLICIT_DEF): " << MI;);
        Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, {DstReg}, {});
        UpdatedDefs.push_back(DstReg);
      } else {
        // G_[SZ]EXT (G_IMPLICIT_DEF) -> G_CONSTANT 0 because the top
        // bits will be 0 for G_ZEXT and 0 or 1 for G_SEXT.
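        // Illustrative example (hypothetical MIR):
        //   %0:_(s8) = G_IMPLICIT_DEF
        //   %1:_(s32) = G_ZEXT %0(s8)
        // combines to
        //   %1:_(s32) = G_CONSTANT i32 0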
        if (isConstantUnsupported(DstTy))
          return false;
        LLVM_DEBUG(dbgs() << ".. Combine G_[SZ]EXT(G_IMPLICIT_DEF): " << MI;);
        Builder.buildConstant(DstReg, 0);
        UpdatedDefs.push_back(DstReg);
      }

      markInstAndDefDead(MI, *DefMI, DeadInsts);
      return true;
    }
    return false;
  }

  bool tryFoldUnmergeCast(MachineInstr &MI, MachineInstr &CastMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

    const unsigned CastOpc = CastMI.getOpcode();

    if (!isArtifactCast(CastOpc))
      return false;

    const unsigned NumDefs = MI.getNumOperands() - 1;

    const Register CastSrcReg = CastMI.getOperand(1).getReg();
    const LLT CastSrcTy = MRI.getType(CastSrcReg);
    const LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(MI.getOperand(NumDefs).getReg());

    const unsigned CastSrcSize = CastSrcTy.getSizeInBits();
    const unsigned DestSize = DestTy.getSizeInBits();

    if (CastOpc == TargetOpcode::G_TRUNC) {
      if (SrcTy.isVector() && SrcTy.getScalarType() == DestTy.getScalarType()) {
        // %1:_(<4 x s8>) = G_TRUNC %0(<4 x s32>)
        // %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %1
        // =>
        // %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %0
        // %2:_(s8) = G_TRUNC %6
        // %3:_(s8) = G_TRUNC %7
        // %4:_(s8) = G_TRUNC %8
        // %5:_(s8) = G_TRUNC %9

        unsigned UnmergeNumElts =
            DestTy.isVector() ? CastSrcTy.getNumElements() / NumDefs : 1;
        LLT UnmergeTy = CastSrcTy.changeNumElements(UnmergeNumElts);

        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {UnmergeTy, CastSrcTy}}))
          return false;

        Builder.setInstr(MI);
        auto NewUnmerge = Builder.buildUnmerge(UnmergeTy, CastSrcReg);

        SmallVector<Register, 8> Regs(NumDefs);
        for (unsigned I = 0; I != NumDefs; ++I)
          Builder.buildTrunc(MI.getOperand(I), NewUnmerge.getReg(I));

        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }

      if (CastSrcTy.isScalar() && SrcTy.isScalar() && !DestTy.isVector()) {
        // %1:_(s16) = G_TRUNC %0(s32)
        // %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %1
        // =>
        // %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0

        // Unmerge(trunc) can be combined if the trunc source size is a
        // multiple of the unmerge destination size.
        if (CastSrcSize % DestSize != 0)
          return false;

        // Check if the new unmerge is supported
        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {DestTy, CastSrcTy}}))
          return false;

        // Gather the original destination registers and create new ones for
        // the unused bits.
        const unsigned NewNumDefs = CastSrcSize / DestSize;
        SmallVector<Register, 2> DstRegs(NewNumDefs);
        for (unsigned Idx = 0; Idx < NewNumDefs; ++Idx) {
          if (Idx < NumDefs)
            DstRegs[Idx] = MI.getOperand(Idx).getReg();
          else
            DstRegs[Idx] = MRI.createGenericVirtualRegister(DestTy);
        }

        // Build new unmerge
        Builder.setInstr(MI);
        Builder.buildUnmerge(DstRegs, CastSrcReg);
        UpdatedDefs.append(DstRegs.begin(), DstRegs.begin() + NumDefs);
        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }
    }

    // TODO: support combines with other casts as well
    return false;
  }

  static bool canFoldMergeOpcode(unsigned MergeOp, unsigned ConvertOp,
                                 LLT OpTy, LLT DestTy) {
    // Check if we found a definition that is like G_MERGE_VALUES.
    switch (MergeOp) {
    default:
      return false;
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_MERGE_VALUES:
      // The convert operation that we will need to insert is
      // going to convert the input of that type of instruction (scalar)
      // to the destination type (DestTy).
      // The conversion needs to stay in the same domain (scalar to scalar
      // and vector to vector), so if we were to allow folding the merge
      // we would need to insert some bitcasts.
      // E.g.,
      // <2 x s16> = build_vector s16, s16
      // <2 x s32> = zext <2 x s16>
      // <2 x s16>, <2 x s16> = unmerge <2 x s32>
      //
      // As-is, the folding would produce:
      // <2 x s16> = zext s16  <-- scalar to vector
      // <2 x s16> = zext s16  <-- scalar to vector
      // which is invalid.
      // Instead we would want to generate:
      // s32 = zext s16
      // <2 x s16> = bitcast s32
      // s32 = zext s16
      // <2 x s16> = bitcast s32
      //
      // That is not done yet.
      if (ConvertOp == 0)
        return true;
      return !DestTy.isVector() && OpTy.isVector();
    case TargetOpcode::G_CONCAT_VECTORS: {
      if (ConvertOp == 0)
        return true;
      if (!DestTy.isVector())
        return false;

      const unsigned OpEltSize = OpTy.getElementType().getSizeInBits();

      // Don't handle scalarization with a cast that isn't in the same
      // direction as the vector cast. This could be handled, but it would
      // require more intermediate unmerges.
      if (ConvertOp == TargetOpcode::G_TRUNC)
        return DestTy.getSizeInBits() <= OpEltSize;
      return DestTy.getSizeInBits() >= OpEltSize;
    }
    }
  }

  /// Try to replace DstReg with SrcReg or build a COPY instruction
  /// depending on the register constraints.
  static void replaceRegOrBuildCopy(Register DstReg, Register SrcReg,
                                    MachineRegisterInfo &MRI,
                                    MachineIRBuilder &Builder,
                                    SmallVectorImpl<Register> &UpdatedDefs,
                                    GISelObserverWrapper &Observer) {
    if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
      Builder.buildCopy(DstReg, SrcReg);
      UpdatedDefs.push_back(DstReg);
      return;
    }
    SmallVector<MachineInstr *, 4> UseMIs;
    // Get the users and notify the observer before replacing.
    for (auto &UseMI : MRI.use_instructions(DstReg)) {
      UseMIs.push_back(&UseMI);
      Observer.changingInstr(UseMI);
    }
    // Replace the registers.
    MRI.replaceRegWith(DstReg, SrcReg);
    UpdatedDefs.push_back(SrcReg);
    // Notify the observer that we changed the instructions.
    for (auto *UseMI : UseMIs)
      Observer.changedInstr(*UseMI);
  }

  bool tryCombineMerges(MachineInstr &MI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        SmallVectorImpl<Register> &UpdatedDefs,
                        GISelObserverWrapper &Observer) {
    assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

    unsigned NumDefs = MI.getNumOperands() - 1;
    MachineInstr *SrcDef =
        getDefIgnoringCopies(MI.getOperand(NumDefs).getReg(), MRI);
    if (!SrcDef)
      return false;

    LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
    LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
    MachineInstr *MergeI = SrcDef;
    unsigned ConvertOp = 0;

    // Handle intermediate conversions
    unsigned SrcOp = SrcDef->getOpcode();
    if (isArtifactCast(SrcOp)) {
      ConvertOp = SrcOp;
      MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
    }

    if (!MergeI || !canFoldMergeOpcode(MergeI->getOpcode(), ConvertOp, OpTy,
                                       DestTy)) {
      // We might have a chance to combine later by trying to combine
      // unmerge(cast) first.
      return tryFoldUnmergeCast(MI, *SrcDef, DeadInsts, UpdatedDefs);
    }

    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;

    if (NumMergeRegs < NumDefs) {
      if (NumDefs % NumMergeRegs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform to UNMERGEs, for example
      //   %1 = G_MERGE_VALUES %4, %5
      //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
      // to
      //   %9, %10 = G_UNMERGE_VALUES %4
      //   %11, %12 = G_UNMERGE_VALUES %5

      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
        SmallVector<Register, 2> DstRegs;
        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
             ++j, ++DefIdx)
          DstRegs.push_back(MI.getOperand(DefIdx).getReg());

        if (ConvertOp) {
          LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());

          // This is a vector that is being split and casted. Extract to the
          // element type, and do the conversion on the scalars (or smaller
          // vectors).
          LLT MergeEltTy = MergeSrcTy.divide(NewNumDefs);

          // Handle split to smaller vectors, with conversions.
          // %2(<8 x s8>) = G_CONCAT_VECTORS %0(<4 x s8>), %1(<4 x s8>)
          // %3(<8 x s16>) = G_SEXT %2
          // %4(<2 x s16>), %5(<2 x s16>), %6(<2 x s16>), %7(<2 x s16>) =
          //   G_UNMERGE_VALUES %3
          //
          // =>
          //
          // %8(<2 x s8>), %9(<2 x s8>) = G_UNMERGE_VALUES %0
          // %10(<2 x s8>), %11(<2 x s8>) = G_UNMERGE_VALUES %1
          // %4(<2 x s16>) = G_SEXT %8
          // %5(<2 x s16>) = G_SEXT %9
          // %6(<2 x s16>) = G_SEXT %10
          // %7(<2 x s16>) = G_SEXT %11

          SmallVector<Register, 4> TmpRegs(NewNumDefs);
          for (unsigned k = 0; k < NewNumDefs; ++k)
            TmpRegs[k] = MRI.createGenericVirtualRegister(MergeEltTy);

          Builder.buildUnmerge(TmpRegs, MergeI->getOperand(Idx + 1).getReg());

          for (unsigned k = 0; k < NewNumDefs; ++k)
            Builder.buildInstr(ConvertOp, {DstRegs[k]}, {TmpRegs[k]});
        } else {
          Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
        }
        UpdatedDefs.append(DstRegs.begin(), DstRegs.end());
      }

    } else if (NumMergeRegs > NumDefs) {
      if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform to MERGEs
      //   %6 = G_MERGE_VALUES %17, %18, %19, %20
      //   %7, %8 = G_UNMERGE_VALUES %6
      // to
      //   %7 = G_MERGE_VALUES %17, %18
      //   %8 = G_MERGE_VALUES %19, %20

      const unsigned NumRegs = NumMergeRegs / NumDefs;
      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
        SmallVector<Register, 2> Regs;
        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
             ++j, ++Idx)
          Regs.push_back(MergeI->getOperand(Idx).getReg());

        Register DefReg = MI.getOperand(DefIdx).getReg();
        Builder.buildMerge(DefReg, Regs);
        UpdatedDefs.push_back(DefReg);
      }

    } else {
      LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());

      if (!ConvertOp && DestTy != MergeSrcTy)
        ConvertOp = TargetOpcode::G_BITCAST;
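      // Illustrative example (hypothetical MIR): same operand count but
      // different per-element types, so each pair is bitcast:
      //   %2:_(<4 x s16>) = G_CONCAT_VECTORS %0(<2 x s16>), %1(<2 x s16>)
      //   %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(<4 x s16>)
      // combines to
      //   %3:_(s32) = G_BITCAST %0(<2 x s16>)
      //   %4:_(s32) = G_BITCAST %1(<2 x s16>)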

      if (ConvertOp) {
        Builder.setInstr(MI);

        for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
          Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
          Register DefReg = MI.getOperand(Idx).getReg();
          Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
          UpdatedDefs.push_back(DefReg);
        }

        markInstAndDefDead(MI, *MergeI, DeadInsts);
        return true;
      }

      assert(DestTy == MergeSrcTy &&
             "Bitcast and the other kinds of conversions should "
             "have happened earlier");

      Builder.setInstr(MI);
      for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
        Register DstReg = MI.getOperand(Idx).getReg();
        Register SrcReg = MergeI->getOperand(Idx + 1).getReg();
        replaceRegOrBuildCopy(DstReg, SrcReg, MRI, Builder, UpdatedDefs,
                              Observer);
      }
    }

    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }

  static bool isMergeLikeOpcode(unsigned Opc) {
    switch (Opc) {
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_CONCAT_VECTORS:
      return true;
    default:
      return false;
    }
  }

  bool tryCombineExtract(MachineInstr &MI,
                         SmallVectorImpl<MachineInstr *> &DeadInsts,
                         SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);

    // Try to use the source registers from a G_MERGE_VALUES
    //
    //   %2 = G_MERGE_VALUES %0, %1
    //   %3 = G_EXTRACT %2, N
    // =>
    //
    // for N < %2.getSizeInBits() / 2
    //   %3 = G_EXTRACT %0, N
    //
    // for N >= %2.getSizeInBits() / 2
    //   %3 = G_EXTRACT %1, (N - %0.getSizeInBits())

    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
    if (!MergeI || !isMergeLikeOpcode(MergeI->getOpcode()))
      return false;

    Register DstReg = MI.getOperand(0).getReg();
    LLT DstTy = MRI.getType(DstReg);
    LLT SrcTy = MRI.getType(SrcReg);

    // TODO: Do we need to check if the resulting extract is supported?
    unsigned ExtractDstSize = DstTy.getSizeInBits();
    unsigned Offset = MI.getOperand(2).getImm();
    unsigned NumMergeSrcs = MergeI->getNumOperands() - 1;
    unsigned MergeSrcSize = SrcTy.getSizeInBits() / NumMergeSrcs;
    unsigned MergeSrcIdx = Offset / MergeSrcSize;

    // Compute the offset of the last bit the extract needs.
    unsigned EndMergeSrcIdx = (Offset + ExtractDstSize - 1) / MergeSrcSize;
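    // Illustrative worked example (hypothetical values): extracting s16 at
    // offset 40 from a merge of four s32 inputs gives MergeSrcIdx = 40/32 = 1
    // and EndMergeSrcIdx = (40 + 16 - 1)/32 = 1, so the extract reads only
    // merge input 1 and becomes a G_EXTRACT of it at offset 40 - 32 = 8.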

    // Can't handle the case where the extract spans multiple inputs.
    if (MergeSrcIdx != EndMergeSrcIdx)
      return false;

    // TODO: We could modify MI in place in most cases.
    Builder.setInstr(MI);
    Builder.buildExtract(DstReg, MergeI->getOperand(MergeSrcIdx + 1).getReg(),
                         Offset - MergeSrcIdx * MergeSrcSize);
    UpdatedDefs.push_back(DstReg);
    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }

  /// Try to combine away MI.
  /// Returns true if it combined away the MI.
  /// Adds instructions that are dead as a result of the combine
  /// into DeadInsts, which can include MI.
  bool tryCombineInstruction(MachineInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    // This might be a recursive call, and we might have DeadInsts already
    // populated. To avoid bad things happening later with multiple vreg defs
    // etc, process the dead instructions now if any.
    if (!DeadInsts.empty())
      deleteMarkedDeadInsts(DeadInsts, WrapperObserver);

    // Put here every vreg that was redefined in such a way that it's at least
    // possible that one (or more) of its users (immediate or COPY-separated)
    // could become artifact combinable with the new definition (or the
    // instruction reachable from it through a chain of copies if any).
    SmallVector<Register, 4> UpdatedDefs;
    bool Changed = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case TargetOpcode::G_ANYEXT:
      Changed = tryCombineAnyExt(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_ZEXT:
      Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_SEXT:
      Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_UNMERGE_VALUES:
      Changed = tryCombineMerges(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_MERGE_VALUES:
      // If any of the users of this merge are an unmerge, then add them to the
      // artifact worklist in case there's folding that can be done looking up.
      for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
        if (U.getOpcode() == TargetOpcode::G_UNMERGE_VALUES ||
            U.getOpcode() == TargetOpcode::G_TRUNC) {
          UpdatedDefs.push_back(MI.getOperand(0).getReg());
          break;
        }
      }
      break;
    case TargetOpcode::G_EXTRACT:
      Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_TRUNC:
      Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      if (!Changed) {
        // Try to combine truncates away even if they are legal. As all
        // artifact combines at the moment look only "up" the def-use chains,
        // we achieve that by throwing truncates' users (looking through
        // copies) back into the ArtifactList.
        UpdatedDefs.push_back(MI.getOperand(0).getReg());
      }
      break;
    }
    // If the main loop through the ArtifactList found at least one combinable
    // pair of artifacts, not only combine it away (as done above), but also
    // follow the def-use chain from there to combine everything that can be
    // combined within this def-use chain of artifacts.
    while (!UpdatedDefs.empty()) {
      Register NewDef = UpdatedDefs.pop_back_val();
      assert(NewDef.isVirtual() && "Unexpected redefinition of a physreg");
      for (MachineInstr &Use : MRI.use_instructions(NewDef)) {
        switch (Use.getOpcode()) {
        // Keep this list in sync with the list of all artifact combines.
        case TargetOpcode::G_ANYEXT:
        case TargetOpcode::G_ZEXT:
        case TargetOpcode::G_SEXT:
        case TargetOpcode::G_UNMERGE_VALUES:
        case TargetOpcode::G_EXTRACT:
        case TargetOpcode::G_TRUNC:
          // Adding Use to ArtifactList.
          WrapperObserver.changedInstr(Use);
          break;
        case TargetOpcode::COPY: {
          Register Copy = Use.getOperand(0).getReg();
          if (Copy.isVirtual())
            UpdatedDefs.push_back(Copy);
          break;
        }
        default:
          // If we do not have an artifact combine for the opcode, there is no
          // point in adding it to the ArtifactList as nothing interesting will
          // be done to it anyway.
          break;
        }
      }
    }
    return Changed;
  }

private:
  static unsigned getArtifactSrcReg(const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case TargetOpcode::COPY:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_UNMERGE_VALUES:
      return MI.getOperand(MI.getNumOperands() - 1).getReg();
    case TargetOpcode::G_EXTRACT:
      return MI.getOperand(1).getReg();
    default:
      llvm_unreachable("Not a legalization artifact");
    }
  }

  /// Mark a def of one of MI's original operands, DefMI, as dead if changing
  /// MI (either by killing it or changing operands) results in DefMI being
  /// dead too. In-between COPYs or artifact-casts are also collected if they
  /// are dead.
  /// MI is not marked dead.
  void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
                   SmallVectorImpl<MachineInstr *> &DeadInsts) {
    // Collect all the copy instructions that are made dead due to deleting
    // this instruction. Collect all of them until the Trunc(DefMI).
    // E.g.,
    //   %1(s1) = G_TRUNC %0(s32)
    //   %2(s1) = COPY %1(s1)
    //   %3(s1) = COPY %2(s1)
    //   %4(s32) = G_ANYEXT %3(s1)
    // In this case, we would have replaced %4 with a copy of %0,
    // and as a result, %3, %2, %1 are dead.
    MachineInstr *PrevMI = &MI;
    while (PrevMI != &DefMI) {
      unsigned PrevRegSrc = getArtifactSrcReg(*PrevMI);

      MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
      if (MRI.hasOneUse(PrevRegSrc)) {
        if (TmpDef != &DefMI) {
          assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
                  isArtifactCast(TmpDef->getOpcode())) &&
                 "Expecting copy or artifact cast here");

          DeadInsts.push_back(TmpDef);
        }
      } else
        break;
      PrevMI = TmpDef;
    }
    if (PrevMI == &DefMI && MRI.hasOneUse(DefMI.getOperand(0).getReg()))
      DeadInsts.push_back(&DefMI);
  }

  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
  /// dead due to MI being killed, then mark DefMI as dead too.
  /// Some of the combines (extends(trunc)) try to walk through redundant
  /// copies in between the extends and the truncs, and this attempts to
  /// collect the in-between copies if they're dead.
  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
    DeadInsts.push_back(&MI);
    markDefDead(MI, DefMI, DeadInsts);
  }

  /// Erase the dead instructions in the list and call the observer hooks.
  /// Normally the Legalizer will deal with erasing instructions that have been
  /// marked dead. However, for the trunc(ext(x)) cases we can end up trying to
  /// process instructions which have been marked dead, but otherwise break the
  /// MIR by introducing multiple vreg defs. For those cases, allow the combines
  /// to explicitly delete the instructions before we run into trouble.
  void deleteMarkedDeadInsts(SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    for (auto *DeadMI : DeadInsts) {
      LLVM_DEBUG(dbgs() << *DeadMI << "Is dead, eagerly deleting\n");
      WrapperObserver.erasingInstr(*DeadMI);
      DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
    }
    DeadInsts.clear();
  }

  /// Checks whether the instruction is unsupported: either the target
  /// legalizer info marks it Unsupported, or it has no rule for it at all.
  bool isInstUnsupported(const LegalityQuery &Query) const {
    using namespace LegalizeActions;
    auto Step = LI.getAction(Query);
    return Step.Action == Unsupported || Step.Action == NotFound;
  }

  bool isInstLegal(const LegalityQuery &Query) const {
    return LI.getAction(Query).Action == LegalizeActions::Legal;
  }

  bool isConstantUnsupported(LLT Ty) const {
    if (!Ty.isVector())
      return isInstUnsupported({TargetOpcode::G_CONSTANT, {Ty}});

    LLT EltTy = Ty.getElementType();
    return isInstUnsupported({TargetOpcode::G_CONSTANT, {EltTy}}) ||
           isInstUnsupported({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}});
  }

  /// Looks through copy instructions and returns the actual
  /// source register.
  unsigned lookThroughCopyInstrs(Register Reg) {
    Register TmpReg;
    while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
      if (MRI.getType(TmpReg).isValid())
        Reg = TmpReg;
      else
        break;
    }
    return Reg;
  }
};

} // namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H