1 //===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// The pass tries to use the 32-bit encoding for instructions when possible.
8 //===----------------------------------------------------------------------===//
9 //
10 
11 #include "AMDGPU.h"
12 #include "GCNSubtarget.h"
14 #include "Utils/AMDGPUBaseInfo.h"
15 #include "llvm/ADT/Statistic.h"
17 
#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instruction reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
  MachineFunction *MF;
  MachineRegisterInfo *MRI;
  const GCNSubtarget *ST;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;

public:
  static char ID;

public:
  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
  bool shouldShrinkTrue16(MachineInstr &MI) const;
  bool isKImmOperand(const MachineOperand &Src) const;
  bool isKUImmOperand(const MachineOperand &Src) const;
  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
  bool isReverseInlineImm(const MachineOperand &Src, int32_t &ReverseImm) const;
  void copyExtraImplicitOps(MachineInstr &NewMI, MachineInstr &MI) const;
  void shrinkScalarCompare(MachineInstr &MI) const;
  void shrinkMIMG(MachineInstr &MI) const;
  void shrinkMadFma(MachineInstr &MI) const;
  bool shrinkScalarLogicOp(MachineInstr &MI) const;
  bool tryReplaceDeadSDST(MachineInstr &MI) const;
  bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                     Register Reg, unsigned SubReg) const;
  bool instReadsReg(const MachineInstr *MI, unsigned Reg,
                    unsigned SubReg) const;
  bool instModifiesReg(const MachineInstr *MI, unsigned Reg,
                       unsigned SubReg) const;
  TargetInstrInfo::RegSubRegPair getSubRegForIndex(Register Reg, unsigned Sub,
                                                   unsigned I) const;
  void dropInstructionKeepingImpDefs(MachineInstr &MI) const;
  MachineInstr *matchSwap(MachineInstr &MovT) const;

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
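///
/// For example (illustrative MIR with hypothetical virtual registers, not
/// taken from a specific test):
///   %0:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
///   %1:vgpr_32 = V_ADD_F32_e32 %0, %2, implicit $mode, implicit $exec
/// can fold the move's literal into the user,
///   %1:vgpr_32 = V_ADD_F32_e32 123, %2, implicit $mode, implicit $exec
/// and delete the V_MOV if %0 has no remaining uses.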
bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
                                          bool TryToCommute) const {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Reg.isVirtual()) {
      MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
          if (MovSrc.isImm() &&
              (isInt<32>(MovSrc.getImm()) || isUInt<32>(MovSrc.getImm()))) {
            Src0.ChangeToImmediate(MovSrc.getImm());
            ConstantFolded = true;
          } else if (MovSrc.isFI()) {
            Src0.ChangeToFrameIndex(MovSrc.getIndex());
            ConstantFolded = true;
          } else if (MovSrc.isGlobal()) {
            Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                            MovSrc.getTargetFlags());
            ConstantFolded = true;
          }
        }

        if (ConstantFolded) {
          if (MRI->use_nodbg_empty(Reg))
            Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

/// Do not shrink the instruction if its registers are not expressible in the
/// shrunk encoding.
bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
  for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      assert(!Reg.isVirtual() && "Prior checks should ensure we only shrink "
                                 "True16 Instructions post-RA");
      if (AMDGPU::VGPR_32RegClass.contains(Reg) &&
          !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))
        return false;
    }
  }
  return true;
}

bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
                                                bool &IsUnsigned) const {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
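///
/// For example, the 32-bit literal 0x80000000 is not inlinable, but its
/// bit-reverse is 1, which is; so a hypothetical "v_mov_b32 v0, 0x80000000"
/// can become "v_bfrev_b32 v0, 1" and save the 4-byte literal.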
bool SIShrinkInstructions::isReverseInlineImm(const MachineOperand &Src,
                                              int32_t &ReverseImm) const {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
                                                MachineInstr &MI) const {
  MachineFunction &MF = *MI.getMF();
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().getNumImplicitUses() +
                    MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

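// For example (an illustrative sketch, not from a specific test):
// "s_cmp_eq_u32 s0, 0x3039" carries a 32-bit literal, but 0x3039 fits in
// 16 bits, so it can be re-encoded as "s_cmpk_eq_i32 s0, 0x3039" and drop
// the literal dword.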
void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  // cmpk requires src0 to be a register
  const MachineOperand &Src0 = MI.getOperand(0);
  if (!Src0.isReg())
    return;

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(Src1))) {
    MI.setDesc(NewDesc);
  }
}

// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
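// For example (hypothetical registers): an IMAGE_SAMPLE whose NSA address
// operands happen to land in v4, v5, v6 is already contiguous, so it can be
// re-encoded with a single VReg_96 address starting at v4.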
void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) const {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (!Info)
    return;

  uint8_t NewEncoding;
  switch (Info->MIMGEncoding) {
  case AMDGPU::MIMGEncGfx10NSA:
    NewEncoding = AMDGPU::MIMGEncGfx10Default;
    break;
  case AMDGPU::MIMGEncGfx11NSA:
    NewEncoding = AMDGPU::MIMGEncGfx11Default;
    break;
  default:
    return;
  }

  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords == 6) {
    RC = &AMDGPU::VReg_192RegClass;
  } else if (Info->VAddrDwords == 7) {
    RC = &AMDGPU::VReg_224RegClass;
  } else if (Info->VAddrDwords == 8) {
    RC = &AMDGPU::VReg_256RegClass;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  unsigned NextVgpr = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  for (unsigned Idx = 0; Idx < Info->VAddrOperands; ++Idx) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + Idx);
    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
    unsigned Dwords = TRI->getRegSizeInBits(Op.getReg(), *MRI) / 32;
    assert(Dwords > 0 && "Un-implemented for less than 32 bit regs");

    if (Idx == 0) {
      VgprBase = Vgpr;
      NextVgpr = Vgpr + Dwords;
    } else if (Vgpr == NextVgpr) {
      NextVgpr = Vgpr + Dwords;
    } else {
      return;
    }

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  // Further check for implicit tied operands - this may be present if TFE is
  // enabled
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode = AMDGPU::getMIMGOpcode(Info->BaseOpcode, NewEncoding,
                                             Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (int i = 1; i < Info->VAddrOperands; ++i)
    MI.removeOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (Info->VAddrOperands - 1));
  }
}

// Shrink MAD to MADAK/MADMK and FMA to FMAAK/FMAMK.
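// For example (illustrative): "v_fma_f32 v0, v1, v2, lit" with a non-inline
// literal in src2 can become "v_fmaak_f32 v0, v1, v2, lit", and
// "v_fma_f32 v0, v1, lit, v2" can become "v_fmamk_f32 v0, v1, lit, v2".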
void SIShrinkInstructions::shrinkMadFma(MachineInstr &MI) const {
  // Pre-GFX10 VOP3 instructions like MAD/FMA cannot take a literal operand so
  // there is no reason to try to shrink them.
  if (!ST->hasVOP3Literal())
    return;

  // There is no advantage to doing this pre-RA.
  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs))
    return;

  if (TII->hasAnyModifiersSet(MI))
    return;

  const unsigned Opcode = MI.getOpcode();
  MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand &Src1 = *TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  MachineOperand &Src2 = *TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  unsigned NewOpcode = AMDGPU::INSTRUCTION_LIST_END;

  bool Swap;

  // Detect "Dst = VSrc * VGPR + Imm" and convert to AK form.
  if (Src2.isImm() && !TII->isInlineConstant(Src2)) {
    if (Src1.isReg() && TRI->isVGPR(*MRI, Src1.getReg()))
      Swap = false;
    else if (Src0.isReg() && TRI->isVGPR(*MRI, Src0.getReg()))
      Swap = true;
    else
      return;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADAK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAAK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADAK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAAK_F16_t16
                                          : AMDGPU::V_FMAAK_F16;
      break;
    }
  }

  // Detect "Dst = VSrc * Imm + VGPR" and convert to MK form.
  if (Src2.isReg() && TRI->isVGPR(*MRI, Src2.getReg())) {
    if (Src1.isImm() && !TII->isInlineConstant(Src1))
      Swap = false;
    else if (Src0.isImm() && !TII->isInlineConstant(Src0))
      Swap = true;
    else
      return;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADMK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAMK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADMK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAMK_F16_t16
                                          : AMDGPU::V_FMAMK_F16;
      break;
    }
  }

  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
    return;

  if (AMDGPU::isTrue16Inst(NewOpcode) && !shouldShrinkTrue16(MI))
    return;

  if (Swap) {
    // Swap Src0 and Src1 by building a new instruction.
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(NewOpcode),
            MI.getOperand(0).getReg())
        .add(Src1)
        .add(Src0)
        .add(Src2)
        .setMIFlags(MI.getFlags());
    MI.eraseFromParent();
  } else {
    TII->removeModOperands(MI);
    MI.setDesc(TII->get(NewOpcode));
  }
}

/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \returns true if the caller should continue the machine function iterator
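///
/// For example (illustrative): "s_and_b32 s0, s0, 0xffffbfff" clears only
/// bit 14, so it can become "s_bitset0_b32 s0, 14"; and
/// "s_and_b32 s0, s0, 0xffffffc0" has the inlinable inverse 63, so it can
/// become "s_andn2_b32 s0, s0, 63".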
bool SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST->hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countTrailingOnes(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countTrailingZeros(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  } else {
    llvm_unreachable("unexpected opcode");
  }

  if (NewImm != 0) {
    if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
      MRI->setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
      MRI->setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
      return true;
    }

    if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
      const bool IsUndef = SrcReg->isUndef();
      const bool IsKill = SrcReg->isKill();
      MI.setDesc(TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {
        Src0->ChangeToImmediate(NewImm);
        // Remove the immediate and add the tied input.
        MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
                                          /*isImp*/ false, IsKill,
                                          /*isDead*/ false, IsUndef);
        MI.tieOperands(0, 2);
      } else {
        SrcImm->setImm(NewImm);
      }
    }
  }

  return false;
}

// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
bool SIShrinkInstructions::instAccessReg(
    iterator_range<MachineInstr::const_mop_iterator> &&R, Register Reg,
    unsigned SubReg) const {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Reg.isPhysical() && MO.getReg().isPhysical()) {
      if (TRI->regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Reg.isVirtual()) {
      LaneBitmask Overlap = TRI->getSubRegIndexLaneMask(SubReg) &
                            TRI->getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
                                        unsigned SubReg) const {
  return instAccessReg(MI->uses(), Reg, SubReg);
}

bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
                                           unsigned SubReg) const {
  return instAccessReg(MI->defs(), Reg, SubReg);
}

TargetInstrInfo::RegSubRegPair
SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
                                        unsigned I) const {
  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
    } else {
      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

void SIShrinkInstructions::dropInstructionKeepingImpDefs(
    MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().getNumImplicitUses() +
                    MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);
    if (!Op.isDef())
      continue;
    BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
  }

  MI.eraseFromParent();
}

// Match:
// mov t, x
// mov x, y
// mov y, t
//
// =>
//
// mov t, x (t is potentially dead and move eliminated)
// v_swap_b32 x, y
//
// Returns a pointer to the next valid instruction if it was able to create
// v_swap_b32.
//
// This must not be done too early, so as not to prevent folding that could
// remove the matched moves. It should preferably run before RA, to release
// the saved register, and possibly again after RA, which can insert copies
// too.
//
// This is really just a generic peephole that is not a canonical shrinking,
// although requirements match the pass placement and it reduces code size too.
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  if (!TRI->isVGPR(*MRI, X))
    return nullptr;

  if (MovT.hasRegisterImplicitUseOperand(AMDGPU::M0))
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  bool KilledT = false;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {

    MachineInstr *MovY = &*Iter;
    KilledT = MovY->killsRegister(T, TRI);

    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||
        !MovY->getOperand(1).isReg() ||
        MovY->getOperand(1).getReg() != T ||
        MovY->getOperand(1).getSubReg() != Tsub ||
        MovY->hasRegisterImplicitUseOperand(AMDGPU::M0))
      continue;

    Register Y = MovY->getOperand(0).getReg();
    unsigned Ysub = MovY->getOperand(0).getSubReg();

    if (!TRI->isVGPR(*MRI, Y))
      continue;

    MachineInstr *MovX = nullptr;
    for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
         I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
          instModifiesReg(&*I, T, Tsub) ||
          (MovX && instModifiesReg(&*I, X, Xsub))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }
      // Implicit use of M0 is an indirect move.
      if (I->hasRegisterImplicitUseOperand(AMDGPU::M0))
        continue;

      if (Size > 1 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
        continue;

      MovX = &*I;
    }

    if (!MovX)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << *MovY);

    for (unsigned I = 0; I < Size; ++I) {
      TargetInstrInfo::RegSubRegPair X1, Y1;
      X1 = getSubRegForIndex(X, Xsub, I);
      Y1 = getSubRegForIndex(Y, Ysub, I);
      MachineBasicBlock &MBB = *MovT.getParent();
      auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                         TII->get(AMDGPU::V_SWAP_B32))
                     .addDef(X1.Reg, 0, X1.SubReg)
                     .addDef(Y1.Reg, 0, Y1.SubReg)
                     .addReg(Y1.Reg, 0, Y1.SubReg)
                     .addReg(X1.Reg, 0, X1.SubReg).getInstr();
      if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
        // Drop implicit EXEC.
        MIB->removeOperand(MIB->getNumExplicitOperands());
        MIB->copyImplicitOps(*MBB.getParent(), *MovX);
      }
    }
    MovX->eraseFromParent();
    dropInstructionKeepingImpDefs(*MovY);
    MachineInstr *Next = &*std::next(MovT.getIterator());

    if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
      dropInstructionKeepingImpDefs(MovT);
    } else {
      Xop.setIsKill(false);
      for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I) {
        unsigned OpNo = MovT.getNumExplicitOperands() + I;
        const MachineOperand &Op = MovT.getOperand(OpNo);
        if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
          MovT.removeOperand(OpNo);
      }
    }

    return Next;
  }

  return nullptr;
}

// If an instruction has a dead sdst, replace it with the NULL register on
// gfx1030+.
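// For example (illustrative): a "v_add_co_u32_e64" whose carry-out SGPR pair
// is never read can write the carry to the null register instead, freeing
// the SGPRs for the allocator.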
bool SIShrinkInstructions::tryReplaceDeadSDST(MachineInstr &MI) const {
  if (!ST->hasGFX10_3Insts())
    return false;

  MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Op)
    return false;
  Register SDstReg = Op->getReg();
  if (SDstReg.isPhysical() || !MRI->use_nodbg_empty(SDstReg))
    return false;

  Op->setReg(ST->isWave32() ? AMDGPU::SGPR_NULL : AMDGPU::SGPR_NULL64);
  return true;
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  this->MF = &MF;
  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
          int32_t ReverseImm;
          if (isReverseInlineImm(Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                            MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI)) {
          Next = NextMI->getIterator();
          continue;
        }
      }

      // Try to use S_ADDK_I32 and S_MULK_I32.
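      // For example (illustrative): "s_add_i32 s0, s0, 0x1234" can be
      // rewritten as "s_addk_i32 s0, 0x1234", which drops the literal dword.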
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Dest->getReg().isVirtual() && Src0->isReg()) {
          MRI->setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI->setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(*Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
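      // For example (illustrative): "s_mov_b32 s0, 0x1234" can become
      // "s_movk_i32 s0, 0x1234", since 0x1234 sign-extends from 16 bits.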
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Dst.getReg().isPhysical()) {
          int32_t ReverseImm;
          if (isKImmOperand(Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST->getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->isVOP3(MI))
        continue;

      if (MI.getOpcode() == AMDGPU::V_MAD_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_MAD_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_e64) {
        shrinkMadFma(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode())) {
        // If there is no chance we will shrink it and use VCC as sdst to get
        // a 32 bit form try to replace dead sdst with NULL.
        tryReplaceDeadSDST(MI);
        continue;
      }

      if (!TII->canShrink(MI, *MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, *MRI)) {
          tryReplaceDeadSDST(MI);
          continue;
        }
      }

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        MachineOperand &Op0 = MI.getOperand(0);
        if (Op0.isReg()) {
          // Exclude VOPCX instructions as these don't explicitly write a
          // dst.
          Register DstReg = Op0.getReg();
          if (DstReg.isVirtual()) {
            // VOPC instructions can only write to the VCC register. We can't
            // force them to use VCC here, because this is only one register and
            // cannot deal with sequences which would require multiple copies of
            // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
            //
            // So, instead of forcing the instruction to write to VCC, we
            // provide a hint to the register allocator to use VCC and then we
            // will run this pass again after RA and shrink it if it outputs to
            // VCC.
            MRI->setRegAllocationHint(DstReg, 0, VCCReg);
            continue;
          }
          if (DstReg != VCCReg)
            continue;
        }
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (SReg.isVirtual()) {
          MRI->setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
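      // For example (illustrative): "v_add_co_u32_e64 v0, s[4:5], v1, v2"
      // can only shrink to the e32 form if its carry-out lives in VCC, so
      // hint the sdst register toward VCC and retry after RA.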
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (SDst->getReg().isVirtual())
            MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                          AMDGPU::OpName::src2);
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Src2->getReg().isVirtual())
            MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // Pre-GFX10, shrinking VOP3 instructions pre-RA gave us the chance to
      // fold an immediate into the shrunk instruction as a literal operand. In
      // GFX10 VOP3 instructions can take a literal operand anyway, so there is
      // no advantage to doing this.
      if (ST->hasVOP3Literal() &&
          !MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs))
        continue;

      if (ST->hasTrue16BitInsts() && AMDGPU::isTrue16Inst(MI.getOpcode()) &&
          !shouldShrinkTrue16(MI))
        continue;

      // We can shrink this instruction
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MI);

      // Copy deadness from the old explicit vcc def to the new implicit def.
      if (SDst && SDst->isDead())
        Inst32->findRegisterDefOperand(VCCReg)->setIsDead();

      MI.eraseFromParent();
      foldImmediates(*Inst32);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}