//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  void shrinkMIMG(MachineInstr &MI);

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                               isUInt<32>(MovSrc.getImm()))) {
          // It's possible to have only one component of a super-reg defined by
          // a single mov, so we need to clear any subregister flag.
          Src0.setSubReg(0);
          Src0.ChangeToImmediate(MovSrc.getImm());
          ConstantFolded = true;
        } else if (MovSrc.isFI()) {
          Src0.setSubReg(0);
          Src0.ChangeToFrameIndex(MovSrc.getIndex());
          ConstantFolded = true;
        } else if (MovSrc.isGlobal()) {
          Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                          MovSrc.getTargetFlags());
          ConstantFolded = true;
        }

        if (ConstantFolded) {
          assert(MRI.use_empty(Reg));
          Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, TII, MRI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}
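
// A minimal sketch of the fold in MIR-like notation (register numbers and the
// literal are hypothetical, not from a real compilation):
//
//   %1:vgpr_32 = V_MOV_B32_e32 1078530011
//   %2:vgpr_32 = V_ADD_F32_e32 %1, %0      ; %1 has no other uses
// folds to
//   %2:vgpr_32 = V_ADD_F32_e32 1078530011, %0   ; the V_MOV is erased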

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}
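
// A worked example: the sign-bit mask 0x80000000 is not an inline immediate,
// but its bit-reverse is 1, which is (inline immediates span -16..64). So
// "v_mov_b32 v0, 0x80000000" can be encoded as "v_bfrev_b32 v0, 1", dropping
// the 4-byte literal.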

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and is initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}
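
// For example (a hypothetical compare): "s_cmp_gt_i32 s0, 0x3039" carries
// 12345 as a 32-bit literal, but 12345 fits in a signed 16-bit field, so the
// compare can be re-encoded as the SOPK form "s_cmpk_gt_i32 s0, 0x3039",
// saving 4 bytes.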

// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA)
    return;

  MachineFunction *MF = MI.getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords <= 8) {
    RC = &AMDGPU::VReg_256RegClass;
    NewAddrDwords = 8;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  for (unsigned i = 0; i < Info->VAddrDwords; ++i) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + i);
    unsigned Vgpr = TRI.getHWRegIndex(Op.getReg());

    if (i == 0) {
      VgprBase = Vgpr;
    } else if (VgprBase + i != Vgpr)
      return;

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  // Further check for implicit tied operands - these may be present if TFE is
  // enabled.
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, AMDGPU::MIMGEncGfx10Default,
                            Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < Info->VAddrDwords; ++i)
    MI.RemoveOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (Info->VAddrDwords - 1));
  }
}
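
// Illustration (hypothetical registers): an NSA-encoded gfx10
//   image_sample v[0:3], [v6, v7], s[0:7], s[8:11]
// whose address registers happen to be consecutive can be re-encoded as the
// shorter non-NSA form
//   image_sample v[0:3], v[6:7], s[0:7], s[8:11]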

/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \returns true if the caller should continue the machine function iterator.
static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
                                MachineRegisterInfo &MRI,
                                const SIInstrInfo *TII,
                                MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (SrcImm->isImm() &&
      !AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST.hasInv2PiInlineImm())) {
    uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
    uint32_t NewImm = 0;

    if (Opc == AMDGPU::S_AND_B32) {
      if (isPowerOf2_32(~Imm)) {
        NewImm = countTrailingOnes(Imm);
        Opc = AMDGPU::S_BITSET0_B32;
      } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
        NewImm = ~Imm;
        Opc = AMDGPU::S_ANDN2_B32;
      }
    } else if (Opc == AMDGPU::S_OR_B32) {
      if (isPowerOf2_32(Imm)) {
        NewImm = countTrailingZeros(Imm);
        Opc = AMDGPU::S_BITSET1_B32;
      } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
        NewImm = ~Imm;
        Opc = AMDGPU::S_ORN2_B32;
      }
    } else if (Opc == AMDGPU::S_XOR_B32) {
      if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
        NewImm = ~Imm;
        Opc = AMDGPU::S_XNOR_B32;
      }
    } else {
      llvm_unreachable("unexpected opcode");
    }

    if ((Opc == AMDGPU::S_ANDN2_B32 || Opc == AMDGPU::S_ORN2_B32) &&
        SrcImm == Src0) {
      if (!TII->commuteInstruction(MI, false, 1, 2))
        NewImm = 0;
    }

    if (NewImm != 0) {
      if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
        MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
        MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
        return true;
      }

      if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
        MI.setDesc(TII->get(Opc));
        if (Opc == AMDGPU::S_BITSET0_B32 ||
            Opc == AMDGPU::S_BITSET1_B32) {
          Src0->ChangeToImmediate(NewImm);
          // Remove the immediate and add the tied input.
          MI.getOperand(2).ChangeToRegister(Dest->getReg(), false);
          MI.tieOperands(0, 2);
        } else {
          SrcImm->setImm(NewImm);
        }
      }
    }
  }

  return false;
}
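
// Two worked examples (immediates chosen for illustration):
//   s_and_b32 s0, s0, 0xffffbfff   ; ~imm == 0x4000, a power of two
//     => s_bitset0_b32 s0, 14      ; clear bit 14, no 32-bit literal
//   s_and_b32 s0, s0, 0xffffffcc   ; ~imm == 51, an inline constant
//     => s_andn2_b32 s0, s0, 51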

// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                          unsigned Reg, unsigned SubReg,
                          const SIRegisterInfo &TRI) {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Register::isPhysicalRegister(Reg) &&
        Register::isPhysicalRegister(MO.getReg())) {
      if (TRI.regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
      LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
                            TRI.getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

static bool instReadsReg(const MachineInstr *MI,
                         unsigned Reg, unsigned SubReg,
                         const SIRegisterInfo &TRI) {
  return instAccessReg(MI->uses(), Reg, SubReg, TRI);
}

static bool instModifiesReg(const MachineInstr *MI,
                            unsigned Reg, unsigned SubReg,
                            const SIRegisterInfo &TRI) {
  return instAccessReg(MI->defs(), Reg, SubReg, TRI);
}

static TargetInstrInfo::RegSubRegPair
getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
                  const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
  if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
    if (Register::isPhysicalRegister(Reg)) {
      Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
    } else {
      LaneBitmask LM = TRI.getSubRegIndexLaneMask(Sub);
      Sub = TRI.getSubRegFromChannel(I + countTrailingZeros(LM.getAsInteger()));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

// Match:
// mov t, x
// mov x, y
// mov y, t
//
// =>
//
// mov t, x (t is potentially dead and the move eliminated)
// v_swap_b32 x, y
//
// Returns the next valid instruction pointer if it was able to create
// v_swap_b32.
//
// This must not run too early, or it would prevent folding that could remove
// the matched moves; it should preferably run before RA to free up the saved
// registers, and possibly again after RA, which can insert copies too.
//
// This is really just a generic peephole rather than a canonical shrinking,
// but its requirements match the pass placement and it reduces code size too.
static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
                               const SIInstrInfo *TII) {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  if (!TRI.isVGPR(MRI, X))
    return nullptr;

  for (MachineOperand &YTop : MRI.use_nodbg_operands(T)) {
    if (YTop.getSubReg() != Tsub)
      continue;

    MachineInstr &MovY = *YTop.getParent();
    if ((MovY.getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY.getOpcode() != AMDGPU::COPY) ||
        MovY.getOperand(1).getSubReg() != Tsub)
      continue;

    Register Y = MovY.getOperand(0).getReg();
    unsigned Ysub = MovY.getOperand(0).getSubReg();

    if (!TRI.isVGPR(MRI, Y) || MovT.getParent() != MovY.getParent())
      continue;

    MachineInstr *MovX = nullptr;
    auto I = std::next(MovT.getIterator()), E = MovT.getParent()->instr_end();
    for (auto IY = MovY.getIterator(); I != E && I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub, TRI) ||
          instModifiesReg(&*I, Y, Ysub, TRI) ||
          instModifiesReg(&*I, T, Tsub, TRI) ||
          (MovX && instModifiesReg(&*I, X, Xsub, TRI))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub, TRI)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub, TRI)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }
      MovX = &*I;
    }

    if (!MovX || I == E)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << MovY);

    for (unsigned I = 0; I < Size; ++I) {
      TargetInstrInfo::RegSubRegPair X1, Y1;
      X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
      Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
      BuildMI(*MovT.getParent(), MovX->getIterator(), MovT.getDebugLoc(),
              TII->get(AMDGPU::V_SWAP_B32))
        .addDef(X1.Reg, 0, X1.SubReg)
        .addDef(Y1.Reg, 0, Y1.SubReg)
        .addReg(Y1.Reg, 0, Y1.SubReg)
        .addReg(X1.Reg, 0, X1.SubReg).getInstr();
    }
    MovX->eraseFromParent();
    MovY.eraseFromParent();
    MachineInstr *Next = &*std::next(MovT.getIterator());
    if (MRI.use_nodbg_empty(T))
      MovT.eraseFromParent();
    else
      Xop.setIsKill(false);

    return Next;
  }

  return nullptr;
}
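
// For a 64-bit swap (hypothetical registers), Size above is 2 dwords, so
// exchanging v[2:3] with v[4:5] through a temporary emits one V_SWAP_B32 per
// 32-bit half:
//   v_swap_b32 v2, v4
//   v_swap_b32 v3, v5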

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      if (ST.hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                           MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI, MRI, TII)) {
          Next = NextMI->getIterator();
          continue;
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait.
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free since
      // it won't know if the src == dest constraint will end up being
      // satisfied.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }
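
      // For example (operands are hypothetical), once dst and src0 match,
      //   s_add_i32 s0, s0, 0x1234
      // becomes the 4-byte-shorter tied SOPK form
      //   s_addk_i32 s0, 0x1234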

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(ST, MRI, TII, MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST.getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!TII->canShrink(MI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        Register DstReg = MI.getOperand(0).getReg();
        if (Register::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
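          //
          // For example (virtual register numbers are hypothetical):
          //   %2:sreg_64 = V_CMP_EQ_U32_e64 %0, %1
          // keeps the e64 form for now, with %2 hinted to VCC; if the
          // allocator assigns vcc, the post-RA rerun of this pass shrinks it
          // to the e32 form "v_cmp_eq_u32_e32 vcc, ...".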
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, VCCReg);
          continue;
        }
        if (DstReg != VCCReg)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (Register::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (Register::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Register::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // We can shrink this instruction
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}