LLVM  15.0.0git
SIShrinkInstructions.cpp
Go to the documentation of this file.
1 //===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// The pass tries to use the 32-bit encoding for instructions when possible.
8 //===----------------------------------------------------------------------===//
9 //
10 
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
16 
17 #define DEBUG_TYPE "si-shrink-instructions"
18 
19 STATISTIC(NumInstructionsShrunk,
20  "Number of 64-bit instruction reduced to 32-bit.");
21 STATISTIC(NumLiteralConstantsFolded,
22  "Number of literal constants folded into 32-bit instructions.");
23 
24 using namespace llvm;
25 
26 namespace {
27 
28 class SIShrinkInstructions : public MachineFunctionPass {
29 public:
30  static char ID;
31 
32  void shrinkMIMG(MachineInstr &MI);
33 
34 public:
35  SIShrinkInstructions() : MachineFunctionPass(ID) {
36  }
37 
38  bool runOnMachineFunction(MachineFunction &MF) override;
39 
40  StringRef getPassName() const override { return "SI Shrink Instructions"; }
41 
42  void getAnalysisUsage(AnalysisUsage &AU) const override {
43  AU.setPreservesCFG();
45  }
46 };
47 
48 } // End anonymous namespace.
49 
50 INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
51  "SI Shrink Instructions", false, false)
52 
53 char SIShrinkInstructions::ID = 0;
54 
56  return new SIShrinkInstructions();
57 }
58 
59 /// This function checks \p MI for operands defined by a move immediate
60 /// instruction and then folds the literal constant into the instruction if it
61 /// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instructions.
63  MachineRegisterInfo &MRI, bool TryToCommute = true) {
64  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));
65 
66  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
67 
68  // Try to fold Src0
69  MachineOperand &Src0 = MI.getOperand(Src0Idx);
70  if (Src0.isReg()) {
71  Register Reg = Src0.getReg();
72  if (Reg.isVirtual() && MRI.hasOneUse(Reg)) {
74  if (Def && Def->isMoveImmediate()) {
75  MachineOperand &MovSrc = Def->getOperand(1);
76  bool ConstantFolded = false;
77 
78  if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
79  if (MovSrc.isImm() &&
80  (isInt<32>(MovSrc.getImm()) || isUInt<32>(MovSrc.getImm()))) {
81  Src0.ChangeToImmediate(MovSrc.getImm());
82  ConstantFolded = true;
83  } else if (MovSrc.isFI()) {
84  Src0.ChangeToFrameIndex(MovSrc.getIndex());
85  ConstantFolded = true;
86  } else if (MovSrc.isGlobal()) {
87  Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
88  MovSrc.getTargetFlags());
89  ConstantFolded = true;
90  }
91  }
92 
93  if (ConstantFolded) {
95  Def->eraseFromParent();
96  ++NumLiteralConstantsFolded;
97  return true;
98  }
99  }
100  }
101  }
102 
103  // We have failed to fold src0, so commute the instruction and try again.
104  if (TryToCommute && MI.isCommutable()) {
105  if (TII->commuteInstruction(MI)) {
106  if (foldImmediates(MI, TII, MRI, false))
107  return true;
108 
109  // Commute back.
110  TII->commuteInstruction(MI);
111  }
112  }
113 
114  return false;
115 }
116 
117 static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
118  return isInt<16>(Src.getImm()) &&
119  !TII->isInlineConstant(*Src.getParent(),
120  Src.getParent()->getOperandNo(&Src));
121 }
122 
123 static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
124  return isUInt<16>(Src.getImm()) &&
125  !TII->isInlineConstant(*Src.getParent(),
126  Src.getParent()->getOperandNo(&Src));
127 }
128 
130  const MachineOperand &Src,
131  bool &IsUnsigned) {
132  if (isInt<16>(Src.getImm())) {
133  IsUnsigned = false;
134  return !TII->isInlineConstant(Src);
135  }
136 
137  if (isUInt<16>(Src.getImm())) {
138  IsUnsigned = true;
139  return !TII->isInlineConstant(Src);
140  }
141 
142  return false;
143 }
144 
145 /// \returns true if the constant in \p Src should be replaced with a bitreverse
146 /// of an inline immediate.
147 static bool isReverseInlineImm(const SIInstrInfo *TII,
148  const MachineOperand &Src,
149  int32_t &ReverseImm) {
150  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
151  return false;
152 
153  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
154  return ReverseImm >= -16 && ReverseImm <= 64;
155 }
156 
157 /// Copy implicit register operands from specified instruction to this
158 /// instruction that are not part of the instruction definition.
160  const MachineInstr &MI) {
161  for (unsigned i = MI.getDesc().getNumOperands() +
162  MI.getDesc().getNumImplicitUses() +
163  MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
164  i != e; ++i) {
165  const MachineOperand &MO = MI.getOperand(i);
166  if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
167  NewMI.addOperand(MF, MO);
168  }
169 }
170 
172  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
173  // get constants on the RHS.
174  if (!MI.getOperand(0).isReg())
175  TII->commuteInstruction(MI, false, 0, 1);
176 
177  // cmpk requires src0 to be a register
178  const MachineOperand &Src0 = MI.getOperand(0);
179  if (!Src0.isReg())
180  return;
181 
182  const MachineOperand &Src1 = MI.getOperand(1);
183  if (!Src1.isImm())
184  return;
185 
186  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
187  if (SOPKOpc == -1)
188  return;
189 
190  // eq/ne is special because the imm16 can be treated as signed or unsigned,
191  // and initially selected to the unsigned versions.
192  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
193  bool HasUImm;
194  if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
195  if (!HasUImm) {
196  SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
197  AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
198  }
199 
200  MI.setDesc(TII->get(SOPKOpc));
201  }
202 
203  return;
204  }
205 
206  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);
207 
208  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
209  (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
210  MI.setDesc(NewDesc);
211  }
212 }
213 
214 // Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
215 void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
216  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
217  if (!Info || Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA)
218  return;
219 
220  MachineFunction *MF = MI.getParent()->getParent();
221  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
222  const SIInstrInfo *TII = ST.getInstrInfo();
223  const SIRegisterInfo &TRI = TII->getRegisterInfo();
224  int VAddr0Idx =
225  AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
226  unsigned NewAddrDwords = Info->VAddrDwords;
227  const TargetRegisterClass *RC;
228 
229  if (Info->VAddrDwords == 2) {
230  RC = &AMDGPU::VReg_64RegClass;
231  } else if (Info->VAddrDwords == 3) {
232  RC = &AMDGPU::VReg_96RegClass;
233  } else if (Info->VAddrDwords == 4) {
234  RC = &AMDGPU::VReg_128RegClass;
235  } else if (Info->VAddrDwords == 5) {
236  RC = &AMDGPU::VReg_160RegClass;
237  } else if (Info->VAddrDwords == 6) {
238  RC = &AMDGPU::VReg_192RegClass;
239  } else if (Info->VAddrDwords == 7) {
240  RC = &AMDGPU::VReg_224RegClass;
241  } else if (Info->VAddrDwords == 8) {
242  RC = &AMDGPU::VReg_256RegClass;
243  } else {
244  RC = &AMDGPU::VReg_512RegClass;
245  NewAddrDwords = 16;
246  }
247 
248  unsigned VgprBase = 0;
249  bool IsUndef = true;
250  bool IsKill = NewAddrDwords == Info->VAddrDwords;
251  for (unsigned i = 0; i < Info->VAddrDwords; ++i) {
252  const MachineOperand &Op = MI.getOperand(VAddr0Idx + i);
253  unsigned Vgpr = TRI.getHWRegIndex(Op.getReg());
254 
255  if (i == 0) {
256  VgprBase = Vgpr;
257  } else if (VgprBase + i != Vgpr)
258  return;
259 
260  if (!Op.isUndef())
261  IsUndef = false;
262  if (!Op.isKill())
263  IsKill = false;
264  }
265 
266  if (VgprBase + NewAddrDwords > 256)
267  return;
268 
269  // Further check for implicit tied operands - this may be present if TFE is
270  // enabled
271  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
272  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
273  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
274  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
275  int ToUntie = -1;
276  if (TFEVal || LWEVal) {
277  // TFE/LWE is enabled so we need to deal with an implicit tied operand
278  for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
279  if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
280  MI.getOperand(i).isImplicit()) {
281  // This is the tied operand
282  assert(
283  ToUntie == -1 &&
284  "found more than one tied implicit operand when expecting only 1");
285  ToUntie = i;
286  MI.untieRegOperand(ToUntie);
287  }
288  }
289  }
290 
291  unsigned NewOpcode =
292  AMDGPU::getMIMGOpcode(Info->BaseOpcode, AMDGPU::MIMGEncGfx10Default,
293  Info->VDataDwords, NewAddrDwords);
294  MI.setDesc(TII->get(NewOpcode));
295  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
296  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
297  MI.getOperand(VAddr0Idx).setIsKill(IsKill);
298 
299  for (unsigned i = 1; i < Info->VAddrDwords; ++i)
300  MI.removeOperand(VAddr0Idx + 1);
301 
302  if (ToUntie >= 0) {
303  MI.tieOperands(
304  AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
305  ToUntie - (Info->VAddrDwords - 1));
306  }
307 }
308 
309 /// Attempt to shink AND/OR/XOR operations requiring non-inlineable literals.
310 /// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
311 /// If the inverse of the immediate is legal, use ANDN2, ORN2 or
312 /// XNOR (as a ^ b == ~(a ^ ~b)).
313 /// \returns true if the caller should continue the machine function iterator
316  const SIInstrInfo *TII,
317  MachineInstr &MI) {
318  unsigned Opc = MI.getOpcode();
319  const MachineOperand *Dest = &MI.getOperand(0);
320  MachineOperand *Src0 = &MI.getOperand(1);
321  MachineOperand *Src1 = &MI.getOperand(2);
322  MachineOperand *SrcReg = Src0;
323  MachineOperand *SrcImm = Src1;
324 
325  if (!SrcImm->isImm() ||
326  AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST.hasInv2PiInlineImm()))
327  return false;
328 
329  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
330  uint32_t NewImm = 0;
331 
332  if (Opc == AMDGPU::S_AND_B32) {
333  if (isPowerOf2_32(~Imm)) {
334  NewImm = countTrailingOnes(Imm);
335  Opc = AMDGPU::S_BITSET0_B32;
336  } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
337  NewImm = ~Imm;
338  Opc = AMDGPU::S_ANDN2_B32;
339  }
340  } else if (Opc == AMDGPU::S_OR_B32) {
341  if (isPowerOf2_32(Imm)) {
342  NewImm = countTrailingZeros(Imm);
343  Opc = AMDGPU::S_BITSET1_B32;
344  } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
345  NewImm = ~Imm;
346  Opc = AMDGPU::S_ORN2_B32;
347  }
348  } else if (Opc == AMDGPU::S_XOR_B32) {
349  if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
350  NewImm = ~Imm;
351  Opc = AMDGPU::S_XNOR_B32;
352  }
353  } else {
354  llvm_unreachable("unexpected opcode");
355  }
356 
357  if (NewImm != 0) {
358  if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
359  MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
360  MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
361  return true;
362  }
363 
364  if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
365  const bool IsUndef = SrcReg->isUndef();
366  const bool IsKill = SrcReg->isKill();
367  MI.setDesc(TII->get(Opc));
368  if (Opc == AMDGPU::S_BITSET0_B32 ||
369  Opc == AMDGPU::S_BITSET1_B32) {
370  Src0->ChangeToImmediate(NewImm);
371  // Remove the immediate and add the tied input.
372  MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
373  /*isImp*/ false, IsKill,
374  /*isDead*/ false, IsUndef);
375  MI.tieOperands(0, 2);
376  } else {
377  SrcImm->setImm(NewImm);
378  }
379  }
380  }
381 
382  return false;
383 }
384 
385 // This is the same as MachineInstr::readsRegister/modifiesRegister except
386 // it takes subregs into account.
388  Register Reg, unsigned SubReg,
389  const SIRegisterInfo &TRI) {
390  for (const MachineOperand &MO : R) {
391  if (!MO.isReg())
392  continue;
393 
394  if (Reg.isPhysical() && MO.getReg().isPhysical()) {
395  if (TRI.regsOverlap(Reg, MO.getReg()))
396  return true;
397  } else if (MO.getReg() == Reg && Reg.isVirtual()) {
399  TRI.getSubRegIndexLaneMask(MO.getSubReg());
400  if (Overlap.any())
401  return true;
402  }
403  }
404  return false;
405 }
406 
407 static bool instReadsReg(const MachineInstr *MI,
408  unsigned Reg, unsigned SubReg,
409  const SIRegisterInfo &TRI) {
410  return instAccessReg(MI->uses(), Reg, SubReg, TRI);
411 }
412 
413 static bool instModifiesReg(const MachineInstr *MI,
414  unsigned Reg, unsigned SubReg,
415  const SIRegisterInfo &TRI) {
416  return instAccessReg(MI->defs(), Reg, SubReg, TRI);
417 }
418 
420 getSubRegForIndex(Register Reg, unsigned Sub, unsigned I,
421  const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
422  if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
423  if (Reg.isPhysical()) {
424  Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
425  } else {
426  Sub = TRI.getSubRegFromChannel(I + TRI.getChannelFromSubReg(Sub));
427  }
428  }
429  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
430 }
431 
433  const SIInstrInfo *TII) {
434  for (unsigned i = MI.getDesc().getNumOperands() +
435  MI.getDesc().getNumImplicitUses() +
436  MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
437  i != e; ++i) {
438  const MachineOperand &Op = MI.getOperand(i);
439  if (!Op.isDef())
440  continue;
441  BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
442  TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
443  }
444 
445  MI.eraseFromParent();
446 }
447 
448 // Match:
449 // mov t, x
450 // mov x, y
451 // mov y, t
452 //
453 // =>
454 //
455 // mov t, x (t is potentially dead and move eliminated)
456 // v_swap_b32 x, y
457 //
458 // Returns next valid instruction pointer if was able to create v_swap_b32.
459 //
460 // This shall not be done too early not to prevent possible folding which may
461 // remove matched moves, and this should preferably be done before RA to
462 // release saved registers and also possibly after RA which can insert copies
463 // too.
464 //
465 // This is really just a generic peephole that is not a canonical shrinking,
466 // although requirements match the pass placement and it reduces code size too.
468  const SIInstrInfo *TII) {
469  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
470  MovT.getOpcode() == AMDGPU::COPY);
471 
472  Register T = MovT.getOperand(0).getReg();
473  unsigned Tsub = MovT.getOperand(0).getSubReg();
474  MachineOperand &Xop = MovT.getOperand(1);
475 
476  if (!Xop.isReg())
477  return nullptr;
478  Register X = Xop.getReg();
479  unsigned Xsub = Xop.getSubReg();
480 
481  unsigned Size = TII->getOpSize(MovT, 0) / 4;
482 
483  const SIRegisterInfo &TRI = TII->getRegisterInfo();
484  if (!TRI.isVGPR(MRI, X))
485  return nullptr;
486 
488  return nullptr;
489 
490  const unsigned SearchLimit = 16;
491  unsigned Count = 0;
492  bool KilledT = false;
493  for (auto Iter = std::next(MovT.getIterator()),
494  E = MovT.getParent()->instr_end();
495  Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {
496 
497  MachineInstr *MovY = &*Iter;
498  KilledT = MovY->killsRegister(T, &TRI);
499 
500  if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
501  MovY->getOpcode() != AMDGPU::COPY) ||
502  !MovY->getOperand(1).isReg() ||
503  MovY->getOperand(1).getReg() != T ||
504  MovY->getOperand(1).getSubReg() != Tsub ||
506  continue;
507 
508  Register Y = MovY->getOperand(0).getReg();
509  unsigned Ysub = MovY->getOperand(0).getSubReg();
510 
511  if (!TRI.isVGPR(MRI, Y))
512  continue;
513 
514  MachineInstr *MovX = nullptr;
515  for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
516  I != IY; ++I) {
517  if (instReadsReg(&*I, X, Xsub, TRI) ||
518  instModifiesReg(&*I, Y, Ysub, TRI) ||
519  instModifiesReg(&*I, T, Tsub, TRI) ||
520  (MovX && instModifiesReg(&*I, X, Xsub, TRI))) {
521  MovX = nullptr;
522  break;
523  }
524  if (!instReadsReg(&*I, Y, Ysub, TRI)) {
525  if (!MovX && instModifiesReg(&*I, X, Xsub, TRI)) {
526  MovX = nullptr;
527  break;
528  }
529  continue;
530  }
531  if (MovX ||
532  (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
533  I->getOpcode() != AMDGPU::COPY) ||
534  I->getOperand(0).getReg() != X ||
535  I->getOperand(0).getSubReg() != Xsub) {
536  MovX = nullptr;
537  break;
538  }
539  // Implicit use of M0 is an indirect move.
540  if (I->hasRegisterImplicitUseOperand(AMDGPU::M0))
541  continue;
542 
543  if (Size > 1 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
544  continue;
545 
546  MovX = &*I;
547  }
548 
549  if (!MovX)
550  continue;
551 
552  LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << *MovY);
553 
554  for (unsigned I = 0; I < Size; ++I) {
556  X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
557  Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
558  MachineBasicBlock &MBB = *MovT.getParent();
559  auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
560  TII->get(AMDGPU::V_SWAP_B32))
561  .addDef(X1.Reg, 0, X1.SubReg)
562  .addDef(Y1.Reg, 0, Y1.SubReg)
563  .addReg(Y1.Reg, 0, Y1.SubReg)
564  .addReg(X1.Reg, 0, X1.SubReg).getInstr();
565  if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
566  // Drop implicit EXEC.
567  MIB->removeOperand(MIB->getNumExplicitOperands());
568  MIB->copyImplicitOps(*MBB.getParent(), *MovX);
569  }
570  }
571  MovX->eraseFromParent();
573  MachineInstr *Next = &*std::next(MovT.getIterator());
574 
575  if (T.isVirtual() && MRI.use_nodbg_empty(T)) {
577  } else {
578  Xop.setIsKill(false);
579  for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I ) {
580  unsigned OpNo = MovT.getNumExplicitOperands() + I;
581  const MachineOperand &Op = MovT.getOperand(OpNo);
582  if (Op.isKill() && TRI.regsOverlap(X, Op.getReg()))
583  MovT.removeOperand(OpNo);
584  }
585  }
586 
587  return Next;
588  }
589 
590  return nullptr;
591 }
592 
593 bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
594  if (skipFunction(MF.getFunction()))
595  return false;
596 
598  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
599  const SIInstrInfo *TII = ST.getInstrInfo();
600  unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
601 
602  std::vector<unsigned> I1Defs;
603 
604  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
605  BI != BE; ++BI) {
606 
607  MachineBasicBlock &MBB = *BI;
609  for (I = MBB.begin(); I != MBB.end(); I = Next) {
610  Next = std::next(I);
611  MachineInstr &MI = *I;
612 
613  if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
614  // If this has a literal constant source that is the same as the
615  // reversed bits of an inline immediate, replace with a bitreverse of
616  // that constant. This saves 4 bytes in the common case of materializing
617  // sign bits.
618 
619  // Test if we are after regalloc. We only want to do this after any
620  // optimizations happen because this will confuse them.
621  // XXX - not exactly a check for post-regalloc run.
622  MachineOperand &Src = MI.getOperand(1);
623  if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
624  int32_t ReverseImm;
625  if (isReverseInlineImm(TII, Src, ReverseImm)) {
626  MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
627  Src.setImm(ReverseImm);
628  continue;
629  }
630  }
631  }
632 
633  if (ST.hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
634  MI.getOpcode() == AMDGPU::COPY)) {
635  if (auto *NextMI = matchSwap(MI, MRI, TII)) {
636  Next = NextMI->getIterator();
637  continue;
638  }
639  }
640 
641  // FIXME: We also need to consider movs of constant operands since
642  // immediate operands are not folded if they have more than one use, and
643  // the operand folding pass is unaware if the immediate will be free since
644  // it won't know if the src == dest constraint will end up being
645  // satisfied.
646  if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
647  MI.getOpcode() == AMDGPU::S_MUL_I32) {
648  const MachineOperand *Dest = &MI.getOperand(0);
649  MachineOperand *Src0 = &MI.getOperand(1);
650  MachineOperand *Src1 = &MI.getOperand(2);
651 
652  if (!Src0->isReg() && Src1->isReg()) {
653  if (TII->commuteInstruction(MI, false, 1, 2))
654  std::swap(Src0, Src1);
655  }
656 
657  // FIXME: This could work better if hints worked with subregisters. If
658  // we have a vector add of a constant, we usually don't get the correct
659  // allocation due to the subregister usage.
660  if (Dest->getReg().isVirtual() && Src0->isReg()) {
661  MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
662  MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
663  continue;
664  }
665 
666  if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
667  if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
668  unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
669  AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;
670 
671  MI.setDesc(TII->get(Opc));
672  MI.tieOperands(0, 1);
673  }
674  }
675  }
676 
677  // Try to use s_cmpk_*
678  if (MI.isCompare() && TII->isSOPC(MI)) {
680  continue;
681  }
682 
683  // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
684  if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
685  const MachineOperand &Dst = MI.getOperand(0);
686  MachineOperand &Src = MI.getOperand(1);
687 
688  if (Src.isImm() && Dst.getReg().isPhysical()) {
689  int32_t ReverseImm;
690  if (isKImmOperand(TII, Src))
691  MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
692  else if (isReverseInlineImm(TII, Src, ReverseImm)) {
693  MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
694  Src.setImm(ReverseImm);
695  }
696  }
697 
698  continue;
699  }
700 
701  // Shrink scalar logic operations.
702  if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
703  MI.getOpcode() == AMDGPU::S_OR_B32 ||
704  MI.getOpcode() == AMDGPU::S_XOR_B32) {
705  if (shrinkScalarLogicOp(ST, MRI, TII, MI))
706  continue;
707  }
708 
709  if (TII->isMIMG(MI.getOpcode()) &&
710  ST.getGeneration() >= AMDGPUSubtarget::GFX10 &&
713  shrinkMIMG(MI);
714  continue;
715  }
716 
717  if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
718  continue;
719 
720  if (!TII->canShrink(MI, MRI)) {
721  // Try commuting the instruction and see if that enables us to shrink
722  // it.
723  if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
724  !TII->canShrink(MI, MRI))
725  continue;
726  }
727 
728  int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
729 
730  if (TII->isVOPC(Op32)) {
731  MachineOperand &Op0 = MI.getOperand(0);
732  if (Op0.isReg()) {
733  // Exclude VOPCX instructions as these don't explicitly write a
734  // dst.
735  Register DstReg = Op0.getReg();
736  if (DstReg.isVirtual()) {
737  // VOPC instructions can only write to the VCC register. We can't
738  // force them to use VCC here, because this is only one register and
739  // cannot deal with sequences which would require multiple copies of
740  // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
741  //
742  // So, instead of forcing the instruction to write to VCC, we
743  // provide a hint to the register allocator to use VCC and then we
744  // will run this pass again after RA and shrink it if it outputs to
745  // VCC.
746  MRI.setRegAllocationHint(DstReg, 0, VCCReg);
747  continue;
748  }
749  if (DstReg != VCCReg)
750  continue;
751  }
752  }
753 
754  if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
755  // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
756  // instructions.
757  const MachineOperand *Src2 =
758  TII->getNamedOperand(MI, AMDGPU::OpName::src2);
759  if (!Src2->isReg())
760  continue;
761  Register SReg = Src2->getReg();
762  if (SReg.isVirtual()) {
763  MRI.setRegAllocationHint(SReg, 0, VCCReg);
764  continue;
765  }
766  if (SReg != VCCReg)
767  continue;
768  }
769 
770  // Check for the bool flag output for instructions like V_ADD_I32_e64.
771  const MachineOperand *SDst = TII->getNamedOperand(MI,
772  AMDGPU::OpName::sdst);
773 
774  if (SDst) {
775  bool Next = false;
776 
777  if (SDst->getReg() != VCCReg) {
778  if (SDst->getReg().isVirtual())
779  MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
780  Next = true;
781  }
782 
783  // All of the instructions with carry outs also have an SGPR input in
784  // src2.
785  const MachineOperand *Src2 = TII->getNamedOperand(MI,
786  AMDGPU::OpName::src2);
787  if (Src2 && Src2->getReg() != VCCReg) {
788  if (Src2->getReg().isVirtual())
789  MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
790  Next = true;
791  }
792 
793  if (Next)
794  continue;
795  }
796 
797  // We can shrink this instruction
798  LLVM_DEBUG(dbgs() << "Shrinking " << MI);
799 
800  MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
801  ++NumInstructionsShrunk;
802 
803  // Copy extra operands not present in the instruction definition.
804  copyExtraImplicitOps(*Inst32, MF, MI);
805 
806  // Copy deadness from the old explicit vcc def to the new implicit def.
807  if (SDst && SDst->isDead())
808  Inst32->findRegisterDefOperand(VCCReg)->setIsDead();
809 
810  MI.eraseFromParent();
811  foldImmediates(*Inst32, TII, MRI);
812 
813  LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
814  }
815  }
816  return false;
817 }
llvm::LaneBitmask
Definition: LaneBitmask.h:40
i
i
Definition: README.txt:29
matchSwap
static MachineInstr * matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI, const SIInstrInfo *TII)
Definition: SIShrinkInstructions.cpp:467
isKImmOperand
static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src)
Definition: SIShrinkInstructions.cpp:117
llvm::MachineFunctionProperties::hasProperty
bool hasProperty(Property P) const
Definition: MachineFunction.h:176
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::tgtok::Def
@ Def
Definition: TGLexer.h:50
isKUImmOperand
static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src)
Definition: SIShrinkInstructions.cpp:123
llvm::MachineOperand::getGlobal
const GlobalValue * getGlobal() const
Definition: MachineOperand.h:572
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::MachineInstr::getNumExplicitOperands
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
Definition: MachineInstr.cpp:679
llvm::MachineOperand::setIsKill
void setIsKill(bool Val=true)
Definition: MachineOperand.h:509
Statistic.h
llvm::MachineFunction::end
iterator end()
Definition: MachineFunction.h:824
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::MachineRegisterInfo::getUniqueVRegDef
MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
Definition: MachineRegisterInfo.cpp:407
llvm::MachineFunctionPass
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Definition: MachineFunctionPass.h:30
llvm::AMDGPU::getSOPKOp
LLVM_READONLY int getSOPKOp(uint16_t Opcode)
llvm::MachineOperand::setImm
void setImm(int64_t immVal)
Definition: MachineOperand.h:664
instAccessReg
static bool instAccessReg(iterator_range< MachineInstr::const_mop_iterator > &&R, Register Reg, unsigned SubReg, const SIRegisterInfo &TRI)
Definition: SIShrinkInstructions.cpp:387
llvm::MachineInstr::hasRegisterImplicitUseOperand
bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
Definition: MachineInstr.cpp:933
T
#define T
Definition: Mips16ISelLowering.cpp:341
llvm::GCNSubtarget
Definition: GCNSubtarget.h:31
dropInstructionKeepingImpDefs
static void dropInstructionKeepingImpDefs(MachineInstr &MI, const SIInstrInfo *TII)
Definition: SIShrinkInstructions.cpp:432
llvm::MachineOperand::ChangeToFrameIndex
void ChangeToFrameIndex(int Idx, unsigned TargetFlags=0)
Replace this operand with a frame index.
Definition: MachineOperand.cpp:212
llvm::AMDGPU::getNamedOperandIdx
LLVM_READONLY int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
shrinkScalarCompare
static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI)
Definition: SIShrinkInstructions.cpp:171
llvm::MachineOperand::isFI
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
Definition: MachineOperand.h:330
llvm::MachineOperand::getOffset
int64_t getOffset() const
Return the offset from the symbol in this operand.
Definition: MachineOperand.h:609
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1618
llvm::MachineFunctionPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Definition: MachineFunctionPass.cpp:103
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::TargetInstrInfo::RegSubRegPair
A pair composed of a register and a sub-register index.
Definition: TargetInstrInfo.h:491
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::MachineOperand::isKill
bool isKill() const
Definition: MachineOperand.h:389
llvm::MachineInstrBuilder::addDef
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Definition: MachineInstrBuilder.h:116
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:650
llvm::MachineOperand::isImplicit
bool isImplicit() const
Definition: MachineOperand.h:379
llvm::TargetRegisterInfo::getSubRegIndexLaneMask
LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const
Return a bitmask representing the parts of a register that are covered by SubIdx.
Definition: TargetRegisterInfo.h:376
GCNSubtarget.h
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:546
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:501
INITIALIZE_PASS
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:37
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:45
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::MachineFunction::getProperties
const MachineFunctionProperties & getProperties() const
Get the function properties.
Definition: MachineFunction.h:731
llvm::AMDGPU::getVOPe32
LLVM_READONLY int getVOPe32(uint16_t Opcode)
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:127
llvm::MachineOperand::ChangeToImmediate
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
Definition: MachineOperand.cpp:154
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::M0
unsigned M0(unsigned Val)
Definition: VE.h:370
instReadsReg
static bool instReadsReg(const MachineInstr *MI, unsigned Reg, unsigned SubReg, const SIRegisterInfo &TRI)
Definition: SIShrinkInstructions.cpp:407
llvm::MachineFunction::begin
iterator begin()
Definition: MachineFunction.h:822
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::SIRegisterInfo
Definition: SIRegisterInfo.h:30
llvm::AMDGPU::getMIMGOpcode
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
Definition: AMDGPUBaseInfo.cpp:185
llvm::MachineInstr::killsRegister
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
Definition: MachineInstr.h:1368
llvm::MachineRegisterInfo::use_empty
bool use_empty(Register RegNo) const
use_empty - Return true if there are no instructions using the specified register.
Definition: MachineRegisterInfo.h:514
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
llvm::MachineFunctionProperties::Property::NoVRegs
@ NoVRegs
llvm::AMDGPUSubtarget::GFX10
@ GFX10
Definition: AMDGPUSubtarget.h:41
llvm::Register::isVirtual
bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:640
copyExtraImplicitOps
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction that are not part of t...
Definition: SIShrinkInstructions.cpp:159
llvm::MachineInstr::getDebugLoc
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:420
llvm::MachineOperand::isUndef
bool isUndef() const
Definition: MachineOperand.h:394
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
AMDGPUMCTargetDesc.h
llvm::MachineOperand::setIsDead
void setIsDead(bool Val=true)
Definition: MachineOperand.h:515
llvm::TargetRegisterInfo::regsOverlap
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Definition: TargetRegisterInfo.h:417
llvm::isUInt< 16 >
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:408
llvm::MachineOperand::getTargetFlags
unsigned getTargetFlags() const
Definition: MachineOperand.h:220
llvm::TargetInstrInfo::RegSubRegPair::Reg
Register Reg
Definition: TargetInstrInfo.h:492
instModifiesReg
static bool instModifiesReg(const MachineInstr *MI, unsigned Reg, unsigned SubReg, const SIRegisterInfo &TRI)
Definition: SIShrinkInstructions.cpp:413
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:320
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::MachineInstr::getNumImplicitOperands
unsigned getNumImplicitOperands() const
Returns the implicit operands number.
Definition: MachineInstr.h:585
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::MachineOperand::isDead
bool isDead() const
Definition: MachineOperand.h:384
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::LaneBitmask::any
constexpr bool any() const
Definition: LaneBitmask.h:53
llvm::countTrailingOnes
unsigned countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
Definition: MathExtras.h:525
shrinkScalarLogicOp
static bool shrinkScalarLogicOp(const GCNSubtarget &ST, MachineRegisterInfo &MRI, const SIInstrInfo *TII, MachineInstr &MI)
Attempt to shink AND/OR/XOR operations requiring non-inlineable literals.
Definition: SIShrinkInstructions.cpp:314
MachineFunctionPass.h
llvm::isUInt< 32 >
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:411
RegSubRegPair
TargetInstrInfo::RegSubRegPair RegSubRegPair
Definition: PeepholeOptimizer.cpp:100
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:234
llvm::MachineOperand::isRegMask
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
Definition: MachineOperand.h:344
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::AMDGPU::getMIMGInfo
const LLVM_READONLY MIMGInfo * getMIMGInfo(unsigned Opc)
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:359
llvm::MachineBasicBlock::instr_end
instr_iterator instr_end()
Definition: MachineBasicBlock.h:263
llvm::MachineFunction
Definition: MachineFunction.h:241
llvm::MachineRegisterInfo::use_nodbg_empty
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
Definition: MachineRegisterInfo.h:574
llvm::createSIShrinkInstructionsPass
FunctionPass * createSIShrinkInstructionsPass()
llvm::TargetInstrInfo::RegSubRegPair::SubReg
unsigned SubReg
Definition: TargetInstrInfo.h:493
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:156
llvm::AnalysisUsage::setPreservesCFG
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:263
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
AMDGPU.h
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:491
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::MachineRegisterInfo::setRegAllocationHint
void setRegAllocationHint(Register VReg, unsigned Type, Register PrefReg)
setRegAllocationHint - Specify a register allocation hint for the specified virtual register.
Definition: MachineRegisterInfo.h:773
uint32_t
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:82
llvm::MachineInstr::getParent
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:288
llvm::MachineInstrBuilder::getInstr
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Definition: MachineInstrBuilder.h:89
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineOperand::getSubReg
unsigned getSubReg() const
Definition: MachineOperand.h:364
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::isInt< 16 >
constexpr bool isInt< 16 >(int64_t x)
Definition: MathExtras.h:370
llvm::MachineOperand::ChangeToGA
void ChangeToGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
ChangeToGA - Replace this operand with a new global address operand.
Definition: MachineOperand.cpp:188
STATISTIC
STATISTIC(NumInstructionsShrunk, "Number of 64-bit instruction reduced to 32-bit.")
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:606
llvm::TargetRegisterInfo::getRegSizeInBits
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const
Return the size in bits of a register from class RC.
Definition: TargetRegisterInfo.h:275
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:326
llvm::TargetRegisterClass::getRegister
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
Definition: TargetRegisterInfo.h:86
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
llvm::MachineOperand::getIndex
int getIndex() const
Definition: MachineOperand.h:566
isReverseInlineImm
static bool isReverseInlineImm(const SIInstrInfo *TII, const MachineOperand &Src, int32_t &ReverseImm)
Definition: SIShrinkInstructions.cpp:147
getSubRegForIndex
static TargetInstrInfo::RegSubRegPair getSubRegForIndex(Register Reg, unsigned Sub, unsigned I, const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI)
Definition: SIShrinkInstructions.cpp:420
foldImmediates
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII, MachineRegisterInfo &MRI, bool TryToCommute=true)
This function checks MI for operands defined by a move immediate instruction and then folds the liter...
Definition: SIShrinkInstructions.cpp:62
isKImmOrKUImmOperand
static bool isKImmOrKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src, bool &IsUnsigned)
Definition: SIShrinkInstructions.cpp:129
llvm::MachineOperand::isImm
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Definition: MachineOperand.h:322
llvm::MachineInstr::removeOperand
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
Definition: MachineInstr.cpp:276
llvm::SIInstrInfo
Definition: SIInstrInfo.h:43
llvm::AMDGPU::isInlinableLiteral32
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:1959
llvm::MachineRegisterInfo::hasOneUse
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
Definition: MachineRegisterInfo.h:518
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:277
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
llvm::MachineInstr::addOperand
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
Definition: MachineInstr.cpp:184
llvm::iterator_range
A range adaptor for a pair of iterators.
Definition: iterator_range.h:30
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1103
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
DEBUG_TYPE
#define DEBUG_TYPE
The pass tries to use the 32-bit encoding for instructions when possible.
Definition: SIShrinkInstructions.cpp:17
llvm::MachineInstr::findRegisterDefOperand
MachineOperand * findRegisterDefOperand(Register Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
Definition: MachineInstr.h:1436
llvm::MachineInstr::eraseFromParent
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Definition: MachineInstr.cpp:650
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:279
SubReg
unsigned SubReg
Definition: AArch64AdvSIMDScalarPass.cpp:104
llvm::MachineOperand::isGlobal
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Definition: MachineOperand.h:338
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37