LLVM  4.0.0
SIShrinkInstructions.cpp
Go to the documentation of this file.
1 //===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 /// The pass tries to use the 32-bit encoding for instructions when possible.
9 //===----------------------------------------------------------------------===//
10 //
11 
12 #include "AMDGPU.h"
13 #include "AMDGPUMCInstLower.h"
14 #include "AMDGPUSubtarget.h"
15 #include "SIInstrInfo.h"
16 #include "llvm/ADT/Statistic.h"
20 #include "llvm/IR/Constants.h"
21 #include "llvm/IR/Function.h"
22 #include "llvm/IR/LLVMContext.h"
23 #include "llvm/Support/Debug.h"
26 
27 #define DEBUG_TYPE "si-shrink-instructions"
28 
// Pass-wide statistics, reported under -stats.
STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instruction reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;
35 
36 namespace {
37 
38 class SIShrinkInstructions : public MachineFunctionPass {
39 public:
40  static char ID;
41 
42 public:
43  SIShrinkInstructions() : MachineFunctionPass(ID) {
44  }
45 
46  bool runOnMachineFunction(MachineFunction &MF) override;
47 
48  StringRef getPassName() const override { return "SI Shrink Instructions"; }
49 
50  void getAnalysisUsage(AnalysisUsage &AU) const override {
51  AU.setPreservesCFG();
53  }
54 };
55 
56 } // End anonymous namespace.
57 
// Register the pass with the pass infrastructure; the trailing flags mark it
// as neither CFG-only nor an analysis pass.
INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

// Pass identification token used by the pass framework.
char SIShrinkInstructions::ID = 0;
62 
64  return new SIShrinkInstructions();
65 }
66 
67 static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
68  const MachineRegisterInfo &MRI) {
69  if (!MO->isReg())
70  return false;
71 
73  return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));
74 
75  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
76 }
77 
78 static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
79  const SIRegisterInfo &TRI,
80  const MachineRegisterInfo &MRI) {
81 
82  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
83  // Can't shrink instruction with three operands.
84  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
85  // a special case for it. It can only be shrunk if the third operand
86  // is vcc. We should handle this the same way we handle vopc, by addding
87  // a register allocation hint pre-regalloc and then do the shrinking
88  // post-regalloc.
89  if (Src2) {
90  switch (MI.getOpcode()) {
91  default: return false;
92 
93  case AMDGPU::V_ADDC_U32_e64:
94  case AMDGPU::V_SUBB_U32_e64:
95  // Additional verification is needed for sdst/src2.
96  return true;
97 
98  case AMDGPU::V_MAC_F32_e64:
99  case AMDGPU::V_MAC_F16_e64:
100  if (!isVGPR(Src2, TRI, MRI) ||
101  TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
102  return false;
103  break;
104 
105  case AMDGPU::V_CNDMASK_B32_e64:
106  break;
107  }
108  }
109 
110  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
111  const MachineOperand *Src1Mod =
112  TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
113 
114  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
115  return false;
116 
117  // We don't need to check src0, all input types are legal, so just make sure
118  // src0 isn't using any modifiers.
119  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
120  return false;
121 
122  // Check output modifiers
123  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
124  return false;
125 
126  return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
127 }
128 
129 /// \brief This function checks \p MI for operands defined by a move immediate
130 /// instruction and then folds the literal constant into the instruction if it
131 /// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
132 /// and will only fold literal constants if we are still in SSA.
134  MachineRegisterInfo &MRI, bool TryToCommute = true) {
135 
136  if (!MRI.isSSA())
137  return;
138 
139  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));
140 
141  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
142 
143  // Only one literal constant is allowed per instruction, so if src0 is a
144  // literal constant then we can't do any folding.
145  if (TII->isLiteralConstant(MI, Src0Idx))
146  return;
147 
148  // Try to fold Src0
149  MachineOperand &Src0 = MI.getOperand(Src0Idx);
150  if (Src0.isReg() && MRI.hasOneUse(Src0.getReg())) {
151  unsigned Reg = Src0.getReg();
152  MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
153  if (Def && Def->isMoveImmediate()) {
154  MachineOperand &MovSrc = Def->getOperand(1);
155  bool ConstantFolded = false;
156 
157  if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
158  isUInt<32>(MovSrc.getImm()))) {
159  Src0.ChangeToImmediate(MovSrc.getImm());
160  ConstantFolded = true;
161  }
162  if (ConstantFolded) {
163  if (MRI.use_empty(Reg))
164  Def->eraseFromParent();
165  ++NumLiteralConstantsFolded;
166  return;
167  }
168  }
169  }
170 
171  // We have failed to fold src0, so commute the instruction and try again.
172  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(MI))
173  foldImmediates(MI, TII, MRI, false);
174 
175 }
176 
177 // Copy MachineOperand with all flags except setting it as implicit.
179  const MachineOperand &Orig) {
180 
181  for (MachineOperand &Use : MI.implicit_operands()) {
182  if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
183  Use.setIsUndef(Orig.isUndef());
184  Use.setIsKill(Orig.isKill());
185  return;
186  }
187  }
188 }
189 
190 static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
191  return isInt<16>(Src.getImm()) &&
192  !TII->isInlineConstant(*Src.getParent(),
193  Src.getParent()->getOperandNo(&Src));
194 }
195 
196 static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
197  return isUInt<16>(Src.getImm()) &&
198  !TII->isInlineConstant(*Src.getParent(),
199  Src.getParent()->getOperandNo(&Src));
200 }
201 
203  const MachineOperand &Src,
204  bool &IsUnsigned) {
205  if (isInt<16>(Src.getImm())) {
206  IsUnsigned = false;
207  return !TII->isInlineConstant(Src);
208  }
209 
210  if (isUInt<16>(Src.getImm())) {
211  IsUnsigned = true;
212  return !TII->isInlineConstant(Src);
213  }
214 
215  return false;
216 }
217 
218 /// \returns true if the constant in \p Src should be replaced with a bitreverse
219 /// of an inline immediate.
220 static bool isReverseInlineImm(const SIInstrInfo *TII,
221  const MachineOperand &Src,
222  int32_t &ReverseImm) {
223  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
224  return false;
225 
226  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
227  return ReverseImm >= -16 && ReverseImm <= 64;
228 }
229 
230 /// Copy implicit register operands from specified instruction to this
231 /// instruction that are not part of the instruction definition.
233  const MachineInstr &MI) {
234  for (unsigned i = MI.getDesc().getNumOperands() +
235  MI.getDesc().getNumImplicitUses() +
236  MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
237  i != e; ++i) {
238  const MachineOperand &MO = MI.getOperand(i);
239  if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
240  NewMI.addOperand(MF, MO);
241  }
242 }
243 
245  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
246  // get constants on the RHS.
247  if (!MI.getOperand(0).isReg())
248  TII->commuteInstruction(MI, false, 0, 1);
249 
250  const MachineOperand &Src1 = MI.getOperand(1);
251  if (!Src1.isImm())
252  return;
253 
254  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
255  if (SOPKOpc == -1)
256  return;
257 
258  // eq/ne is special because the imm16 can be treated as signed or unsigned,
259  // and initially selectd to the unsigned versions.
260  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
261  bool HasUImm;
262  if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
263  if (!HasUImm) {
264  SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
265  AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
266  }
267 
268  MI.setDesc(TII->get(SOPKOpc));
269  }
270 
271  return;
272  }
273 
274  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);
275 
276  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
277  (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
278  MI.setDesc(NewDesc);
279  }
280 }
281 
282 bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
283  if (skipFunction(*MF.getFunction()))
284  return false;
285 
287  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
288  const SIInstrInfo *TII = ST.getInstrInfo();
289  const SIRegisterInfo &TRI = TII->getRegisterInfo();
290 
291  std::vector<unsigned> I1Defs;
292 
293  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
294  BI != BE; ++BI) {
295 
296  MachineBasicBlock &MBB = *BI;
298  for (I = MBB.begin(); I != MBB.end(); I = Next) {
299  Next = std::next(I);
300  MachineInstr &MI = *I;
301 
302  if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
303  // If this has a literal constant source that is the same as the
304  // reversed bits of an inline immediate, replace with a bitreverse of
305  // that constant. This saves 4 bytes in the common case of materializing
306  // sign bits.
307 
308  // Test if we are after regalloc. We only want to do this after any
309  // optimizations happen because this will confuse them.
310  // XXX - not exactly a check for post-regalloc run.
311  MachineOperand &Src = MI.getOperand(1);
312  if (Src.isImm() &&
314  int32_t ReverseImm;
315  if (isReverseInlineImm(TII, Src, ReverseImm)) {
316  MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
317  Src.setImm(ReverseImm);
318  continue;
319  }
320  }
321  }
322 
323  // Combine adjacent s_nops to use the immediate operand encoding how long
324  // to wait.
325  //
326  // s_nop N
327  // s_nop M
328  // =>
329  // s_nop (N + M)
330  if (MI.getOpcode() == AMDGPU::S_NOP &&
331  Next != MBB.end() &&
332  (*Next).getOpcode() == AMDGPU::S_NOP) {
333 
334  MachineInstr &NextMI = *Next;
335  // The instruction encodes the amount to wait with an offset of 1,
336  // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
337  // after adding.
338  uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
339  uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;
340 
341  // Make sure we don't overflow the bounds.
342  if (Nop0 + Nop1 <= 8) {
343  NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
344  MI.eraseFromParent();
345  }
346 
347  continue;
348  }
349 
350  // FIXME: We also need to consider movs of constant operands since
351  // immediate operands are not folded if they have more than one use, and
352  // the operand folding pass is unaware if the immediate will be free since
353  // it won't know if the src == dest constraint will end up being
354  // satisfied.
355  if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
356  MI.getOpcode() == AMDGPU::S_MUL_I32) {
357  const MachineOperand *Dest = &MI.getOperand(0);
358  MachineOperand *Src0 = &MI.getOperand(1);
359  MachineOperand *Src1 = &MI.getOperand(2);
360 
361  if (!Src0->isReg() && Src1->isReg()) {
362  if (TII->commuteInstruction(MI, false, 1, 2))
363  std::swap(Src0, Src1);
364  }
365 
366  // FIXME: This could work better if hints worked with subregisters. If
367  // we have a vector add of a constant, we usually don't get the correct
368  // allocation due to the subregister usage.
370  Src0->isReg()) {
371  MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
372  MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
373  continue;
374  }
375 
376  if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
377  if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
378  unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
379  AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;
380 
381  MI.setDesc(TII->get(Opc));
382  MI.tieOperands(0, 1);
383  }
384  }
385  }
386 
387  // Try to use s_cmpk_*
388  if (MI.isCompare() && TII->isSOPC(MI)) {
389  shrinkScalarCompare(TII, MI);
390  continue;
391  }
392 
393  // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
394  if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
395  const MachineOperand &Dst = MI.getOperand(0);
396  MachineOperand &Src = MI.getOperand(1);
397 
398  if (Src.isImm() &&
400  int32_t ReverseImm;
401  if (isKImmOperand(TII, Src))
402  MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
403  else if (isReverseInlineImm(TII, Src, ReverseImm)) {
404  MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
405  Src.setImm(ReverseImm);
406  }
407  }
408 
409  continue;
410  }
411 
412  if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
413  continue;
414 
415  if (!canShrink(MI, TII, TRI, MRI)) {
416  // Try commuting the instruction and see if that enables us to shrink
417  // it.
418  if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
419  !canShrink(MI, TII, TRI, MRI))
420  continue;
421  }
422 
423  // getVOPe32 could be -1 here if we started with an instruction that had
424  // a 32-bit encoding and then commuted it to an instruction that did not.
425  if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
426  continue;
427 
428  int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
429 
430  if (TII->isVOPC(Op32)) {
431  unsigned DstReg = MI.getOperand(0).getReg();
433  // VOPC instructions can only write to the VCC register. We can't
434  // force them to use VCC here, because this is only one register and
435  // cannot deal with sequences which would require multiple copies of
436  // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
437  //
438  // So, instead of forcing the instruction to write to VCC, we provide
439  // a hint to the register allocator to use VCC and then we we will run
440  // this pass again after RA and shrink it if it outputs to VCC.
441  MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
442  continue;
443  }
444  if (DstReg != AMDGPU::VCC)
445  continue;
446  }
447 
448  if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
449  // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
450  // instructions.
451  const MachineOperand *Src2 =
452  TII->getNamedOperand(MI, AMDGPU::OpName::src2);
453  if (!Src2->isReg())
454  continue;
455  unsigned SReg = Src2->getReg();
457  MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
458  continue;
459  }
460  if (SReg != AMDGPU::VCC)
461  continue;
462  }
463 
464  // Check for the bool flag output for instructions like V_ADD_I32_e64.
465  const MachineOperand *SDst = TII->getNamedOperand(MI,
466  AMDGPU::OpName::sdst);
467 
468  // Check the carry-in operand for v_addc_u32_e64.
469  const MachineOperand *Src2 = TII->getNamedOperand(MI,
470  AMDGPU::OpName::src2);
471 
472  if (SDst) {
473  if (SDst->getReg() != AMDGPU::VCC) {
475  MRI.setRegAllocationHint(SDst->getReg(), 0, AMDGPU::VCC);
476  continue;
477  }
478 
479  // All of the instructions with carry outs also have an SGPR input in
480  // src2.
481  if (Src2 && Src2->getReg() != AMDGPU::VCC) {
483  MRI.setRegAllocationHint(Src2->getReg(), 0, AMDGPU::VCC);
484 
485  continue;
486  }
487  }
488 
489  // We can shrink this instruction
490  DEBUG(dbgs() << "Shrinking " << MI);
491 
492  MachineInstrBuilder Inst32 =
493  BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
494 
495  // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
496  // For VOPC instructions, this is replaced by an implicit def of vcc.
497  int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
498  if (Op32DstIdx != -1) {
499  // dst
500  Inst32.addOperand(MI.getOperand(0));
501  } else {
502  assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
503  "Unexpected case");
504  }
505 
506 
507  Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
508 
509  const MachineOperand *Src1 =
510  TII->getNamedOperand(MI, AMDGPU::OpName::src1);
511  if (Src1)
512  Inst32.addOperand(*Src1);
513 
514  if (Src2) {
515  int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
516  if (Op32Src2Idx != -1) {
517  Inst32.addOperand(*Src2);
518  } else {
519  // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
520  // replaced with an implicit read of vcc. This was already added
521  // during the initial BuildMI, so find it to preserve the flags.
522  copyFlagsToImplicitVCC(*Inst32, *Src2);
523  }
524  }
525 
526  ++NumInstructionsShrunk;
527 
528  // Copy extra operands not present in the instruction definition.
529  copyExtraImplicitOps(*Inst32, MF, MI);
530 
531  MI.eraseFromParent();
532  foldImmediates(*Inst32, TII, MRI);
533 
534  DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
535 
536 
537  }
538  }
539  return false;
540 }
bool isImplicit() const
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:315
static bool isReverseInlineImm(const SIInstrInfo *TII, const MachineOperand &Src, int32_t &ReverseImm)
unsigned getNumImplicitUses() const
Return the number of implicit uses this instruction has.
Definition: MCInstrDesc.h:506
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
AMDGPU specific subclass of TargetSubtarget.
static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII, const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI)
size_t i
static bool sopkIsZext(const MachineInstr &MI)
Definition: SIInstrInfo.h:431
unsigned getNumImplicitDefs() const
Return the number of implicit defs this instruction has.
Definition: MCInstrDesc.h:528
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
const SIInstrInfo * getInstrInfo() const override
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
Definition: MachineInstr.h:353
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:270
constexpr bool isInt< 16 >(int64_t x)
Definition: MathExtras.h:271
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
static void copyFlagsToImplicitVCC(MachineInstr &MI, const MachineOperand &Orig)
LLVM_READONLY int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
bool isLiteralConstant(const MachineOperand &MO, const MCOperandInfo &OpInfo) const
Definition: SIInstrInfo.h:539
const HexagonInstrInfo * TII
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_READONLY MachineOperand * getNamedOperand(MachineInstr &MI, unsigned OperandName) const
Returns the operand named Op.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
Reg
All possible values of the reg field in the ModR/M byte.
bool isUndef() const
static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI)
static bool isKImmOrKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src, bool &IsUnsigned)
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:277
bool isKill() const
MachineBasicBlock * MBB
#define DEBUG_TYPE
The pass tries to use the 32-bit encoding for instructions when possible.
static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src)
int64_t getImm() const
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:273
void ChangeToImmediate(int64_t ImmVal)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
STATISTIC(NumInstructionsShrunk,"Number of 64-bit instruction reduced to 32-bit.")
unsigned const MachineRegisterInfo * MRI
static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI)
bool hasVGPRs(const TargetRegisterClass *RC) const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:279
Represent the analysis usage information of a pass.
LLVM_READONLY int getSOPKOp(uint16_t Opcode)
void setImm(int64_t immVal)
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
static bool isVOP2(const MachineInstr &MI)
Definition: SIInstrInfo.h:295
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:36
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
bool isInlineConstant(const APInt &Imm) const
Iterator for intrusive lists based on ilist_node.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:274
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_READONLY int getVOPe32(uint16_t Opcode)
MachineOperand class - Representation of each machine instruction operand.
void setRegAllocationHint(unsigned VReg, unsigned Type, unsigned PrefReg)
setRegAllocationHint - Specify a register allocation hint for the specified virtual register...
bool hasOneUse(unsigned RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src)
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:276
bool isCompare(QueryType Type=IgnoreBundle) const
Return true if this instruction is a comparison.
Definition: MachineInstr.h:485
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:586
iterator_range< mop_iterator > implicit_operands()
Definition: MachineInstr.h:315
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
const TargetRegisterClass * getPhysRegClass(unsigned Reg) const
Return the 'base' register class for this register.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:250
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1132
Representation of each machine instruction.
Definition: MachineInstr.h:52
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction that are not part of t...
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Interface definition for SIInstrInfo.
FunctionPass * createSIShrinkInstructionsPass()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static bool isVOPC(const MachineInstr &MI)
Definition: SIInstrInfo.h:311
#define I(x, y, z)
Definition: MD5.cpp:54
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:312
unsigned getReg() const
getReg - Returns the register number.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MachineInstr.h:633
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool hasModifiersSet(const MachineInstr &MI, unsigned OpName) const
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:210
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
#define DEBUG(X)
Definition: Debug.h:100
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:47
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII, MachineRegisterInfo &MRI, bool TryToCommute=true)
This function checks MI for operands defined by a move immediate instruction and then folds the liter...
bool isMoveImmediate(QueryType Type=IgnoreBundle) const
Return true if this instruction is a move immediate (including conditional moves) instruction...
Definition: MachineInstr.h:491
static bool isVOP1(const MachineInstr &MI)
Definition: SIInstrInfo.h:287
bool use_empty(unsigned RegNo) const
use_empty - Return true if there are no instructions using the specified register.
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.