SIInsertSkips.cpp (LLVM 9.0.0svn)
//===-- SIInsertSkips.cpp - Use predicates for control flow ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass inserts branches on the 0 exec mask over divergent branches when
/// it's expected that jumping over the untaken control flow will be cheaper
/// than having every workitem no-op through it.
//
//===----------------------------------------------------------------------===//
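
//
// Editor's note: an illustrative sketch, not taken from the upstream sources.
// For a pseudo-MIR fragment such as
//
//   SI_MASK_BRANCH %bb.3
//   ; %bb.2: a long run of VALU/VMEM instructions (block names hypothetical)
//
// skipMaskBranch() appends an "S_CBRANCH_EXECZ %bb.3" right after the
// SI_MASK_BRANCH when shouldSkip() decides the region between the branch and
// %bb.3 is too expensive to fall through with all lanes disabled.
//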

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "si-insert-skips"

static cl::opt<unsigned> SkipThresholdFlag(
  "amdgpu-skip-threshold",
  cl::desc("Number of instructions before jumping over divergent control flow"),
  cl::init(12), cl::Hidden);
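// Editor's note: since this is a cl::opt, the default of 12 can be overridden
// on the llc command line, e.g. "llc -amdgpu-skip-threshold=64" (the flag name
// comes from the definition above; the value 64 is only an example).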

namespace {

class SIInsertSkips : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  unsigned SkipThreshold = 0;

  bool shouldSkip(const MachineBasicBlock &From,
                  const MachineBasicBlock &To) const;

  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  void kill(MachineInstr &MI);

  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  bool skipMaskBranch(MachineInstr &MI, MachineBasicBlock &MBB);

  bool optimizeVccBranch(MachineInstr &MI) const;

public:
  static char ID;

  SIInsertSkips() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert s_cbranch_execz instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SIInsertSkips::ID = 0;

INITIALIZE_PASS(SIInsertSkips, DEBUG_TYPE,
                "SI insert s_cbranch_execz instructions", false, false)

char &llvm::SIInsertSkipsPassID = SIInsertSkips::ID;

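// These opcodes are bookkeeping pseudo-instructions that emit no machine code,
// so shouldSkip() does not count them against the skip threshold.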
static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

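// Heuristic: decide whether the blocks between From and To are worth jumping
// over when EXEC is zero. Returns true if the region contains VCC branches,
// instructions with unwanted effects when EXEC = 0, memory or wait-count
// instructions, or simply more than SkipThreshold countable instructions.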
bool SIInsertSkips::shouldSkip(const MachineBasicBlock &From,
                               const MachineBasicBlock &To) const {
  if (From.succ_empty())
    return false;

  unsigned NumInstr = 0;
  const MachineFunction *MF = From.getParent();

  for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // FIXME: Since this is required for correctness, this should be inserted
      // during SILowerControlFlow.

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it become infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (TII->hasUnwantedEffectsWhenEXECEmpty(*I))
        return true;

      // These instructions are potentially expensive even if EXEC = 0.
      if (TII->isSMRD(*I) || TII->isVMEM(*I) ||
          I->getOpcode() == AMDGPU::S_WAITCNT)
        return true;

      ++NumInstr;
      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

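// For pixel shaders only: when worthwhile, insert a "skip block" after MBB that
// performs a null export and ends the program, and branch over it to NextBB
// with S_CBRANCH_EXECNZ while any lanes are still live. Returns true if the
// block was inserted.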
bool SIInsertSkips::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

  if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(MBB, MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addImm(1)  // vm
    .addImm(0)  // compr
    .addImm(0); // en

  // ... and terminate wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM)).addImm(0);

  return true;
}

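// Lower a SI_KILL_*_TERMINATOR pseudo into the EXEC-mask update that disables
// the killed lanes: a V_CMPX compare for the F32 form, or an S_MOV/S_AND/
// S_ANDN2 of EXEC for the I1 form.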
void SIInsertSkips::kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: {
    unsigned Opcode = 0;

    // The opcodes are inverted because the inline immediate has to be
    // the first operand, e.g. from "x < imm" to "imm > x"
    switch (MI.getOperand(2).getImm()) {
    case ISD::SETOEQ:
    case ISD::SETEQ:
      Opcode = AMDGPU::V_CMPX_EQ_F32_e64;
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      Opcode = AMDGPU::V_CMPX_LT_F32_e64;
      break;
    case ISD::SETOGE:
    case ISD::SETGE:
      Opcode = AMDGPU::V_CMPX_LE_F32_e64;
      break;
    case ISD::SETOLT:
    case ISD::SETLT:
      Opcode = AMDGPU::V_CMPX_GT_F32_e64;
      break;
    case ISD::SETOLE:
    case ISD::SETLE:
      Opcode = AMDGPU::V_CMPX_GE_F32_e64;
      break;
    case ISD::SETONE:
    case ISD::SETNE:
      Opcode = AMDGPU::V_CMPX_LG_F32_e64;
      break;
    case ISD::SETO:
      Opcode = AMDGPU::V_CMPX_O_F32_e64;
      break;
    case ISD::SETUO:
      Opcode = AMDGPU::V_CMPX_U_F32_e64;
      break;
    case ISD::SETUEQ:
      Opcode = AMDGPU::V_CMPX_NLG_F32_e64;
      break;
    case ISD::SETUGT:
      Opcode = AMDGPU::V_CMPX_NGE_F32_e64;
      break;
    case ISD::SETUGE:
      Opcode = AMDGPU::V_CMPX_NGT_F32_e64;
      break;
    case ISD::SETULT:
      Opcode = AMDGPU::V_CMPX_NLE_F32_e64;
      break;
    case ISD::SETULE:
      Opcode = AMDGPU::V_CMPX_NLT_F32_e64;
      break;
    case ISD::SETUNE:
      Opcode = AMDGPU::V_CMPX_NEQ_F32_e64;
      break;
    default:
      llvm_unreachable("invalid ISD:SET cond code");
    }

    assert(MI.getOperand(0).isReg());

    if (TRI->isVGPR(MBB.getParent()->getRegInfo(),
                    MI.getOperand(0).getReg())) {
      Opcode = AMDGPU::getVOPe32(Opcode);
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .add(MI.getOperand(1))
          .add(MI.getOperand(0));
    } else {
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .addReg(AMDGPU::VCC, RegState::Define)
          .addImm(0)  // src0 modifiers
          .add(MI.getOperand(1))
          .addImm(0)  // src1 modifiers
          .add(MI.getOperand(0))
          .addImm(0); // omod
    }
    break;
  }
  case AMDGPU::SI_KILL_I1_TERMINATOR: {
    const MachineOperand &Op = MI.getOperand(0);
    int64_t KillVal = MI.getOperand(1).getImm();
    assert(KillVal == 0 || KillVal == -1);

    // Kill all threads if Op0 is an immediate and equal to the Kill value.
    if (Op.isImm()) {
      int64_t Imm = Op.getImm();
      assert(Imm == 0 || Imm == -1);

      if (Imm == KillVal)
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addImm(0);
      break;
    }

    unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64;
    BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .add(Op);
    break;
  }
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_TERMINATOR");
  }
}

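// Create an empty block immediately after MBB and add it as a successor;
// skipIfDead() fills the returned block with the null export and S_ENDPGM.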
MachineBasicBlock *SIInsertSkips::insertSkipBlock(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, SkipBB);
  MBB.addSuccessor(SkipBB);

  return SkipBB;
}

// Returns true if a branch over the block was inserted.
bool SIInsertSkips::skipMaskBranch(MachineInstr &MI,
                                   MachineBasicBlock &SrcMBB) {
  MachineBasicBlock *DestBB = MI.getOperand(0).getMBB();

  if (!shouldSkip(**SrcMBB.succ_begin(), *DestBB))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator InsPt = std::next(MI.getIterator());

  BuildMI(SrcMBB, InsPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addMBB(DestBB);

  return true;
}

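// Try to rewrite an S_CBRANCH_VCC[N]Z whose condition is just "EXEC & -1" into
// the equivalent S_CBRANCH_EXEC[N]Z (or an unconditional branch / no branch
// when the AND source is EXEC itself), removing the now-dead mask computation
// when possible.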
bool SIInsertSkips::optimizeVccBranch(MachineInstr &MI) const {
  // Match:
  // sreg = -1
  // vcc = S_AND_B64 exec, sreg
  // S_CBRANCH_VCC[N]Z
  // =>
  // S_CBRANCH_EXEC[N]Z
  bool Changed = false;
  MachineBasicBlock &MBB = *MI.getParent();
  const unsigned CondReg = AMDGPU::VCC;
  const unsigned ExecReg = AMDGPU::EXEC;
  const unsigned And = AMDGPU::S_AND_B64;

  MachineBasicBlock::reverse_iterator A = MI.getReverseIterator(),
                                      E = MBB.rend();
  bool ReadsCond = false;
  unsigned Threshold = 5;
  for (++A ; A != E ; ++A) {
    if (!--Threshold)
      return false;
    if (A->modifiesRegister(ExecReg, TRI))
      return false;
    if (A->modifiesRegister(CondReg, TRI)) {
      if (!A->definesRegister(CondReg, TRI) || A->getOpcode() != And)
        return false;
      break;
    }
    ReadsCond |= A->readsRegister(CondReg, TRI);
  }
  if (A == E)
    return false;

  MachineOperand &Op1 = A->getOperand(1);
  MachineOperand &Op2 = A->getOperand(2);
  if (Op1.getReg() != ExecReg && Op2.isReg() && Op2.getReg() == ExecReg) {
    TII->commuteInstruction(*A);
    Changed = true;
  }
  if (Op1.getReg() != ExecReg)
    return Changed;
  if (Op2.isImm() && Op2.getImm() != -1)
    return Changed;

  unsigned SReg = AMDGPU::NoRegister;
  if (Op2.isReg()) {
    SReg = Op2.getReg();
    auto M = std::next(A);
    bool ReadsSreg = false;
    for ( ; M != E ; ++M) {
      if (M->definesRegister(SReg, TRI))
        break;
      if (M->modifiesRegister(SReg, TRI))
        return Changed;
      ReadsSreg |= M->readsRegister(SReg, TRI);
    }
    if (M == E ||
        !M->isMoveImmediate() ||
        !M->getOperand(1).isImm() ||
        M->getOperand(1).getImm() != -1)
      return Changed;
    // If SReg is only used in the AND instruction, fold the immediate
    // into that AND.
    if (!ReadsSreg && Op2.isKill()) {
      A->getOperand(2).ChangeToImmediate(-1);
      M->eraseFromParent();
    }
  }

  if (!ReadsCond && A->registerDefIsDead(AMDGPU::SCC) &&
      MI.killsRegister(CondReg, TRI))
    A->eraseFromParent();

  bool IsVCCZ = MI.getOpcode() == AMDGPU::S_CBRANCH_VCCZ;
  if (SReg == ExecReg) {
    if (IsVCCZ) {
      MI.eraseFromParent();
      return true;
    }
    MI.setDesc(TII->get(AMDGPU::S_BRANCH));
  } else {
    MI.setDesc(TII->get(IsVCCZ ? AMDGPU::S_CBRANCH_EXECZ
                               : AMDGPU::S_CBRANCH_EXECNZ));
  }

  MI.RemoveOperand(MI.findRegisterUseOperandIdx(CondReg, false /*Kill*/, TRI));
  MI.addImplicitDefUseOperands(*MBB.getParent());

  return true;
}

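// Main driver: walk every block, expand SI_MASK_BRANCH into execz skips, lower
// kill terminators (adding early-exit skip blocks where profitable), clean up
// redundant S_BRANCHes, reroute SI_RETURN_TO_EPILOG when it is not the last
// instruction, and simplify VCC branches.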
bool SIInsertSkips::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  SkipThreshold = SkipThresholdFlag;

  bool HaveKill = false;
  bool MadeChange = false;

  // Track depth of exec mask, divergent branches.
  SmallVector<MachineBasicBlock *, 16> ExecBranchStack;

  MachineFunction::iterator NextBB;

  MachineBasicBlock *EmptyMBBAtEnd = nullptr;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;
    bool HaveSkipBlock = false;

    if (!ExecBranchStack.empty() && ExecBranchStack.back() == &MBB) {
      // Reached convergence point for last divergent branch.
      ExecBranchStack.pop_back();
    }

    if (HaveKill && ExecBranchStack.empty()) {
      HaveKill = false;

      // TODO: Insert skip if exec is 0?
    }

    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_MASK_BRANCH:
        ExecBranchStack.push_back(MI.getOperand(0).getMBB());
        MadeChange |= skipMaskBranch(MI, MBB);
        break;

      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // FIXME: Shouldn't this be handled by BranchFolding?
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          MI.eraseFromParent();
        } else if (HaveSkipBlock) {
          // Remove the given unconditional branch when a skip block has been
          // inserted after the current one, and let it skip the two
          // instructions performing the kill if the exec mask is non-zero.
          MI.eraseFromParent();
        }
        break;

      case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      case AMDGPU::SI_KILL_I1_TERMINATOR:
        MadeChange = true;
        kill(MI);

        if (ExecBranchStack.empty()) {
          if (NextBB != BE && skipIfDead(MI, *NextBB)) {
            HaveSkipBlock = true;
            NextBB = std::next(BI);
            BE = MF.end();
          }
        } else {
          HaveKill = true;
        }

        MI.eraseFromParent();
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        // FIXME: Should move somewhere else
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN_TO_EPILOG is not the last instruction. Add an empty
          // block at the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;

      case AMDGPU::S_CBRANCH_VCCZ:
      case AMDGPU::S_CBRANCH_VCCNZ:
        MadeChange |= optimizeVccBranch(MI);
        break;

      default:
        break;
      }
    }
  }

  return MadeChange;
}