LLVM 22.0.0git
SIOptimizeExecMaskingPreRA.cpp
Go to the documentation of this file.
1//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This pass performs exec mask handling peephole optimizations which needs
11/// to be done before register allocation to reduce register pressure.
12///
13//===----------------------------------------------------------------------===//
14
16#include "AMDGPU.h"
17#include "AMDGPULaneMaskUtils.h"
18#include "GCNSubtarget.h"
23
24using namespace llvm;
25
26#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"
27
28namespace {
29
// Worker class implementing the pre-RA exec-mask peepholes for a single
// MachineFunction. Instantiated by both the legacy-PM and new-PM entry points.
30class SIOptimizeExecMaskingPreRA {
31private:
// Cached target hooks taken from the function's GCN subtarget.
32 const GCNSubtarget &ST;
33 const SIRegisterInfo *TRI;
34 const SIInstrInfo *TII;
// Liveness analysis; the transforms below update it in place as they rewrite.
36 LiveIntervals *LIS;
38
// Condition (VCC) and EXEC registers, resolved from LMC in run().
39 MCRegister CondReg;
40 MCRegister ExecReg;
41
// Individual peepholes; each returns true if it changed the block.
42 bool optimizeVcndVcmpPair(MachineBasicBlock &MBB);
43 bool optimizeElseBranch(MachineBasicBlock &MBB);
44
45public:
// NOTE(review): this extraction elides declarations between TII and LIS
// (presumably `MachineRegisterInfo *MRI` and the `LaneMaskConstants &LMC`
// referenced by the methods below) and the tail of this initializer list —
// verify against the upstream file.
46 SIOptimizeExecMaskingPreRA(MachineFunction &MF, LiveIntervals *LIS)
47 : ST(MF.getSubtarget<GCNSubtarget>()), TRI(ST.getRegisterInfo()),
48 TII(ST.getInstrInfo()), MRI(&MF.getRegInfo()), LIS(LIS),
// Entry point: applies all peepholes to MF; returns true if anything changed.
50 bool run(MachineFunction &MF);
51};
52
// Legacy (old pass manager) wrapper that drives SIOptimizeExecMaskingPreRA.
53class SIOptimizeExecMaskingPreRALegacy : public MachineFunctionPass {
54public:
55 static char ID;
56
// NOTE(review): the extraction elides this constructor's body; the file
// declares initializeSIOptimizeExecMaskingPreRALegacyPass(PassRegistry&),
// which is presumably called here — verify upstream.
57 SIOptimizeExecMaskingPreRALegacy() : MachineFunctionPass(ID) {
60 }
61
62 bool runOnMachineFunction(MachineFunction &MF) override;
63
64 StringRef getPassName() const override {
65 return "SI optimize exec mask operations pre-RA";
66 }
67
// Declares analysis dependencies. The visible line marks all analyses as
// preserved; elided line(s) here presumably addRequired the LiveIntervals
// wrapper used by runOnMachineFunction — verify upstream.
68 void getAnalysisUsage(AnalysisUsage &AU) const override {
70 AU.setPreservesAll();
72 }
73};
74
75} // End anonymous namespace.
76
// Register the legacy pass with the global PassRegistry.
// NOTE(review): the extraction elides an INITIALIZE_PASS_DEPENDENCY(...) line
// between BEGIN and END, and the signature line of the factory below
// (createSIOptimizeExecMaskingPreRAPass, declared later in this dump) —
// verify against the upstream file.
77INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRALegacy, DEBUG_TYPE,
78 "SI optimize exec mask operations pre-RA", false, false)
80INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRALegacy, DEBUG_TYPE,
81 "SI optimize exec mask operations pre-RA", false, false)
82
83char SIOptimizeExecMaskingPreRALegacy::ID = 0;
84
// External handle so other code can reference this pass by identity.
85char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRALegacy::ID;
86
// Factory used by the legacy pass pipeline.
88 return new SIOptimizeExecMaskingPreRALegacy();
89}
90
91// See if there is a def between \p AndIdx and \p SelIdx that needs to live
92// beyond \p AndIdx.
93static bool isDefBetween(const LiveRange &LR, SlotIndex AndIdx,
94 SlotIndex SelIdx) {
95 LiveQueryResult AndLRQ = LR.Query(AndIdx);
96 return (!AndLRQ.isKill() && AndLRQ.valueIn() != LR.Query(SelIdx).valueOut());
97}
98
99// FIXME: Why do we bother trying to handle physical registers here?
// Returns true if \p Reg has a def between \p Sel and \p And that must stay
// live past \p And; dispatches to the LiveRange overload above.
// NOTE(review): the extraction elides the middle parameter line(s)
// (presumably `LiveIntervals *LIS, Register Reg,`) and the computation of
// AndIdx — verify against the upstream file.
100static bool isDefBetween(const SIRegisterInfo &TRI,
102 const MachineInstr &Sel, const MachineInstr &And) {
// Query at the register slot of Sel.
104 SlotIndex SelIdx = LIS->getInstructionIndex(Sel).getRegSlot();
105
// A virtual register has a single live interval to query.
106 if (Reg.isVirtual())
107 return isDefBetween(LIS->getInterval(Reg), AndIdx, SelIdx);
108
// Physical register: check the live range of each of its register units.
109 for (MCRegUnit Unit : TRI.regunits(Reg.asMCReg())) {
110 if (isDefBetween(LIS->getRegUnit(Unit), AndIdx, SelIdx))
111 return true;
112 }
113
114 return false;
115}
116
117// Optimize sequence
118// %sel = V_CNDMASK_B32_e64 0, 1, %cc
119// %cmp = V_CMP_NE_U32 1, %sel
120// $vcc = S_AND_B64 $exec, %cmp
121// S_CBRANCH_VCC[N]Z
122// =>
123// $vcc = S_ANDN2_B64 $exec, %cc
124// S_CBRANCH_VCC[N]Z
125//
126// It is the negation pattern inserted by DAGCombiner::visitBRCOND() in the
127// rebuildSetCC(). We start with S_CBRANCH to avoid exhaustive search, but
128// only the first 3 instructions are really needed. S_AND_B64 with exec is a
129// required part of the pattern since V_CNDMASK_B32 writes zeroes for inactive
130// lanes.
131//
132// Returns true on success.
// NOTE(review): this extraction elides a few statements in the liveness
// bookkeeping below (after removeInterval(CCReg), after the Cmp
// removeVRegDefAt, and after the Sel removeVRegDefAt) — verify against the
// upstream file before relying on the exact update sequence.
133bool SIOptimizeExecMaskingPreRA::optimizeVcndVcmpPair(MachineBasicBlock &MBB) {
  // Anchor the search on a VCCZ/VCCNZ branch terminator.
134 auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
135 unsigned Opc = MI.getOpcode();
136 return Opc == AMDGPU::S_CBRANCH_VCCZ ||
137 Opc == AMDGPU::S_CBRANCH_VCCNZ; });
138 if (I == MBB.terminators().end())
139 return false;
140
  // Walk back to the AND that defines the branch condition (CondReg).
141 auto *And =
142 TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister, *I, *MRI, LIS);
143 if (!And || And->getOpcode() != LMC.AndOpc || !And->getOperand(1).isReg() ||
144 !And->getOperand(2).isReg())
145 return false;
146
  // Normalize so CmpReg names the non-exec operand of the AND; exactly one
  // operand must be exec.
147 MachineOperand *AndCC = &And->getOperand(1);
148 Register CmpReg = AndCC->getReg();
149 unsigned CmpSubReg = AndCC->getSubReg();
150 if (CmpReg == Register(ExecReg)) {
151 AndCC = &And->getOperand(2);
152 CmpReg = AndCC->getReg();
153 CmpSubReg = AndCC->getSubReg();
154 } else if (And->getOperand(2).getReg() != Register(ExecReg)) {
155 return false;
156 }
157
  // The compare must be V_CMP_NE_U32 and sit in the same block as the AND.
158 auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, *MRI, LIS);
159 if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
160 Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
161 Cmp->getParent() != And->getParent())
162 return false;
163
  // Accept the immediate on either side of the compare; it must equal 1.
164 MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
165 MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
166 if (Op1->isImm() && Op2->isReg())
167 std::swap(Op1, Op2);
168 if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
169 return false;
170
171 Register SelReg = Op1->getReg();
172 if (SelReg.isPhysical())
173 return false;
174
  // The compared value must be produced by V_CNDMASK_B32 0, 1, %cc with no
  // source modifiers.
175 auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, *MRI, LIS);
176 if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
177 return false;
178
179 if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
180 TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
181 return false;
182
183 Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
184 Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
185 MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
186 if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
187 Op1->getImm() != 0 || Op2->getImm() != 1)
188 return false;
189
190 Register CCReg = CC->getReg();
191
192 // If there was a def between the select and the and, we would need to move it
193 // to fold this.
194 if (isDefBetween(*TRI, LIS, CCReg, *Sel, *And))
195 return false;
196
197 // Cannot safely mirror live intervals with PHI nodes, so check for these
198 // before optimization.
199 SlotIndex SelIdx = LIS->getInstructionIndex(*Sel);
200 LiveInterval *SelLI = &LIS->getInterval(SelReg);
201 if (llvm::any_of(SelLI->vnis(),
202 [](const VNInfo *VNI) {
203 return VNI->isPHIDef();
204 }))
205 return false;
206
207 // TODO: Guard against implicit def operands?
208 LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
209 << *And);
210
  // Replace the AND with S_ANDN2 exec, %cc, carrying over SCC deadness.
211 MachineInstr *Andn2 =
212 BuildMI(MBB, *And, And->getDebugLoc(), TII->get(LMC.AndN2Opc),
213 And->getOperand(0).getReg())
214 .addReg(ExecReg)
215 .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
216 MachineOperand &AndSCC = And->getOperand(3);
217 assert(AndSCC.getReg() == AMDGPU::SCC);
218 MachineOperand &Andn2SCC = Andn2->getOperand(3);
219 assert(Andn2SCC.getReg() == AMDGPU::SCC);
220 Andn2SCC.setIsDead(AndSCC.isDead());
221
222 SlotIndex AndIdx = LIS->ReplaceMachineInstrInMaps(*And, *Andn2);
223 And->eraseFromParent();
224
225 LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');
226
227 // Update live intervals for CCReg before potentially removing CmpReg/SelReg,
228 // and their associated liveness information.
229 SlotIndex CmpIdx = LIS->getInstructionIndex(*Cmp);
230 if (CCReg.isVirtual()) {
231 LiveInterval &CCLI = LIS->getInterval(CCReg);
232 auto CCQ = CCLI.Query(SelIdx.getRegSlot());
233 if (CCQ.valueIn()) {
234 LIS->removeInterval(CCReg);
236 }
237 } else
238 LIS->removeAllRegUnitsForPhysReg(CCReg);
239
240 // Try to remove the compare. The Cmp value must not be used between the cmp
241 // and s_and_b64 if it is VCC, or must be entirely unused for any other register.
242 LiveInterval *CmpLI = CmpReg.isVirtual() ? &LIS->getInterval(CmpReg) : nullptr;
243 if ((CmpLI && CmpLI->Query(AndIdx.getRegSlot()).isKill()) ||
244 (CmpReg == Register(CondReg) &&
245 std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
246 [&](const MachineInstr &MI) {
247 return MI.readsRegister(CondReg, TRI);
248 }))) {
249 LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');
250 if (CmpLI)
251 LIS->removeVRegDefAt(*CmpLI, CmpIdx.getRegSlot());
253 Cmp->eraseFromParent();
254
255 // Try to remove v_cndmask_b32.
256 // Kill status must be checked before shrinking the live range.
257 bool IsKill = SelLI->Query(CmpIdx.getRegSlot()).isKill();
258 LIS->shrinkToUses(SelLI);
259 bool IsDead = SelLI->Query(SelIdx.getRegSlot()).isDeadDef();
260 if (MRI->use_nodbg_empty(SelReg) && (IsKill || IsDead)) {
261 LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');
262
263 LIS->removeVRegDefAt(*SelLI, SelIdx.getRegSlot());
265 bool ShrinkSel = Sel->getOperand(0).readsReg();
266 Sel->eraseFromParent();
267 if (ShrinkSel) {
268 // The result of the V_CNDMASK was a subreg def which counted as a read
269 // from the other parts of the reg. Shrink their live ranges.
270 LIS->shrinkToUses(SelLI);
271 }
272 }
273 }
274
275 return true;
276}
277
278// Optimize sequence
279// %dst = S_OR_SAVEEXEC %src
280// ... instructions not modifying exec ...
281// %tmp = S_AND $exec, %dst
282// $exec = S_XOR_term $exec, %tmp
283// =>
284// %dst = S_OR_SAVEEXEC %src
285// ... instructions not modifying exec ...
286// $exec = S_XOR_term $exec, %dst
287//
288// Clean up potentially unnecessary code added for safety during
289// control flow lowering.
290//
291// Return whether any changes were made to MBB.
// NOTE(review): this extraction elides one statement after the S_AND erase
// below (presumably recomputing DstReg's live interval) — verify upstream.
292bool SIOptimizeExecMaskingPreRA::optimizeElseBranch(MachineBasicBlock &MBB) {
293 if (MBB.empty())
294 return false;
295
296 // Check this is an else block.
297 auto First = MBB.begin();
298 MachineInstr &SaveExecMI = *First;
299 if (SaveExecMI.getOpcode() != LMC.OrSaveExecOpc)
300 return false;
301
  // Find the terminating XOR that restores exec.
302 auto I = llvm::find_if(MBB.terminators(), [this](const MachineInstr &MI) {
303 return MI.getOpcode() == LMC.XorTermOpc;
304 });
305 if (I == MBB.terminators().end())
306 return false;
307
308 MachineInstr &XorTermMI = *I;
309 if (XorTermMI.getOperand(1).getReg() != Register(ExecReg))
310 return false;
311
312 Register SavedExecReg = SaveExecMI.getOperand(0).getReg();
313 Register DstReg = XorTermMI.getOperand(2).getReg();
314
315 // Find potentially unnecessary S_AND
  // Scan backwards from the XOR toward the block head.
316 MachineInstr *AndExecMI = nullptr;
317 I--;
318 while (I != First && !AndExecMI) {
319 if (I->getOpcode() == LMC.AndOpc && I->getOperand(0).getReg() == DstReg &&
320 I->getOperand(1).getReg() == Register(ExecReg))
321 AndExecMI = &*I;
322 I--;
323 }
324 if (!AndExecMI)
325 return false;
326
327 // Check for exec modifying instructions.
328 // Note: exec defs do not create live ranges beyond the
329 // instruction so isDefBetween cannot be used.
330 // Instead just check that the def segments are adjacent.
331 SlotIndex StartIdx = LIS->getInstructionIndex(SaveExecMI);
332 SlotIndex EndIdx = LIS->getInstructionIndex(*AndExecMI);
333 for (MCRegUnit Unit : TRI->regunits(ExecReg)) {
334 LiveRange &RegUnit = LIS->getRegUnit(Unit);
335 if (RegUnit.find(StartIdx) != std::prev(RegUnit.find(EndIdx)))
336 return false;
337 }
338
339 // Remove unnecessary S_AND
340 LIS->removeInterval(SavedExecReg);
341 LIS->removeInterval(DstReg);
342
  // Make the saveexec define DstReg directly so the XOR's source is valid.
343 SaveExecMI.getOperand(0).setReg(DstReg);
344
345 LIS->RemoveMachineInstrFromMaps(*AndExecMI);
346 AndExecMI->eraseFromParent();
347
349
350 return true;
351}
352
// New pass manager entry point.
// NOTE(review): the extraction elides the signature line(s) here (per the
// declaration later in this dump: run(MachineFunction &MF,
// MachineFunctionAnalysisManager &MFAM)) — verify upstream.
353PreservedAnalyses
356 auto &LIS = MFAM.getResult<LiveIntervalsAnalysis>(MF);
357 SIOptimizeExecMaskingPreRA(MF, &LIS).run(MF);
// Conservatively report everything preserved, matching the legacy pass's
// setPreservesAll().
358 return PreservedAnalyses::all();
359}
360
361bool SIOptimizeExecMaskingPreRALegacy::runOnMachineFunction(
362 MachineFunction &MF) {
363 if (skipFunction(MF.getFunction()))
364 return false;
365
366 auto *LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
367 return SIOptimizeExecMaskingPreRA(MF, LIS).run(MF);
368}
369
// Driver: per-block peepholes, dead-code cleanup before s_endpgm, and folding
// of single-use exec copies; recomputes liveness for touched registers at the
// end. Returns true if anything changed.
// NOTE(review): this extraction elides several statements (an LLVM_DEBUG
// opener — leaving a dangling "<<" continuation below — two
// RemoveMachineInstrFromMaps calls around instruction erasure, and the bodies
// of the final interval-recompute branches) — verify against upstream.
370bool SIOptimizeExecMaskingPreRA::run(MachineFunction &MF) {
  // Resolve the wavefront's condition and exec registers from LMC.
371 CondReg = MCRegister::from(LMC.VccReg);
372 ExecReg = MCRegister::from(LMC.ExecReg);
373
  // Registers whose liveness must be recomputed after the transforms.
374 DenseSet<Register> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
375 bool Changed = false;
376
377 for (MachineBasicBlock &MBB : MF) {
378
379 if (optimizeElseBranch(MBB)) {
380 RecalcRegs.insert(AMDGPU::SCC);
381 Changed = true;
382 }
383
384 if (optimizeVcndVcmpPair(MBB)) {
385 RecalcRegs.insert(AMDGPU::VCC_LO);
386 RecalcRegs.insert(AMDGPU::VCC_HI);
387 RecalcRegs.insert(AMDGPU::SCC);
388 Changed = true;
389 }
390
391 // Try to remove unneeded instructions before s_endpgm.
392 if (MBB.succ_empty()) {
393 if (MBB.empty())
394 continue;
395
396 // Skip this if the endpgm has any implicit uses, otherwise we would need
397 // to be careful to update / remove them.
398 // S_ENDPGM always has a single imm operand that is not used other than to
399 // end up in the encoding
400 MachineInstr &Term = MBB.back();
401 if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
402 continue;
403
  // Worklist of blocks to scan backwards for side-effect-free instructions.
404 SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});
405
406 while (!Blocks.empty()) {
407 auto *CurBB = Blocks.pop_back_val();
408 auto I = CurBB->rbegin(), E = CurBB->rend();
409 if (I != E) {
  // Step over a trailing endpgm/unconditional branch; a conditional
  // branch means this block's tail cannot be pruned.
410 if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
411 ++I;
412 else if (I->isBranch())
413 continue;
414 }
415
416 while (I != E) {
417 if (I->isDebugInstr()) {
418 I = std::next(I);
419 continue;
420 }
421
  // Stop at anything with observable effects.
422 if (I->mayStore() || I->isBarrier() || I->isCall() ||
423 I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
424 break;
425
427 << "Removing no effect instruction: " << *I << '\n');
428
  // All registers touched by the erased instruction need recomputation.
429 for (auto &Op : I->operands()) {
430 if (Op.isReg())
431 RecalcRegs.insert(Op.getReg());
432 }
433
434 auto Next = std::next(I);
436 I->eraseFromParent();
437 I = Next;
438
439 Changed = true;
440 }
441
442 if (I != E)
443 continue;
444
445 // Try to ascend predecessors.
446 for (auto *Pred : CurBB->predecessors()) {
447 if (Pred->succ_size() == 1)
448 Blocks.push_back(Pred);
449 }
450 }
451 continue;
452 }
453
454 // If the only user of a logical operation is move to exec, fold it now
455 // to prevent forming of saveexec. I.e.:
456 //
457 // %0:sreg_64 = COPY $exec
458 // %1:sreg_64 = S_AND_B64 %0:sreg_64, %2:sreg_64
459 // =>
460 // %1 = S_AND_B64 $exec, %2:sreg_64
461 unsigned ScanThreshold = 10;
462 for (auto I = MBB.rbegin(), E = MBB.rend(); I != E
463 && ScanThreshold--; ++I) {
464 // Continue scanning if this is not a full exec copy
465 if (!(I->isFullCopy() && I->getOperand(1).getReg() == Register(ExecReg)))
466 continue;
467
468 Register SavedExec = I->getOperand(0).getReg();
469 if (SavedExec.isVirtual() && MRI->hasOneNonDBGUse(SavedExec)) {
470 MachineInstr *SingleExecUser = &*MRI->use_instr_nodbg_begin(SavedExec);
471 int Idx = SingleExecUser->findRegisterUseOperandIdx(SavedExec,
472 /*TRI=*/nullptr);
473 assert(Idx != -1);
  // Only fold when the single user is in the same block, uses the copy
  // explicitly, and exec is a legal operand in that position.
474 if (SingleExecUser->getParent() == I->getParent() &&
475 !SingleExecUser->getOperand(Idx).isImplicit() &&
476 static_cast<unsigned>(Idx) <
477 SingleExecUser->getDesc().getNumOperands() &&
478 TII->isOperandLegal(*SingleExecUser, Idx, &I->getOperand(1))) {
479 LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *I << '\n');
481 I->eraseFromParent();
482 MRI->replaceRegWith(SavedExec, ExecReg);
483 LIS->removeInterval(SavedExec);
484 Changed = true;
485 }
486 }
487 break;
488 }
489 }
490
  // Rebuild liveness for everything the transforms touched.
491 if (Changed) {
492 for (auto Reg : RecalcRegs) {
493 if (Reg.isVirtual()) {
494 LIS->removeInterval(Reg);
495 if (!MRI->reg_empty(Reg))
497 } else {
499 }
500 }
501 }
502
503 return Changed;
504}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Provides AMDGPU specific target descriptions.
MachineBasicBlock & MBB
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
AMD GCN specific subclass of TargetSubtarget.
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
bool IsDead
static bool isDefBetween(Register Reg, SlotIndex First, SlotIndex Last, const MachineRegisterInfo *MRI, const LiveIntervals *LIS)
static bool isDefBetween(const LiveRange &LR, SlotIndex AndIdx, SlotIndex SelIdx)
SI Optimize VGPR LiveRange
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const LaneMaskConstants & get(const GCNSubtarget &ST)
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
void setPreservesAll()
Set by analyses that do not transform their input at all.
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
void removeAllRegUnitsForPhysReg(MCRegister Reg)
Remove associated live ranges for the register units associated with Reg.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
LiveInterval & getInterval(Register Reg)
void removeInterval(Register Reg)
Interval removal.
LiveRange & getRegUnit(MCRegUnit Unit)
Return the live range for register unit Unit.
LLVM_ABI void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos)
Remove value number and related live segments of LI and its subranges that start at position Pos.
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
LiveInterval & createAndComputeVirtRegInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Result of a LiveRange query.
bool isDeadDef() const
Return true if this instruction has a dead def.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
bool isKill() const
Return true if the live-in value is killed by this instruction.
This class represents the liveness of a register, stack slot, etc.
iterator_range< vni_iterator > vnis()
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
LLVM_ABI iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
static MCRegister from(unsigned Val)
Check the provided unsigned value is a valid MCRegister.
Definition MCRegister.h:77
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< iterator > terminators()
reverse_iterator rbegin()
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
unsigned getSubReg() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
Changed
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
This is an optimization pass for GlobalISel generic memory operations.
void initializeSIOptimizeExecMaskingPreRALegacyPass(PassRegistry &)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
char & SIOptimizeExecMaskingPreRAID
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
unsigned getUndefRegState(bool B)
@ And
Bitwise or logical AND of integers.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1770
FunctionPass * createSIOptimizeExecMaskingPreRAPass()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
Matching combinators.