//===-- Provenance: llvm-project (LLVM 23.0.0git), AMDGPU backend ---------===//
// SIOptimizeExecMaskingPreRA.cpp — recovered from generated documentation.
//===----------------------------------------------------------------------===//
1//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This pass performs exec mask handling peephole optimizations which needs
11/// to be done before register allocation to reduce register pressure.
12///
13//===----------------------------------------------------------------------===//
14
#include "SIOptimizeExecMaskingPreRA.h"
#include "AMDGPU.h"
#include "AMDGPULaneMaskUtils.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/InitializePasses.h"

24using namespace llvm;
25
26#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"
27
28namespace {
29
30class SIOptimizeExecMaskingPreRA {
31private:
32 const GCNSubtarget &ST;
33 const SIRegisterInfo *TRI;
34 const SIInstrInfo *TII;
36 LiveIntervals *LIS;
38
39 MCRegister CondReg;
40 MCRegister ExecReg;
41
42 bool optimizeVcndVcmpPair(MachineBasicBlock &MBB);
43 bool optimizeElseBranch(MachineBasicBlock &MBB);
44
45public:
46 SIOptimizeExecMaskingPreRA(MachineFunction &MF, LiveIntervals *LIS)
47 : ST(MF.getSubtarget<GCNSubtarget>()), TRI(ST.getRegisterInfo()),
48 TII(ST.getInstrInfo()), MRI(&MF.getRegInfo()), LIS(LIS),
50 bool run(MachineFunction &MF);
51};
52
53class SIOptimizeExecMaskingPreRALegacy : public MachineFunctionPass {
54public:
55 static char ID;
56
57 SIOptimizeExecMaskingPreRALegacy() : MachineFunctionPass(ID) {}
58
59 bool runOnMachineFunction(MachineFunction &MF) override;
60
61 StringRef getPassName() const override {
62 return "SI optimize exec mask operations pre-RA";
63 }
64
65 void getAnalysisUsage(AnalysisUsage &AU) const override {
67 AU.setPreservesAll();
69 }
70};
71
72} // End anonymous namespace.
73
74INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRALegacy, DEBUG_TYPE,
75 "SI optimize exec mask operations pre-RA", false, false)
77INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRALegacy, DEBUG_TYPE,
78 "SI optimize exec mask operations pre-RA", false, false)
79
80char SIOptimizeExecMaskingPreRALegacy::ID = 0;
81
82char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRALegacy::ID;
83
85 return new SIOptimizeExecMaskingPreRALegacy();
86}
87
88// See if there is a def between \p AndIdx and \p SelIdx that needs to live
89// beyond \p AndIdx.
90static bool isDefBetween(const LiveRange &LR, SlotIndex AndIdx,
91 SlotIndex SelIdx) {
92 LiveQueryResult AndLRQ = LR.Query(AndIdx);
93 return (!AndLRQ.isKill() && AndLRQ.valueIn() != LR.Query(SelIdx).valueOut());
94}
95
96// FIXME: Why do we bother trying to handle physical registers here?
97static bool isDefBetween(const SIRegisterInfo &TRI,
99 const MachineInstr &Sel, const MachineInstr &And) {
101 SlotIndex SelIdx = LIS->getInstructionIndex(Sel).getRegSlot();
102
103 if (Reg.isVirtual())
104 return isDefBetween(LIS->getInterval(Reg), AndIdx, SelIdx);
105
106 for (MCRegUnit Unit : TRI.regunits(Reg.asMCReg())) {
107 if (isDefBetween(LIS->getRegUnit(Unit), AndIdx, SelIdx))
108 return true;
109 }
110
111 return false;
112}
113
114// Optimize sequence
115// %sel = V_CNDMASK_B32_e64 0, 1, %cc
116// %cmp = V_CMP_NE_U32 1, %sel
117// $vcc = S_AND_B64 $exec, %cmp
118// S_CBRANCH_VCC[N]Z
119// =>
120// $vcc = S_ANDN2_B64 $exec, %cc
121// S_CBRANCH_VCC[N]Z
122//
123// It is the negation pattern inserted by DAGCombiner::visitBRCOND() in the
124// rebuildSetCC(). We start with S_CBRANCH to avoid exhaustive search, but
125// only 3 first instructions are really needed. S_AND_B64 with exec is a
126// required part of the pattern since V_CNDMASK_B32 writes zeroes for inactive
127// lanes.
128//
129// Returns true on success.
130bool SIOptimizeExecMaskingPreRA::optimizeVcndVcmpPair(MachineBasicBlock &MBB) {
131 auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
132 unsigned Opc = MI.getOpcode();
133 return Opc == AMDGPU::S_CBRANCH_VCCZ ||
134 Opc == AMDGPU::S_CBRANCH_VCCNZ; });
135 if (I == MBB.terminators().end())
136 return false;
137
138 auto *And =
139 TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister, *I, *MRI, LIS);
140 if (!And || And->getOpcode() != LMC.AndOpc || !And->getOperand(1).isReg() ||
141 !And->getOperand(2).isReg())
142 return false;
143
144 MachineOperand *AndCC = &And->getOperand(1);
145 Register CmpReg = AndCC->getReg();
146 unsigned CmpSubReg = AndCC->getSubReg();
147 if (CmpReg == Register(ExecReg)) {
148 AndCC = &And->getOperand(2);
149 CmpReg = AndCC->getReg();
150 CmpSubReg = AndCC->getSubReg();
151 } else if (And->getOperand(2).getReg() != Register(ExecReg)) {
152 return false;
153 }
154
155 auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, *MRI, LIS);
156 if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
157 Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
158 Cmp->getParent() != And->getParent())
159 return false;
160
161 MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
162 MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
163 if (Op1->isImm() && Op2->isReg())
164 std::swap(Op1, Op2);
165 if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
166 return false;
167
168 Register SelReg = Op1->getReg();
169 if (SelReg.isPhysical())
170 return false;
171
172 auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, *MRI, LIS);
173 if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
174 return false;
175
176 if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
177 TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
178 return false;
179
180 Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
181 Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
182 MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
183 if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
184 Op1->getImm() != 0 || Op2->getImm() != 1)
185 return false;
186
187 Register CCReg = CC->getReg();
188
189 // If there was a def between the select and the and, we would need to move it
190 // to fold this.
191 if (isDefBetween(*TRI, LIS, CCReg, *Sel, *And))
192 return false;
193
194 // Cannot safely mirror live intervals with PHI nodes, so check for these
195 // before optimization.
196 SlotIndex SelIdx = LIS->getInstructionIndex(*Sel);
197 LiveInterval *SelLI = &LIS->getInterval(SelReg);
198 if (llvm::any_of(SelLI->vnis(),
199 [](const VNInfo *VNI) {
200 return VNI->isPHIDef();
201 }))
202 return false;
203
204 // TODO: Guard against implicit def operands?
205 LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
206 << *And);
207
208 MachineInstr *Andn2 =
209 BuildMI(MBB, *And, And->getDebugLoc(), TII->get(LMC.AndN2Opc),
210 And->getOperand(0).getReg())
211 .addReg(ExecReg)
212 .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
213 MachineOperand &AndSCC = And->getOperand(3);
214 assert(AndSCC.getReg() == AMDGPU::SCC);
215 MachineOperand &Andn2SCC = Andn2->getOperand(3);
216 assert(Andn2SCC.getReg() == AMDGPU::SCC);
217 Andn2SCC.setIsDead(AndSCC.isDead());
218
219 SlotIndex AndIdx = LIS->ReplaceMachineInstrInMaps(*And, *Andn2);
220 And->eraseFromParent();
221
222 LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');
223
224 // Update live intervals for CCReg before potentially removing CmpReg/SelReg,
225 // and their associated liveness information.
226 SlotIndex CmpIdx = LIS->getInstructionIndex(*Cmp);
227 if (CCReg.isVirtual()) {
228 LiveInterval &CCLI = LIS->getInterval(CCReg);
229 auto CCQ = CCLI.Query(SelIdx.getRegSlot());
230 if (CCQ.valueIn()) {
231 LIS->removeInterval(CCReg);
233 }
234 } else
235 LIS->removeAllRegUnitsForPhysReg(CCReg);
236
237 // Try to remove compare. Cmp value should not used in between of cmp
238 // and s_and_b64 if VCC or just unused if any other register.
239 LiveInterval *CmpLI = CmpReg.isVirtual() ? &LIS->getInterval(CmpReg) : nullptr;
240 if ((CmpLI && CmpLI->Query(AndIdx.getRegSlot()).isKill()) ||
241 (CmpReg == Register(CondReg) &&
242 std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
243 [&](const MachineInstr &MI) {
244 return MI.readsRegister(CondReg, TRI);
245 }))) {
246 LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');
247 if (CmpLI)
248 LIS->removeVRegDefAt(*CmpLI, CmpIdx.getRegSlot());
250 Cmp->eraseFromParent();
251
252 // Try to remove v_cndmask_b32.
253 // Kill status must be checked before shrinking the live range.
254 bool IsKill = SelLI->Query(CmpIdx.getRegSlot()).isKill();
255 LIS->shrinkToUses(SelLI);
256 bool IsDead = SelLI->Query(SelIdx.getRegSlot()).isDeadDef();
257 if (MRI->use_nodbg_empty(SelReg) && (IsKill || IsDead)) {
258 LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');
259
260 LIS->removeVRegDefAt(*SelLI, SelIdx.getRegSlot());
262 bool ShrinkSel = Sel->getOperand(0).readsReg();
263 Sel->eraseFromParent();
264 if (ShrinkSel) {
265 // The result of the V_CNDMASK was a subreg def which counted as a read
266 // from the other parts of the reg. Shrink their live ranges.
267 LIS->shrinkToUses(SelLI);
268 }
269 }
270 }
271
272 return true;
273}
274
275// Optimize sequence
276// %dst = S_OR_SAVEEXEC %src
277// ... instructions not modifying exec ...
278// %tmp = S_AND $exec, %dst
279// $exec = S_XOR_term $exec, %tmp
280// =>
281// %dst = S_OR_SAVEEXEC %src
282// ... instructions not modifying exec ...
283// $exec = S_XOR_term $exec, %dst
284//
285// Clean up potentially unnecessary code added for safety during
286// control flow lowering.
287//
288// Return whether any changes were made to MBB.
289bool SIOptimizeExecMaskingPreRA::optimizeElseBranch(MachineBasicBlock &MBB) {
290 if (MBB.empty())
291 return false;
292
293 // Check this is an else block.
294 auto First = MBB.begin();
295 MachineInstr &SaveExecMI = *First;
296 if (SaveExecMI.getOpcode() != LMC.OrSaveExecOpc)
297 return false;
298
299 auto I = llvm::find_if(MBB.terminators(), [this](const MachineInstr &MI) {
300 return MI.getOpcode() == LMC.XorTermOpc;
301 });
302 if (I == MBB.terminators().end())
303 return false;
304
305 MachineInstr &XorTermMI = *I;
306 if (XorTermMI.getOperand(1).getReg() != Register(ExecReg))
307 return false;
308
309 Register SavedExecReg = SaveExecMI.getOperand(0).getReg();
310 Register DstReg = XorTermMI.getOperand(2).getReg();
311
312 // Find potentially unnecessary S_AND
313 MachineInstr *AndExecMI = nullptr;
314 I--;
315 while (I != First && !AndExecMI) {
316 if (I->getOpcode() == LMC.AndOpc && I->getOperand(0).getReg() == DstReg &&
317 I->getOperand(1).getReg() == Register(ExecReg))
318 AndExecMI = &*I;
319 I--;
320 }
321 if (!AndExecMI)
322 return false;
323
324 // Check for exec modifying instructions.
325 // Note: exec defs do not create live ranges beyond the
326 // instruction so isDefBetween cannot be used.
327 // Instead just check that the def segments are adjacent.
328 SlotIndex StartIdx = LIS->getInstructionIndex(SaveExecMI);
329 SlotIndex EndIdx = LIS->getInstructionIndex(*AndExecMI);
330 for (MCRegUnit Unit : TRI->regunits(ExecReg)) {
331 LiveRange &RegUnit = LIS->getRegUnit(Unit);
332 if (RegUnit.find(StartIdx) != std::prev(RegUnit.find(EndIdx)))
333 return false;
334 }
335
336 // Remove unnecessary S_AND
337 LIS->removeInterval(SavedExecReg);
338 LIS->removeInterval(DstReg);
339
340 SaveExecMI.getOperand(0).setReg(DstReg);
341
342 LIS->RemoveMachineInstrFromMaps(*AndExecMI);
343 AndExecMI->eraseFromParent();
344
346
347 return true;
348}
349
350PreservedAnalyses
353 auto &LIS = MFAM.getResult<LiveIntervalsAnalysis>(MF);
354 SIOptimizeExecMaskingPreRA(MF, &LIS).run(MF);
355 return PreservedAnalyses::all();
356}
357
358bool SIOptimizeExecMaskingPreRALegacy::runOnMachineFunction(
359 MachineFunction &MF) {
360 if (skipFunction(MF.getFunction()))
361 return false;
362
363 auto *LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
364 return SIOptimizeExecMaskingPreRA(MF, LIS).run(MF);
365}
366
367bool SIOptimizeExecMaskingPreRA::run(MachineFunction &MF) {
368 CondReg = MCRegister::from(LMC.VccReg);
369 ExecReg = MCRegister::from(LMC.ExecReg);
370
371 DenseSet<Register> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
372 bool Changed = false;
373
374 for (MachineBasicBlock &MBB : MF) {
375
376 if (optimizeElseBranch(MBB)) {
377 RecalcRegs.insert(AMDGPU::SCC);
378 Changed = true;
379 }
380
381 if (optimizeVcndVcmpPair(MBB)) {
382 RecalcRegs.insert(AMDGPU::VCC_LO);
383 RecalcRegs.insert(AMDGPU::VCC_HI);
384 RecalcRegs.insert(AMDGPU::SCC);
385 Changed = true;
386 }
387
388 // Try to remove unneeded instructions before s_endpgm.
389 if (MBB.succ_empty()) {
390 if (MBB.empty())
391 continue;
392
393 // Skip this if the endpgm has any implicit uses, otherwise we would need
394 // to be careful to update / remove them.
395 // S_ENDPGM always has a single imm operand that is not used other than to
396 // end up in the encoding
397 MachineInstr &Term = MBB.back();
398 if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
399 continue;
400
401 SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});
402
403 while (!Blocks.empty()) {
404 auto *CurBB = Blocks.pop_back_val();
405 auto I = CurBB->rbegin(), E = CurBB->rend();
406 if (I != E) {
407 if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
408 ++I;
409 else if (I->isBranch())
410 continue;
411 }
412
413 while (I != E) {
414 if (I->isDebugInstr()) {
415 I = std::next(I);
416 continue;
417 }
418
419 if (I->mayStore() || I->isBarrier() || I->isCall() ||
420 I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
421 break;
422
424 << "Removing no effect instruction: " << *I << '\n');
425
426 for (auto &Op : I->operands()) {
427 if (Op.isReg())
428 RecalcRegs.insert(Op.getReg());
429 }
430
431 auto Next = std::next(I);
433 I->eraseFromParent();
434 I = Next;
435
436 Changed = true;
437 }
438
439 if (I != E)
440 continue;
441
442 // Try to ascend predecessors.
443 for (auto *Pred : CurBB->predecessors()) {
444 if (Pred->succ_size() == 1)
445 Blocks.push_back(Pred);
446 }
447 }
448 continue;
449 }
450
451 // If the only user of a logical operation is move to exec, fold it now
452 // to prevent forming of saveexec. I.e.:
453 //
454 // %0:sreg_64 = COPY $exec
455 // %1:sreg_64 = S_AND_B64 %0:sreg_64, %2:sreg_64
456 // =>
457 // %1 = S_AND_B64 $exec, %2:sreg_64
458 unsigned ScanThreshold = 10;
459 for (auto I = MBB.rbegin(), E = MBB.rend(); I != E
460 && ScanThreshold--; ++I) {
461 // Continue scanning if this is not a full exec copy
462 if (!(I->isFullCopy() && I->getOperand(1).getReg() == Register(ExecReg)))
463 continue;
464
465 Register SavedExec = I->getOperand(0).getReg();
466 if (SavedExec.isVirtual() && MRI->hasOneNonDBGUse(SavedExec)) {
467 MachineInstr *SingleExecUser = &*MRI->use_instr_nodbg_begin(SavedExec);
468 int Idx = SingleExecUser->findRegisterUseOperandIdx(SavedExec,
469 /*TRI=*/nullptr);
470 assert(Idx != -1);
471 if (SingleExecUser->getParent() == I->getParent() &&
472 !SingleExecUser->getOperand(Idx).isImplicit() &&
473 static_cast<unsigned>(Idx) <
474 SingleExecUser->getDesc().getNumOperands() &&
475 TII->isOperandLegal(*SingleExecUser, Idx, &I->getOperand(1))) {
476 LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *I << '\n');
478 I->eraseFromParent();
479 MRI->replaceRegWith(SavedExec, ExecReg);
480 LIS->removeInterval(SavedExec);
481 Changed = true;
482 }
483 }
484 break;
485 }
486 }
487
488 if (Changed) {
489 for (auto Reg : RecalcRegs) {
490 if (Reg.isVirtual()) {
491 LIS->removeInterval(Reg);
492 if (!MRI->reg_empty(Reg))
494 } else {
496 }
497 }
498 }
499
500 return Changed;
501}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Provides AMDGPU specific target descriptions.
MachineBasicBlock & MBB
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
AMD GCN specific subclass of TargetSubtarget.
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
bool IsDead
static bool isDefBetween(Register Reg, SlotIndex First, SlotIndex Last, const MachineRegisterInfo *MRI, const LiveIntervals *LIS)
static bool isDefBetween(const LiveRange &LR, SlotIndex AndIdx, SlotIndex SelIdx)
SI Optimize VGPR LiveRange
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const LaneMaskConstants & get(const GCNSubtarget &ST)
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
void setPreservesAll()
Set by analyses that do not transform their input at all.
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
void removeAllRegUnitsForPhysReg(MCRegister Reg)
Remove associated live ranges for the register units associated with Reg.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
LiveInterval & getInterval(Register Reg)
void removeInterval(Register Reg)
Interval removal.
LiveRange & getRegUnit(MCRegUnit Unit)
Return the live range for register unit Unit.
LLVM_ABI void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos)
Remove value number and related live segments of LI and its subranges that start at position Pos.
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
LiveInterval & createAndComputeVirtRegInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Result of a LiveRange query.
bool isDeadDef() const
Return true if this instruction has a dead def.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
bool isKill() const
Return true if the live-in value is killed by this instruction.
This class represents the liveness of a register, stack slot, etc.
iterator_range< vni_iterator > vnis()
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
LLVM_ABI iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
static MCRegister from(unsigned Val)
Check the provided unsigned value is a valid MCRegister.
Definition MCRegister.h:77
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< iterator > terminators()
reverse_iterator rbegin()
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
unsigned getSubReg() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
Changed
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
char & SIOptimizeExecMaskingPreRAID
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
@ And
Bitwise or logical AND of integers.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
FunctionPass * createSIOptimizeExecMaskingPreRAPass()
constexpr RegState getUndefRegState(bool B)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
Matching combinators.