LLVM 23.0.0git
SIOptimizeExecMasking.cpp
Go to the documentation of this file.
1//===-- SIOptimizeExecMasking.cpp -----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10#include "AMDGPU.h"
11#include "AMDGPULaneMaskUtils.h"
12#include "GCNSubtarget.h"
14#include "SIRegisterInfo.h"
21
22using namespace llvm;
23
24#define DEBUG_TYPE "si-optimize-exec-masking"
25
26namespace {
27
// Core implementation of the exec-mask optimizations, shared by the legacy
// and new-pass-manager wrappers below.
// NOTE(review): several lines of this declaration were dropped during
// extraction — the tail of the constructor's initializer list, some data
// members (e.g. MF, LMC, OrXors, SaveExecVCmpMapping are referenced by the
// method bodies later in this file), and the return types / parameter lists of
// fixTerminators and findExecCopy. Confirm against the original source.
class SIOptimizeExecMasking {
public:
  SIOptimizeExecMasking(MachineFunction *MF)
      : MF(MF), ST(&MF->getSubtarget<GCNSubtarget>()), TII(ST->getInstrInfo()),
        TRI(&TII->getRegisterInfo()), MRI(&MF->getRegInfo()),
  // Run all optimizations on MF; returns true if anything changed.
  bool run();

private:
  const GCNSubtarget *ST;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;

  // Operands whose kill flags may need clearing when a recorded
  // v_cmp/s_and_saveexec sequence is rewritten (see
  // optimizeVCMPSaveExecSequence, which sets them to not-kill).
  SmallVector<MachineOperand *, 1> KillFlagCandidates;

  Register isCopyFromExec(const MachineInstr &MI) const;
  Register isCopyToExec(const MachineInstr &MI) const;
  bool removeTerminatorBit(MachineInstr &MI) const;
  // NOTE(review): return type lost in extraction (a reverse_iterator per the
  // definition below).
  fixTerminators(MachineBasicBlock &MBB) const;
  // NOTE(review): return type and trailing parameters lost in extraction.
  findExecCopy(MachineBasicBlock &MBB,
  bool isRegisterInUseBetween(MachineInstr &Stop, MachineInstr &Start,
                              MCRegister Reg, bool UseLiveOuts = false,
                              bool IgnoreStart = false) const;
  bool isRegisterInUseAfter(MachineInstr &Stop, MCRegister Reg) const;
  MachineInstr *findInstrBackwards(
      MachineInstr &Origin, std::function<bool(MachineInstr *)> Pred,
      ArrayRef<MCRegister> NonModifiableRegs,
      MachineInstr *Terminator = nullptr,
      SmallVectorImpl<MachineOperand *> *KillFlagCandidates = nullptr,
      unsigned MaxInstructions = 20) const;
  bool optimizeExecSequence();
  void tryRecordVCmpxAndSaveexecSequence(MachineInstr &MI);
  bool optimizeVCMPSaveExecSequence(MachineInstr &SaveExecInstr,
                                    MachineInstr &VCmp) const;

  void tryRecordOrSaveexecXorSequence(MachineInstr &MI);
  bool optimizeOrSaveexecXorSequences();
};
74
// Legacy (old pass manager) wrapper around SIOptimizeExecMasking.
class SIOptimizeExecMaskingLegacy : public MachineFunctionPass {
public:
  static char ID;

  SIOptimizeExecMaskingLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // The pass rewrites instructions in place and never alters the CFG.
    AU.setPreservesCFG();
    // NOTE(review): a line appears to be missing here (likely the call to
    // MachineFunctionPass::getAnalysisUsage(AU)) — confirm upstream.
  }
};
92
93} // End anonymous namespace.
94
98 SIOptimizeExecMasking Impl(&MF);
99
100 if (!Impl.run())
101 return PreservedAnalyses::all();
102
104 PA.preserveSet<CFGAnalyses>();
105 return PA;
106}
107
// Register the legacy pass with the pass registry.
// NOTE(review): an INITIALIZE_PASS_DEPENDENCY line between BEGIN and END
// appears to have been dropped by the extraction — confirm upstream.
INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingLegacy, DEBUG_TYPE,
                      "SI optimize exec mask operations", false, false)
INITIALIZE_PASS_END(SIOptimizeExecMaskingLegacy, DEBUG_TYPE,
                    "SI optimize exec mask operations", false, false)

char SIOptimizeExecMaskingLegacy::ID = 0;

// Exported handle used by the target to reference this pass by ID.
char &llvm::SIOptimizeExecMaskingLegacyID = SIOptimizeExecMaskingLegacy::ID;
117
118/// If \p MI is a copy from exec, return the register copied to.
119Register SIOptimizeExecMasking::isCopyFromExec(const MachineInstr &MI) const {
120 switch (MI.getOpcode()) {
121 case AMDGPU::COPY:
122 case AMDGPU::S_MOV_B64:
123 case AMDGPU::S_MOV_B64_term:
124 case AMDGPU::S_MOV_B32:
125 case AMDGPU::S_MOV_B32_term: {
126 const MachineOperand &Src = MI.getOperand(1);
127 if (Src.isReg() && Src.getReg() == LMC.ExecReg)
128 return MI.getOperand(0).getReg();
129 }
130 }
131
132 return AMDGPU::NoRegister;
133}
134
135/// If \p MI is a copy to exec, return the register copied from.
136Register SIOptimizeExecMasking::isCopyToExec(const MachineInstr &MI) const {
137 switch (MI.getOpcode()) {
138 case AMDGPU::COPY:
139 case AMDGPU::S_MOV_B64:
140 case AMDGPU::S_MOV_B32: {
141 const MachineOperand &Dst = MI.getOperand(0);
142 if (Dst.isReg() && Dst.getReg() == LMC.ExecReg && MI.getOperand(1).isReg())
143 return MI.getOperand(1).getReg();
144 break;
145 }
146 case AMDGPU::S_MOV_B64_term:
147 case AMDGPU::S_MOV_B32_term:
148 llvm_unreachable("should have been replaced");
149 }
150
151 return Register();
152}
153
154/// If \p MI is a logical operation on an exec value,
155/// return the register copied to.
157 switch (MI.getOpcode()) {
158 case AMDGPU::S_AND_B64:
159 case AMDGPU::S_OR_B64:
160 case AMDGPU::S_XOR_B64:
161 case AMDGPU::S_ANDN2_B64:
162 case AMDGPU::S_ORN2_B64:
163 case AMDGPU::S_NAND_B64:
164 case AMDGPU::S_NOR_B64:
165 case AMDGPU::S_XNOR_B64: {
166 const MachineOperand &Src1 = MI.getOperand(1);
167 if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
168 return MI.getOperand(0).getReg();
169 const MachineOperand &Src2 = MI.getOperand(2);
170 if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
171 return MI.getOperand(0).getReg();
172 break;
173 }
174 case AMDGPU::S_AND_B32:
175 case AMDGPU::S_OR_B32:
176 case AMDGPU::S_XOR_B32:
177 case AMDGPU::S_ANDN2_B32:
178 case AMDGPU::S_ORN2_B32:
179 case AMDGPU::S_NAND_B32:
180 case AMDGPU::S_NOR_B32:
181 case AMDGPU::S_XNOR_B32: {
182 const MachineOperand &Src1 = MI.getOperand(1);
183 if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
184 return MI.getOperand(0).getReg();
185 const MachineOperand &Src2 = MI.getOperand(2);
186 if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC_LO)
187 return MI.getOperand(0).getReg();
188 break;
189 }
190 }
191
192 return AMDGPU::NoRegister;
193}
194
195static unsigned getSaveExecOp(unsigned Opc) {
196 switch (Opc) {
197 case AMDGPU::S_AND_B64:
198 return AMDGPU::S_AND_SAVEEXEC_B64;
199 case AMDGPU::S_OR_B64:
200 return AMDGPU::S_OR_SAVEEXEC_B64;
201 case AMDGPU::S_XOR_B64:
202 return AMDGPU::S_XOR_SAVEEXEC_B64;
203 case AMDGPU::S_ANDN2_B64:
204 return AMDGPU::S_ANDN2_SAVEEXEC_B64;
205 case AMDGPU::S_ORN2_B64:
206 return AMDGPU::S_ORN2_SAVEEXEC_B64;
207 case AMDGPU::S_NAND_B64:
208 return AMDGPU::S_NAND_SAVEEXEC_B64;
209 case AMDGPU::S_NOR_B64:
210 return AMDGPU::S_NOR_SAVEEXEC_B64;
211 case AMDGPU::S_XNOR_B64:
212 return AMDGPU::S_XNOR_SAVEEXEC_B64;
213 case AMDGPU::S_AND_B32:
214 return AMDGPU::S_AND_SAVEEXEC_B32;
215 case AMDGPU::S_OR_B32:
216 return AMDGPU::S_OR_SAVEEXEC_B32;
217 case AMDGPU::S_XOR_B32:
218 return AMDGPU::S_XOR_SAVEEXEC_B32;
219 case AMDGPU::S_ANDN2_B32:
220 return AMDGPU::S_ANDN2_SAVEEXEC_B32;
221 case AMDGPU::S_ORN2_B32:
222 return AMDGPU::S_ORN2_SAVEEXEC_B32;
223 case AMDGPU::S_NAND_B32:
224 return AMDGPU::S_NAND_SAVEEXEC_B32;
225 case AMDGPU::S_NOR_B32:
226 return AMDGPU::S_NOR_SAVEEXEC_B32;
227 case AMDGPU::S_XNOR_B32:
228 return AMDGPU::S_XNOR_SAVEEXEC_B32;
229 default:
230 return AMDGPU::INSTRUCTION_LIST_END;
231 }
232}
233
234// These are only terminators to get correct spill code placement during
235// register allocation, so turn them back into normal instructions.
236bool SIOptimizeExecMasking::removeTerminatorBit(MachineInstr &MI) const {
237 switch (MI.getOpcode()) {
238 case AMDGPU::S_MOV_B32_term: {
239 bool RegSrc = MI.getOperand(1).isReg();
240 MI.setDesc(TII->get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B32));
241 return true;
242 }
243 case AMDGPU::S_MOV_B64_term: {
244 bool RegSrc = MI.getOperand(1).isReg();
245 MI.setDesc(TII->get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B64));
246 return true;
247 }
248 case AMDGPU::S_XOR_B64_term: {
249 // This is only a terminator to get the correct spill code placement during
250 // register allocation.
251 MI.setDesc(TII->get(AMDGPU::S_XOR_B64));
252 return true;
253 }
254 case AMDGPU::S_XOR_B32_term: {
255 // This is only a terminator to get the correct spill code placement during
256 // register allocation.
257 MI.setDesc(TII->get(AMDGPU::S_XOR_B32));
258 return true;
259 }
260 case AMDGPU::S_OR_B64_term: {
261 // This is only a terminator to get the correct spill code placement during
262 // register allocation.
263 MI.setDesc(TII->get(AMDGPU::S_OR_B64));
264 return true;
265 }
266 case AMDGPU::S_OR_B32_term: {
267 // This is only a terminator to get the correct spill code placement during
268 // register allocation.
269 MI.setDesc(TII->get(AMDGPU::S_OR_B32));
270 return true;
271 }
272 case AMDGPU::S_ANDN2_B64_term: {
273 // This is only a terminator to get the correct spill code placement during
274 // register allocation.
275 MI.setDesc(TII->get(AMDGPU::S_ANDN2_B64));
276 return true;
277 }
278 case AMDGPU::S_ANDN2_B32_term: {
279 // This is only a terminator to get the correct spill code placement during
280 // register allocation.
281 MI.setDesc(TII->get(AMDGPU::S_ANDN2_B32));
282 return true;
283 }
284 case AMDGPU::S_AND_B64_term: {
285 // This is only a terminator to get the correct spill code placement during
286 // register allocation.
287 MI.setDesc(TII->get(AMDGPU::S_AND_B64));
288 return true;
289 }
290 case AMDGPU::S_AND_B32_term: {
291 // This is only a terminator to get the correct spill code placement during
292 // register allocation.
293 MI.setDesc(TII->get(AMDGPU::S_AND_B32));
294 return true;
295 }
296 default:
297 return false;
298 }
299}
300
// Turn all pseudoterminators in the block into their equivalent non-terminator
// instructions. Returns the reverse iterator to the first non-terminator
// instruction in the block.
// NOTE(review): the extraction dropped the return type on the next line and
// the declarations of the reverse iterators I/E/FirstNonTerm inside the body
// (declared as MachineBasicBlock::reverse_iterator per the class declaration)
// — confirm against the original file.
SIOptimizeExecMasking::fixTerminators(MachineBasicBlock &MBB) const {

  bool Seen = false;
  // Walk the block's terminators via reverse iterators.
  for (; I != E; ++I) {
    // The first real non-terminator ends the scan. If at least one pseudo
    // terminator was rewritten, report the first rewritten one instead.
    if (!I->isTerminator())
      return Seen ? FirstNonTerm : I;

    if (removeTerminatorBit(*I)) {
      if (!Seen) {
        FirstNonTerm = I;
        Seen = true;
      }
    }
  }

  return FirstNonTerm;
}
324
// Scan backwards (for at most InstLimit instructions) looking for a copy from
// exec; returns the iterator to that copy, or MBB.rend() if none was found.
MachineBasicBlock::reverse_iterator SIOptimizeExecMasking::findExecCopy(
    // NOTE(review): the parameter list (MBB plus the reverse_iterator search
    // start I, per the declaration earlier in the file) was dropped by the
    // extraction — restore from the original source.
  const unsigned InstLimit = 25;

  auto E = MBB.rend();
  // Bounded backwards search: stop after InstLimit instructions or at the
  // start of the block.
  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
    Register CopyFromExec = isCopyFromExec(*I);
    if (CopyFromExec.isValid())
      return I;
  }

  return E;
}
338
339// XXX - Seems LiveRegUnits doesn't work correctly since it will incorrectly
340// report the register as unavailable because a super-register with a lane mask
341// is unavailable.
342static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
343 for (MachineBasicBlock *Succ : MBB.successors()) {
344 if (Succ->isLiveIn(Reg))
345 return true;
346 }
347
348 return false;
349}
350
// Backwards-iterate from Origin (for n=MaxInstructions iterations) until either
// the beginning of the BB is reached or Pred evaluates to true - which can be
// an arbitrary condition based on the current MachineInstr, for instance a
// target instruction. Breaks prematurely by returning nullptr if one of the
// registers given in NonModifiableRegs is modified by the current instruction.
MachineInstr *SIOptimizeExecMasking::findInstrBackwards(
    MachineInstr &Origin, std::function<bool(MachineInstr *)> Pred,
    ArrayRef<MCRegister> NonModifiableRegs, MachineInstr *Terminator,
    SmallVectorImpl<MachineOperand *> *KillFlagCandidates,
    unsigned MaxInstructions) const {
    // NOTE(review): the declaration of the reverse iterator A (presumably
    // starting at Origin's reverse iterator) was dropped by the extraction
    // here — confirm upstream.
    E = Origin.getParent()->rend();
  unsigned CurrentIteration = 0;

  // Start at the instruction before Origin. Debug instructions do not count
  // against the search budget.
  for (++A; CurrentIteration < MaxInstructions && A != E; ++A) {
    if (A->isDebugInstr())
      continue;

    // Found the instruction the caller is searching for.
    if (Pred(&*A))
      return &*A;

    for (MCRegister Reg : NonModifiableRegs) {
      // Any clobber of a protected register aborts the search.
      if (A->modifiesRegister(Reg, TRI))
        return nullptr;

      // Check for kills that appear after the terminator instruction, that
      // would not be detected by clearKillFlags, since they will cause the
      // register to be dead at a later place, causing the verifier to fail.
      // We use the candidates to clear the kill flags later.
      if (Terminator && KillFlagCandidates && A != Terminator &&
          A->killsRegister(Reg, TRI)) {
        for (MachineOperand &MO : A->operands()) {
          if (MO.isReg() && MO.isKill()) {
            Register Candidate = MO.getReg();
            if (Candidate != Reg && TRI->regsOverlap(Candidate, Reg))
              KillFlagCandidates->push_back(&MO);
          }
        }
      }
    }

    ++CurrentIteration;
  }

  return nullptr;
}
397
// Determine if a register Reg is not re-defined and still in use
// in the range (Stop..Start].
// It does so by backwards calculating liveness from the end of the BB until
// either Stop or the beginning of the BB is reached.
// After liveness is calculated, we can determine if Reg is still in use and not
// defined inbetween the instructions.
// NOTE(review): the "MCRegister Reg," parameter line and the declaration of
// the reverse iterator A (presumably starting at Start) were dropped by the
// extraction — confirm the full signature and body upstream.
bool SIOptimizeExecMasking::isRegisterInUseBetween(MachineInstr &Stop,
                                                   MachineInstr &Start,
                                                   bool UseLiveOuts,
                                                   bool IgnoreStart) const {
  LiveRegUnits LR(*TRI);
  // Seed with the block's live-outs when liveness past the end of the block
  // should count as a use.
  if (UseLiveOuts)
    LR.addLiveOuts(*Stop.getParent());


  if (IgnoreStart)
    ++A;

  // Step liveness backwards from Start until reaching Stop (or block begin).
  for (; A != Stop.getParent()->rend() && A != Stop; ++A) {
    LR.stepBackward(*A);
  }

  // Reserved registers are conservatively treated as always in use.
  return !LR.available(Reg) || MRI->isReserved(Reg);
}
424
425// Determine if a register Reg is not re-defined and still in use
426// in the range (Stop..BB.end].
427bool SIOptimizeExecMasking::isRegisterInUseAfter(MachineInstr &Stop,
428 MCRegister Reg) const {
429 return isRegisterInUseBetween(Stop, *Stop.getParent()->rbegin(), Reg, true);
430}
431
// Optimize sequences emitted for control flow lowering. They are originally
// emitted as the separate operations because spill code may need to be
// inserted for the saved copy of exec.
//
// x = copy exec
// z = s_<op>_b64 x, y
// exec = copy z
// =>
// x = s_<op>_saveexec_b64 y
//
bool SIOptimizeExecMasking::optimizeExecSequence() {
  bool Changed = false;
  for (MachineBasicBlock &MBB : *MF) {
    // Rewrite terminator pseudos first; I points at the first non-terminator
    // (or first rewritten instruction) from the end of the block.
    MachineBasicBlock::reverse_iterator I = fixTerminators(MBB);
    // NOTE(review): the declaration of E (presumably MBB.rend()) was dropped
    // by the extraction here — confirm upstream.
    if (I == E)
      continue;

    // It's possible to see other terminator copies after the exec copy. This
    // can happen if control flow pseudos had their outputs used by phis.
    Register CopyToExec;

    // Look a few instructions back for the "exec = copy z" of the pattern.
    unsigned SearchCount = 0;
    const unsigned SearchLimit = 5;
    while (I != E && SearchCount++ < SearchLimit) {
      CopyToExec = isCopyToExec(*I);
      if (CopyToExec)
        break;
      ++I;
    }

    if (!CopyToExec)
      continue;

    // Scan backwards to find the def.
    auto *CopyToExecInst = &*I;
    auto CopyFromExecInst = findExecCopy(MBB, I);
    if (CopyFromExecInst == E) {
      auto PrepareExecInst = std::next(I);
      if (PrepareExecInst == E)
        continue;
      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
      if (CopyToExecInst->getOperand(1).isKill() &&
          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
        LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);

        PrepareExecInst->getOperand(0).setReg(LMC.ExecReg);

        LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');

        CopyToExecInst->eraseFromParent();
        Changed = true;
      }

      continue;
    }

    if (isLiveOut(MBB, CopyToExec)) {
      // The copied register is live out and has a second use in another block.
      LLVM_DEBUG(dbgs() << "Exec copy source register is live out\n");
      continue;
    }

    Register CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
    MachineInstr *SaveExecInst = nullptr;
    SmallVector<MachineInstr *, 4> OtherUseInsts;

    // Walk forward between the copy-from-exec and the copy-to-exec, looking
    // for the single logical op that can be fused into a saveexec.
    // NOTE(review): the opening of this for statement (presumably
    // "for (MachineBasicBlock::instr_iterator") was dropped by the
    // extraction — confirm upstream.
             J = std::next(CopyFromExecInst->getIterator()),
             JE = I->getIterator();
         J != JE; ++J) {
      if (SaveExecInst && J->readsRegister(LMC.ExecReg, TRI)) {
        LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
        // Make sure this is inserted after any VALU ops that may have been
        // scheduled in between.
        SaveExecInst = nullptr;
        break;
      }

      bool ReadsCopyFromExec = J->readsRegister(CopyFromExec, TRI);

      if (J->modifiesRegister(CopyToExec, TRI)) {
        // Only a single def of the copied-to-exec value can be fused.
        if (SaveExecInst) {
          LLVM_DEBUG(dbgs() << "Multiple instructions modify "
                            << printReg(CopyToExec, TRI) << '\n');
          SaveExecInst = nullptr;
          break;
        }

        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
          break;

        if (ReadsCopyFromExec) {
          SaveExecInst = &*J;
          LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
          continue;
        }
        LLVM_DEBUG(dbgs() << "Instruction does not read exec copy: " << *J
                          << '\n');
        break;
      }
      if (ReadsCopyFromExec && !SaveExecInst) {
        // Make sure no other instruction is trying to use this copy, before it
        // will be rewritten by the saveexec, i.e. hasOneUse. There may have
        // been another use, such as an inserted spill. For example:
        //
        // %sgpr0_sgpr1 = COPY %exec
        // spill %sgpr0_sgpr1
        // %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1
        //
        LLVM_DEBUG(dbgs() << "Found second use of save inst candidate: " << *J
                          << '\n');
        break;
      }

      // Record readers of the result so they can be redirected to exec.
      if (SaveExecInst && J->readsRegister(CopyToExec, TRI)) {
        assert(SaveExecInst != &*J);
        OtherUseInsts.push_back(&*J);
      }
    }

    if (!SaveExecInst)
      continue;

    LLVM_DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');

    MachineOperand &Src0 = SaveExecInst->getOperand(1);
    MachineOperand &Src1 = SaveExecInst->getOperand(2);

    // The non-exec-copy source of the logical op.
    MachineOperand *OtherOp = nullptr;

    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
      OtherOp = &Src1;
    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
      // The saveexec form reads the copy in the first source position, so the
      // op must be commutable if the copy appears second.
      if (!SaveExecInst->isCommutable())
        break;

      OtherOp = &Src0;
    } else
      llvm_unreachable("unexpected");

    CopyFromExecInst->eraseFromParent();

    auto InsPt = SaveExecInst->getIterator();
    const DebugLoc &DL = SaveExecInst->getDebugLoc();

    // Emit the fused saveexec, then drop the original op and both copies;
    // remaining readers of the old result use exec directly.
    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(SaveExecInst->getOpcode())),
            CopyFromExec)
        .addReg(OtherOp->getReg());
    SaveExecInst->eraseFromParent();

    CopyToExecInst->eraseFromParent();

    for (MachineInstr *OtherInst : OtherUseInsts) {
      OtherInst->substituteRegister(CopyToExec, LMC.ExecReg,
                                    AMDGPU::NoSubRegister, *TRI);
    }

    Changed = true;
  }

  return Changed;
}
596
// Inserts the optimized s_mov_b32 / v_cmpx sequence based on the
// operands extracted from a v_cmp ..., s_and_saveexec pattern.
// Returns true if the pair was rewritten; false when no v_cmpx equivalent
// exists for the compare opcode.
bool SIOptimizeExecMasking::optimizeVCMPSaveExecSequence(
    MachineInstr &SaveExecInstr, MachineInstr &VCmp) const {
  const int NewOpcode = AMDGPU::getVCMPXOpFromVCMP(VCmp.getOpcode());

  if (NewOpcode == -1)
    return false;

  MachineOperand *Src0 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src0);
  MachineOperand *Src1 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src1);

  Register MoveDest = SaveExecInstr.getOperand(0).getReg();

  MachineBasicBlock::instr_iterator InsertPosIt = SaveExecInstr.getIterator();
  // Only materialize the save of exec when the saveexec's result is used.
  if (!SaveExecInstr.uses().empty()) {
    bool IsSGPR32 = TRI->getRegSizeInBits(MoveDest, *MRI) == 32;
    unsigned MovOpcode = IsSGPR32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    BuildMI(*SaveExecInstr.getParent(), InsertPosIt,
            SaveExecInstr.getDebugLoc(), TII->get(MovOpcode), MoveDest)
        .addReg(LMC.ExecReg);
  }

  // Omit dst as V_CMPX is implicitly writing to EXEC.
  // Add dummy src and clamp modifiers, if needed.
  auto Builder = BuildMI(*VCmp.getParent(), std::next(InsertPosIt),
                         VCmp.getDebugLoc(), TII->get(NewOpcode));

  // Copy an immediate operand from the v_cmp onto the new v_cmpx when that
  // named operand exists on the v_cmp.
  auto TryAddImmediateValueFromNamedOperand =
      [&](AMDGPU::OpName OperandName) -> void {
    if (auto *Mod = TII->getNamedOperand(VCmp, OperandName))
      Builder.addImm(Mod->getImm());
  };

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::src0_modifiers);
  Builder.add(*Src0);

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::src1_modifiers);
  Builder.add(*Src1);

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::clamp);

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::op_sel);

  // The kill flags may no longer be correct.
  if (Src0->isReg())
    MRI->clearKillFlags(Src0->getReg());
  if (Src1->isReg())
    MRI->clearKillFlags(Src1->getReg());

  // Clear kill flags recorded while matching the sequence (see
  // findInstrBackwards), as the rewrite may extend those registers' ranges.
  for (MachineOperand *MO : KillFlagCandidates)
    MO->setIsKill(false);

  SaveExecInstr.eraseFromParent();
  VCmp.eraseFromParent();

  return true;
}
655
// Record (on GFX10.3 and later) occurrences of
// v_cmp_* SGPR, IMM, VGPR
// s_and_saveexec_b32 EXEC_SGPR_DEST, SGPR
// to be replaced with
// s_mov_b32 EXEC_SGPR_DEST, exec_lo
// v_cmpx_* IMM, VGPR
// to reduce pipeline stalls.
void SIOptimizeExecMasking::tryRecordVCmpxAndSaveexecSequence(
    MachineInstr &MI) {
  // v_cmpx is only profitable/available from GFX10.3 onwards here.
  if (!ST->hasGFX10_3Insts())
    return;

  if (MI.getOpcode() != LMC.AndSaveExecOpc)
    return;

  Register SaveExecDest = MI.getOperand(0).getReg();
  if (!TRI->isSGPRReg(*MRI, SaveExecDest))
    return;

  MachineOperand *SaveExecSrc0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  if (!SaveExecSrc0->isReg())
    return;

  // Tries to find a possibility to optimize a v_cmp ..., s_and_saveexec
  // sequence by looking at an instance of an s_and_saveexec instruction.
  // Returns a pointer to the v_cmp instruction if it is safe to replace the
  // sequence (see the conditions in the function body). This is after register
  // allocation, so some checks on operand dependencies need to be considered.
  MachineInstr *VCmp = nullptr;

  // Try to find the last v_cmp instruction that defs the saveexec input
  // operand without any write to Exec or the saveexec input operand inbetween.
  VCmp = findInstrBackwards(
      MI,
      [&](MachineInstr *Check) {
        return AMDGPU::getVCMPXOpFromVCMP(Check->getOpcode()) != -1 &&
               Check->modifiesRegister(SaveExecSrc0->getReg(), TRI);
      },
      {LMC.ExecReg, SaveExecSrc0->getReg()});

  if (!VCmp)
    return;

  MachineOperand *VCmpDest = TII->getNamedOperand(*VCmp, AMDGPU::OpName::sdst);
  assert(VCmpDest && "Should have an sdst operand!");

  // Check if any of the v_cmp source operands is written by the saveexec.
  MachineOperand *Src0 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src0);
  if (Src0->isReg() && TRI->isSGPRReg(*MRI, Src0->getReg()) &&
      MI.modifiesRegister(Src0->getReg(), TRI))
    return;

  MachineOperand *Src1 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src1);
  if (Src1->isReg() && TRI->isSGPRReg(*MRI, Src1->getReg()) &&
      MI.modifiesRegister(Src1->getReg(), TRI))
    return;

  // Don't do the transformation if the destination operand is included in
  // it's MBB Live-outs, meaning it's used in any of its successors, leading
  // to incorrect code if the v_cmp and therefore the def of
  // the dest operand is removed.
  if (isLiveOut(*VCmp->getParent(), VCmpDest->getReg()))
    return;

  // If the v_cmp target is in use between v_cmp and s_and_saveexec or after the
  // s_and_saveexec, skip the optimization.
  if (isRegisterInUseBetween(*VCmp, MI, VCmpDest->getReg(), false, true) ||
      isRegisterInUseAfter(MI, VCmpDest->getReg()))
    return;

  // Try to determine if there is a write to any of the VCmp
  // operands between the saveexec and the vcmp.
  // If yes, additional VGPR spilling might need to be inserted. In this case,
  // it's not worth replacing the instruction sequence.
  // NOTE(review): the declaration of NonDefRegs (presumably a small vector of
  // MCRegister) was dropped by the extraction here — confirm upstream.
  if (Src0->isReg())
    NonDefRegs.push_back(Src0->getReg());

  if (Src1->isReg())
    NonDefRegs.push_back(Src1->getReg());

  if (!findInstrBackwards(
          MI, [&](MachineInstr *Check) { return Check == VCmp; }, NonDefRegs,
          VCmp, &KillFlagCandidates))
    return;

  // Remember the pair; the rewrite itself happens in
  // optimizeVCMPSaveExecSequence.
  if (VCmp)
    SaveExecVCmpMapping[&MI] = VCmp;
}
745
746// Record occurences of
747// s_or_saveexec s_o, s_i
748// s_xor exec, exec, s_o
749// to be replaced with
750// s_andn2_saveexec s_o, s_i.
751void SIOptimizeExecMasking::tryRecordOrSaveexecXorSequence(MachineInstr &MI) {
752 if (MI.getOpcode() == LMC.XorOpc && &MI != &MI.getParent()->front()) {
753 const MachineOperand &XorDst = MI.getOperand(0);
754 const MachineOperand &XorSrc0 = MI.getOperand(1);
755 const MachineOperand &XorSrc1 = MI.getOperand(2);
756
757 if (XorDst.isReg() && XorDst.getReg() == LMC.ExecReg && XorSrc0.isReg() &&
758 XorSrc1.isReg() &&
759 (XorSrc0.getReg() == LMC.ExecReg || XorSrc1.getReg() == LMC.ExecReg)) {
760
761 // Peek at the previous instruction and check if this is a relevant
762 // s_or_saveexec instruction.
763 MachineInstr &PossibleOrSaveexec = *MI.getPrevNode();
764 if (PossibleOrSaveexec.getOpcode() != LMC.OrSaveExecOpc)
765 return;
766
767 const MachineOperand &OrDst = PossibleOrSaveexec.getOperand(0);
768 const MachineOperand &OrSrc0 = PossibleOrSaveexec.getOperand(1);
769 if (OrDst.isReg() && OrSrc0.isReg()) {
770 if ((XorSrc0.getReg() == LMC.ExecReg &&
771 XorSrc1.getReg() == OrDst.getReg()) ||
772 (XorSrc0.getReg() == OrDst.getReg() &&
773 XorSrc1.getReg() == LMC.ExecReg)) {
774 OrXors.emplace_back(&PossibleOrSaveexec, &MI);
775 }
776 }
777 }
778 }
779}
780
781bool SIOptimizeExecMasking::optimizeOrSaveexecXorSequences() {
782 if (OrXors.empty()) {
783 return false;
784 }
785
786 bool Changed = false;
787
788 for (const auto &Pair : OrXors) {
789 MachineInstr *Or = nullptr;
790 MachineInstr *Xor = nullptr;
791 std::tie(Or, Xor) = Pair;
792 BuildMI(*Or->getParent(), Or->getIterator(), Or->getDebugLoc(),
793 TII->get(LMC.AndN2SaveExecOpc), Or->getOperand(0).getReg())
794 .addReg(Or->getOperand(1).getReg());
795
796 Or->eraseFromParent();
797 Xor->eraseFromParent();
798
799 Changed = true;
800 }
801
802 return Changed;
803}
804
805bool SIOptimizeExecMaskingLegacy::runOnMachineFunction(MachineFunction &MF) {
806 if (skipFunction(MF.getFunction()))
807 return false;
808
809 return SIOptimizeExecMasking(&MF).run();
810}
811
812bool SIOptimizeExecMasking::run() {
813 bool Changed = optimizeExecSequence();
814
815 OrXors.clear();
816 SaveExecVCmpMapping.clear();
817 KillFlagCandidates.clear();
818 static unsigned SearchWindow = 10;
819 for (MachineBasicBlock &MBB : *MF) {
820 unsigned SearchCount = 0;
821
822 for (auto &MI : llvm::reverse(MBB)) {
823 if (MI.isDebugInstr())
824 continue;
825
826 if (SearchCount >= SearchWindow) {
827 break;
828 }
829
830 tryRecordOrSaveexecXorSequence(MI);
831 tryRecordVCmpxAndSaveexecSequence(MI);
832
833 if (MI.modifiesRegister(LMC.ExecReg, TRI)) {
834 break;
835 }
836
837 ++SearchCount;
838 }
839 }
840
841 Changed |= optimizeOrSaveexecXorSequences();
842 for (const auto &Entry : SaveExecVCmpMapping) {
843 MachineInstr *SaveExecInstr = Entry.getFirst();
844 MachineInstr *VCmpInstr = Entry.getSecond();
845
846 Changed |= optimizeVCMPSaveExecSequence(*SaveExecInstr, *VCmpInstr);
847 }
848
849 return Changed;
850}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
Provides AMDGPU specific target descriptions.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
AMD GCN specific subclass of TargetSubtarget.
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
A set of register units.
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static unsigned getSaveExecOp(unsigned Opc)
static Register isLogicalOpOnExec(const MachineInstr &MI)
If MI is a logical operation on an exec value, return the register copied to.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
Interface definition for SIRegisterInfo.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const LaneMaskConstants & get(const GCNSubtarget &ST)
Represent the analysis usage information of a pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
A debug info location.
Definition DebugLoc.h:123
A set of register units used to track register liveness.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Instructions::iterator instr_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
reverse_iterator rbegin()
MachineInstrBundleIterator< MachineInstr > iterator
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
mop_range uses()
Returns all operands which may be register uses.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isValid() const
Definition Register.h:112
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_READONLY int32_t getVCMPXOpFromVCMP(uint32_t Opcode)
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
std::reverse_iterator< iterator > rend() const
Definition BasicBlock.h:96
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
char & SIOptimizeExecMaskingLegacyID
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ Xor
Bitwise or logical XOR of integers.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
#define N