//===-- SIOptimizeExecMasking.cpp -----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking"

namespace {

class SIOptimizeExecMasking : public MachineFunctionPass {
public:
  static char ID;

public:
  SIOptimizeExecMasking() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMasking, DEBUG_TYPE,
                      "SI optimize exec mask operations", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMasking, DEBUG_TYPE,
                    "SI optimize exec mask operations", false, false)

char SIOptimizeExecMasking::ID = 0;

char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;

/// If \p MI is a copy from exec, return the register copied to.
static unsigned isCopyFromExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_term: {
    const MachineOperand &Src = MI.getOperand(1);
    if (Src.isReg() && Src.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a copy to exec, return the register copied from.
static unsigned isCopyToExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64: {
    const MachineOperand &Dst = MI.getOperand(0);
    if (Dst.isReg() && Dst.getReg() == AMDGPU::EXEC && MI.getOperand(1).isReg())
      return MI.getOperand(1).getReg();
    break;
  }
  case AMDGPU::S_MOV_B64_term:
    llvm_unreachable("should have been replaced");
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a logical operation on an exec value,
/// return the register copied to.
static unsigned isLogicalOpOnExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_AND_B64:
  case AMDGPU::S_OR_B64:
  case AMDGPU::S_XOR_B64:
  case AMDGPU::S_ANDN2_B64:
  case AMDGPU::S_ORN2_B64:
  case AMDGPU::S_NAND_B64:
  case AMDGPU::S_NOR_B64:
  case AMDGPU::S_XNOR_B64: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

static unsigned getSaveExecOp(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::S_AND_B64:
    return AMDGPU::S_AND_SAVEEXEC_B64;
  case AMDGPU::S_OR_B64:
    return AMDGPU::S_OR_SAVEEXEC_B64;
  case AMDGPU::S_XOR_B64:
    return AMDGPU::S_XOR_SAVEEXEC_B64;
  case AMDGPU::S_ANDN2_B64:
    return AMDGPU::S_ANDN2_SAVEEXEC_B64;
  case AMDGPU::S_ORN2_B64:
    return AMDGPU::S_ORN2_SAVEEXEC_B64;
  case AMDGPU::S_NAND_B64:
    return AMDGPU::S_NAND_SAVEEXEC_B64;
  case AMDGPU::S_NOR_B64:
    return AMDGPU::S_NOR_SAVEEXEC_B64;
  case AMDGPU::S_XNOR_B64:
    return AMDGPU::S_XNOR_SAVEEXEC_B64;
  default:
    return AMDGPU::INSTRUCTION_LIST_END;
  }
}

// These are only terminators to get correct spill code placement during
// register allocation, so turn them back into normal instructions. Only one of
// these is expected per block.
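//
// A minimal sketch of the rewrite (operand names below are illustrative, not
// taken from real output); each *_term pseudo simply regains its normal,
// non-terminator opcode:
//
//   %sgpr0_sgpr1 = S_MOV_B64_term %exec    ->  %sgpr0_sgpr1 = COPY %exec
//   %dst = S_XOR_B64_term %src0, %src1     ->  %dst = S_XOR_B64 %src0, %src1
//   %dst = S_ANDN2_B64_term %src0, %src1   ->  %dst = S_ANDN2_B64 %src0, %src1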
static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_MOV_B64_term: {
    MI.setDesc(TII.get(AMDGPU::COPY));
    return true;
  }
  case AMDGPU::S_XOR_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
    return true;
  }
  case AMDGPU::S_ANDN2_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
    return true;
  }
  default:
    return false;
  }
}

static MachineBasicBlock::reverse_iterator fixTerminators(
  const SIInstrInfo &TII,
  MachineBasicBlock &MBB) {
  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
  for (; I != E; ++I) {
    if (!I->isTerminator())
      return I;

    if (removeTerminatorBit(TII, *I))
      return I;
  }

  return E;
}

static MachineBasicBlock::reverse_iterator findExecCopy(
  const SIInstrInfo &TII,
  MachineBasicBlock &MBB,
  MachineBasicBlock::reverse_iterator I,
  unsigned CopyToExec) {
  const unsigned InstLimit = 25;

  auto E = MBB.rend();
  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
    unsigned CopyFromExec = isCopyFromExec(*I);
    if (CopyFromExec != AMDGPU::NoRegister)
      return I;
  }

  return E;
}

// XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
// report the register as unavailable because a super-register with a lane mask
// is unavailable.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
  for (MachineBasicBlock *Succ : MBB.successors()) {
    if (Succ->isLiveIn(Reg))
      return true;
  }

  return false;
}

bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();

  // Optimize sequences emitted for control flow lowering. They are originally
  // emitted as separate operations because spill code may need to be inserted
  // for the saved copy of exec.
  //
  //     x = copy exec
  //     z = s_<op>_b64 x, y
  //     exec = copy z
  // =>
  //     x = s_<op>_saveexec_b64 y
  //
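  // For example, with S_AND_B64 and illustrative (hypothetical) register
  // assignments (getSaveExecOp above gives the full opcode mapping):
  //
  //     %sgpr0_sgpr1 = COPY %exec
  //     %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, %vcc
  //     %exec = COPY %sgpr2_sgpr3
  // =>
  //     %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc
  //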
  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::reverse_iterator I = fixTerminators(*TII, MBB);
    MachineBasicBlock::reverse_iterator E = MBB.rend();
    if (I == E)
      continue;

    unsigned CopyToExec = isCopyToExec(*I);
    if (CopyToExec == AMDGPU::NoRegister)
      continue;

    // Scan backwards to find the def.
    auto CopyToExecInst = &*I;
    auto CopyFromExecInst = findExecCopy(*TII, MBB, I, CopyToExec);
    if (CopyFromExecInst == E) {
      auto PrepareExecInst = std::next(I);
      if (PrepareExecInst == E)
        continue;
      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
      if (CopyToExecInst->getOperand(1).isKill() &&
          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
        LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);

        PrepareExecInst->getOperand(0).setReg(AMDGPU::EXEC);

        LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');

        CopyToExecInst->eraseFromParent();
      }

      continue;
    }

    if (isLiveOut(MBB, CopyToExec)) {
      // The copied register is live out and has a second use in another block.
      LLVM_DEBUG(dbgs() << "Exec copy source register is live out\n");
      continue;
    }

    unsigned CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
    MachineInstr *SaveExecInst = nullptr;
    SmallVector<MachineInstr *, 4> OtherUseInsts;

    for (MachineBasicBlock::iterator J
           = std::next(CopyFromExecInst->getIterator()), JE = I->getIterator();
         J != JE; ++J) {
      if (SaveExecInst && J->readsRegister(AMDGPU::EXEC, TRI)) {
        LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
        // Make sure this is inserted after any VALU ops that may have been
        // scheduled in between.
        SaveExecInst = nullptr;
        break;
      }

      bool ReadsCopyFromExec = J->readsRegister(CopyFromExec, TRI);

      if (J->modifiesRegister(CopyToExec, TRI)) {
        if (SaveExecInst) {
          LLVM_DEBUG(dbgs() << "Multiple instructions modify "
                            << printReg(CopyToExec, TRI) << '\n');
          SaveExecInst = nullptr;
          break;
        }

        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
          break;

        if (ReadsCopyFromExec) {
          SaveExecInst = &*J;
          LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
          continue;
        } else {
          LLVM_DEBUG(dbgs()
                     << "Instruction does not read exec copy: " << *J << '\n');
          break;
        }
      } else if (ReadsCopyFromExec && !SaveExecInst) {
        // Make sure no other instruction is trying to use this copy, before it
        // will be rewritten by the saveexec, i.e. hasOneUse. There may have
        // been another use, such as an inserted spill. For example:
        //
        // %sgpr0_sgpr1 = COPY %exec
        // spill %sgpr0_sgpr1
        // %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1
        //
        LLVM_DEBUG(dbgs() << "Found second use of save inst candidate: " << *J
                          << '\n');
        break;
      }

      if (SaveExecInst && J->readsRegister(CopyToExec, TRI)) {
        assert(SaveExecInst != &*J);
        OtherUseInsts.push_back(&*J);
      }
    }

    if (!SaveExecInst)
      continue;

    LLVM_DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');

    MachineOperand &Src0 = SaveExecInst->getOperand(1);
    MachineOperand &Src1 = SaveExecInst->getOperand(2);

    MachineOperand *OtherOp = nullptr;

    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
      OtherOp = &Src1;
    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
      if (!SaveExecInst->isCommutable())
        break;

      OtherOp = &Src0;
    } else
      llvm_unreachable("unexpected");

    CopyFromExecInst->eraseFromParent();

    auto InsPt = SaveExecInst->getIterator();
    const DebugLoc &DL = SaveExecInst->getDebugLoc();

    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(SaveExecInst->getOpcode())),
            CopyFromExec)
      .addReg(OtherOp->getReg());
    SaveExecInst->eraseFromParent();

    CopyToExecInst->eraseFromParent();

    for (MachineInstr *OtherInst : OtherUseInsts) {
      OtherInst->substituteRegister(CopyToExec, AMDGPU::EXEC,
                                    AMDGPU::NoSubRegister, *TRI);
    }
  }

  return true;
}