//===-- MipsExpandPseudoInsts.cpp - Expand pseudo instructions ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling, if-conversion, and other late
// optimizations. This pass should be run after register allocation but before
// the post-regalloc scheduling pass.
//
// This is currently only used for expanding atomic pseudos after register
// allocation. We do this to avoid the fast register allocator introducing
// spills between ll and sc. These stores cause some MIPS implementations to
// abort the atomic RMW sequence.
//
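// For illustration (the register names below are placeholders, not the exact
// registers chosen by the allocator), expandAtomicBinOp turns a 32-bit
// ATOMIC_LOAD_ADD_I32_POSTRA pseudo into a load-linked/store-conditional
// retry loop of roughly this shape:
//
//   loop:
//     ll    $old, 0($ptr)         # load-linked the current value
//     addu  $scratch, $old, $incr
//     sc    $scratch, 0($ptr)     # store-conditional; leaves 0 on failure
//     beq   $scratch, $zero, loop
//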
//===----------------------------------------------------------------------===//

#include "Mips.h"
#include "MipsInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

#define DEBUG_TYPE "mips-pseudo"

namespace {
  class MipsExpandPseudo : public MachineFunctionPass {
  public:
    static char ID;
    MipsExpandPseudo() : MachineFunctionPass(ID) {}

    const MipsInstrInfo *TII;
    const MipsSubtarget *STI;

    bool runOnMachineFunction(MachineFunction &Fn) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    StringRef getPassName() const override {
      return "Mips pseudo instruction expansion pass";
    }

  private:
    bool expandAtomicCmpSwap(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             MachineBasicBlock::iterator &NMBB);
    bool expandAtomicCmpSwapSubword(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    MachineBasicBlock::iterator &NMBB);

    bool expandAtomicBinOp(MachineBasicBlock &BB,
                           MachineBasicBlock::iterator I,
                           MachineBasicBlock::iterator &NMBBI, unsigned Size);
    bool expandAtomicBinOpSubword(MachineBasicBlock &BB,
                                  MachineBasicBlock::iterator I,
                                  MachineBasicBlock::iterator &NMBBI);

    bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                  MachineBasicBlock::iterator &NMBB);
    bool expandMBB(MachineBasicBlock &MBB);
  };
  char MipsExpandPseudo::ID = 0;
}

bool MipsExpandPseudo::expandAtomicCmpSwapSubword(
    MachineBasicBlock &BB, MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator &NMBBI) {

  MachineFunction *MF = BB.getParent();

  const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
  DebugLoc DL = I->getDebugLoc();
  unsigned LL, SC;

  unsigned ZERO = Mips::ZERO;
  unsigned BNE = Mips::BNE;
  unsigned BEQ = Mips::BEQ;
  unsigned SEOp =
      I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I8_POSTRA ? Mips::SEB : Mips::SEH;

  if (STI->inMicroMipsMode()) {
    LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
    SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
    BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
    BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
  } else {
    LL = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
                            : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
    SC = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
                            : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
  }

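  // The POSTRA pseudo carries everything the expansion needs as pre-allocated
  // registers: the result (Dest), the aligned word pointer, the in-position
  // field mask (Mask) and the mask that keeps the surrounding bytes (Mask2),
  // the shifted compare and new values, the shift amount, and two scratch
  // registers. The expansion below is a word-sized ll/sc retry loop; if the
  // masked old value does not equal ShiftCmpVal it exits early to sinkMBB,
  // which extracts and sign-extends the loaded subword into Dest.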
  Register Dest = I->getOperand(0).getReg();
  Register Ptr = I->getOperand(1).getReg();
  Register Mask = I->getOperand(2).getReg();
  Register ShiftCmpVal = I->getOperand(3).getReg();
  Register Mask2 = I->getOperand(4).getReg();
  Register ShiftNewVal = I->getOperand(5).getReg();
  Register ShiftAmnt = I->getOperand(6).getReg();
  Register Scratch = I->getOperand(7).getReg();
  Register Scratch2 = I->getOperand(8).getReg();

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB.getBasicBlock();
  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB.getIterator();
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, sinkMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), &BB,
                  std::next(MachineBasicBlock::iterator(I)), BB.end());
  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);

  //  thisMBB:
  //    ...
  //    fallthrough --> loop1MBB
  BB.addSuccessor(loop1MBB, BranchProbability::getOne());
  loop1MBB->addSuccessor(sinkMBB);
  loop1MBB->addSuccessor(loop2MBB);
  loop1MBB->normalizeSuccProbs();
  loop2MBB->addSuccessor(loop1MBB);
  loop2MBB->addSuccessor(sinkMBB);
  loop2MBB->normalizeSuccProbs();
  sinkMBB->addSuccessor(exitMBB, BranchProbability::getOne());

  //  loop1MBB:
  //    ll dest, 0(ptr)
  //    and Mask', dest, Mask
  //    bne Mask', ShiftCmpVal, sinkMBB
  BuildMI(loop1MBB, DL, TII->get(LL), Scratch).addReg(Ptr).addImm(0);
  BuildMI(loop1MBB, DL, TII->get(Mips::AND), Scratch2)
      .addReg(Scratch)
      .addReg(Mask);
  BuildMI(loop1MBB, DL, TII->get(BNE))
      .addReg(Scratch2).addReg(ShiftCmpVal).addMBB(sinkMBB);

  //  loop2MBB:
  //    and dest, dest, mask2
  //    or dest, dest, ShiftNewVal
  //    sc dest, dest, 0(ptr)
  //    beq dest, $0, loop1MBB
  BuildMI(loop2MBB, DL, TII->get(Mips::AND), Scratch)
      .addReg(Scratch, RegState::Kill)
      .addReg(Mask2);
  BuildMI(loop2MBB, DL, TII->get(Mips::OR), Scratch)
      .addReg(Scratch, RegState::Kill)
      .addReg(ShiftNewVal);
  BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
      .addReg(Scratch, RegState::Kill)
      .addReg(Ptr)
      .addImm(0);
  BuildMI(loop2MBB, DL, TII->get(BEQ))
      .addReg(Scratch, RegState::Kill)
      .addReg(ZERO)
      .addMBB(loop1MBB);

  //  sinkMBB:
  //    srl srlres, Mask', shiftamt
  //    sign_extend dest, srlres
  BuildMI(sinkMBB, DL, TII->get(Mips::SRLV), Dest)
      .addReg(Scratch2)
      .addReg(ShiftAmnt);
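  // Sign-extend the extracted value into Dest: use seb/seh where available
  // (MIPS32r2 and later), otherwise fall back to an sll/sra pair.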
  if (STI->hasMips32r2()) {
    BuildMI(sinkMBB, DL, TII->get(SEOp), Dest).addReg(Dest);
  } else {
    const unsigned ShiftImm =
        I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I16_POSTRA ? 16 : 24;
    BuildMI(sinkMBB, DL, TII->get(Mips::SLL), Dest)
        .addReg(Dest, RegState::Kill)
        .addImm(ShiftImm);
    BuildMI(sinkMBB, DL, TII->get(Mips::SRA), Dest)
        .addReg(Dest, RegState::Kill)
        .addImm(ShiftImm);
  }

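  // The pass runs after register allocation, so the newly created blocks must
  // have their live-in lists computed explicitly.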
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *loop1MBB);
  computeAndAddLiveIns(LiveRegs, *loop2MBB);
  computeAndAddLiveIns(LiveRegs, *sinkMBB);
  computeAndAddLiveIns(LiveRegs, *exitMBB);

  NMBBI = BB.end();
  I->eraseFromParent();
  return true;
}

bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB,
                                           MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator &NMBBI) {

  const unsigned Size =
      I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I32_POSTRA ? 4 : 8;
  MachineFunction *MF = BB.getParent();

  const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
  DebugLoc DL = I->getDebugLoc();

  unsigned LL, SC, ZERO, BNE, BEQ, MOVE;

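  // Select LL/SC and the companion opcodes for the access size; note that
  // 64-bit pointers select the LL64/SC64 forms even when the data itself is
  // only 32 bits wide.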
  if (Size == 4) {
    if (STI->inMicroMipsMode()) {
      LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
      SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
      BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
      BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
    } else {
      LL = STI->hasMips32r6()
               ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
               : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
      SC = STI->hasMips32r6()
               ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
               : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
      BNE = Mips::BNE;
      BEQ = Mips::BEQ;
    }

    ZERO = Mips::ZERO;
    MOVE = Mips::OR;
  } else {
    LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
    SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
    ZERO = Mips::ZERO_64;
    BNE = Mips::BNE64;
    BEQ = Mips::BEQ64;
    MOVE = Mips::OR64;
  }

  Register Dest = I->getOperand(0).getReg();
  Register Ptr = I->getOperand(1).getReg();
  Register OldVal = I->getOperand(2).getReg();
  Register NewVal = I->getOperand(3).getReg();
  Register Scratch = I->getOperand(4).getReg();

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB.getBasicBlock();
  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB.getIterator();
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), &BB,
                  std::next(MachineBasicBlock::iterator(I)), BB.end());
  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);

  //  thisMBB:
  //    ...
  //    fallthrough --> loop1MBB
  BB.addSuccessor(loop1MBB, BranchProbability::getOne());
  loop1MBB->addSuccessor(exitMBB);
  loop1MBB->addSuccessor(loop2MBB);
  loop1MBB->normalizeSuccProbs();
  loop2MBB->addSuccessor(loop1MBB);
  loop2MBB->addSuccessor(exitMBB);
  loop2MBB->normalizeSuccProbs();

  //  loop1MBB:
  //    ll dest, 0(ptr)
  //    bne dest, oldval, exitMBB
  BuildMI(loop1MBB, DL, TII->get(LL), Dest).addReg(Ptr).addImm(0);
  BuildMI(loop1MBB, DL, TII->get(BNE))
      .addReg(Dest, RegState::Kill).addReg(OldVal).addMBB(exitMBB);

  //  loop2MBB:
  //    move scratch, NewVal
  //    sc Scratch, Scratch, 0(ptr)
  //    beq Scratch, $0, loop1MBB
  BuildMI(loop2MBB, DL, TII->get(MOVE), Scratch).addReg(NewVal).addReg(ZERO);
  BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
      .addReg(Scratch).addReg(Ptr).addImm(0);
  BuildMI(loop2MBB, DL, TII->get(BEQ))
      .addReg(Scratch, RegState::Kill).addReg(ZERO).addMBB(loop1MBB);

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *loop1MBB);
  computeAndAddLiveIns(LiveRegs, *loop2MBB);
  computeAndAddLiveIns(LiveRegs, *exitMBB);

  NMBBI = BB.end();
  I->eraseFromParent();
  return true;
}

bool MipsExpandPseudo::expandAtomicBinOpSubword(
    MachineBasicBlock &BB, MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator &NMBBI) {

  MachineFunction *MF = BB.getParent();

  const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
  DebugLoc DL = I->getDebugLoc();

  unsigned LL, SC, SLT, SLTu, OR, MOVN, MOVZ, SELNEZ, SELEQZ;
  unsigned BEQ = Mips::BEQ;
  unsigned SEOp = Mips::SEH;

  if (STI->inMicroMipsMode()) {
    LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
    SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
    BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
    SLT = Mips::SLT_MM;
    SLTu = Mips::SLTu_MM;
    OR = STI->hasMips32r6() ? Mips::OR_MMR6 : Mips::OR_MM;
    MOVN = Mips::MOVN_I_MM;
    MOVZ = Mips::MOVZ_I_MM;
    SELNEZ = STI->hasMips32r6() ? Mips::SELNEZ_MMR6 : Mips::SELNEZ;
    SELEQZ = STI->hasMips32r6() ? Mips::SELEQZ_MMR6 : Mips::SELEQZ;
  } else {
    LL = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
                            : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
    SC = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
                            : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
    SLT = Mips::SLT;
    SLTu = Mips::SLTu;
    OR = Mips::OR;
    MOVN = Mips::MOVN_I_I;
    MOVZ = Mips::MOVZ_I_I;
    SELNEZ = Mips::SELNEZ;
    SELEQZ = Mips::SELEQZ;
  }

  bool IsSwap = false;
  bool IsNand = false;
  bool IsMin = false;
  bool IsMax = false;
  bool IsUnsigned = false;
  bool DestOK = false;

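  // Classify the pseudo: which binary opcode to apply (if any), whether it is
  // a swap, a nand, or a member of the min/max family (and its signedness),
  // and whether the i8 forms need seb instead of seh for the final extension.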
  unsigned Opcode = 0;
  switch (I->getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8_POSTRA:
    SEOp = Mips::SEB;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_NAND_I16_POSTRA:
    IsNand = true;
    break;
  case Mips::ATOMIC_SWAP_I8_POSTRA:
    SEOp = Mips::SEB;
    [[fallthrough]];
  case Mips::ATOMIC_SWAP_I16_POSTRA:
    IsSwap = true;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8_POSTRA:
    SEOp = Mips::SEB;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_ADD_I16_POSTRA:
    Opcode = Mips::ADDu;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8_POSTRA:
    SEOp = Mips::SEB;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_SUB_I16_POSTRA:
    Opcode = Mips::SUBu;
    break;
  case Mips::ATOMIC_LOAD_AND_I8_POSTRA:
    SEOp = Mips::SEB;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_AND_I16_POSTRA:
    Opcode = Mips::AND;
    break;
  case Mips::ATOMIC_LOAD_OR_I8_POSTRA:
    SEOp = Mips::SEB;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_OR_I16_POSTRA:
    Opcode = Mips::OR;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8_POSTRA:
    SEOp = Mips::SEB;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_XOR_I16_POSTRA:
    Opcode = Mips::XOR;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8_POSTRA:
    SEOp = Mips::SEB;
    IsUnsigned = true;
    IsMin = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16_POSTRA:
    IsUnsigned = true;
    IsMin = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8_POSTRA:
    SEOp = Mips::SEB;
    IsMin = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16_POSTRA:
    IsMin = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8_POSTRA:
    SEOp = Mips::SEB;
    IsUnsigned = true;
    IsMax = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16_POSTRA:
    IsUnsigned = true;
    IsMax = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8_POSTRA:
    SEOp = Mips::SEB;
    IsMax = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16_POSTRA:
    IsMax = true;
    break;
  default:
    llvm_unreachable("Unknown subword atomic pseudo for expansion!");
  }

  Register Dest = I->getOperand(0).getReg();
  Register Ptr = I->getOperand(1).getReg();
  Register Incr = I->getOperand(2).getReg();
  Register Mask = I->getOperand(3).getReg();
  Register Mask2 = I->getOperand(4).getReg();
  Register ShiftAmnt = I->getOperand(5).getReg();
  Register OldVal = I->getOperand(6).getReg();
  Register BinOpRes = I->getOperand(7).getReg();
  Register StoreVal = I->getOperand(8).getReg();

  const BasicBlock *LLVM_BB = BB.getBasicBlock();
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB.getIterator();
  MF->insert(It, loopMBB);
  MF->insert(It, sinkMBB);
  MF->insert(It, exitMBB);

  exitMBB->splice(exitMBB->begin(), &BB, std::next(I), BB.end());
  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);

  BB.addSuccessor(loopMBB, BranchProbability::getOne());
  loopMBB->addSuccessor(sinkMBB);
  loopMBB->addSuccessor(loopMBB);
  loopMBB->normalizeSuccProbs();

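  // The subword operation is carried out on the containing aligned word: the
  // loaded word is combined with the shifted operand under Mask, the untouched
  // bytes are preserved through Mask2, and the merged word is written back
  // with sc.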
  BuildMI(loopMBB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
  if (IsNand) {
    //  and andres, oldval, incr2
    //  nor binopres, $0, andres
    //  and newval, binopres, mask
    BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
        .addReg(OldVal)
        .addReg(Incr);
    BuildMI(loopMBB, DL, TII->get(Mips::NOR), BinOpRes)
        .addReg(Mips::ZERO)
        .addReg(BinOpRes);
    BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
        .addReg(BinOpRes)
        .addReg(Mask);
  } else if (IsMin || IsMax) {

    assert(I->getNumOperands() == 10 &&
           "Atomics min|max|umin|umax use an additional register");
    Register Scratch4 = I->getOperand(9).getReg();

    unsigned SLTScratch4 = IsUnsigned ? SLTu : SLT;
    unsigned SELIncr = IsMax ? SELNEZ : SELEQZ;
    unsigned SELOldVal = IsMax ? SELEQZ : SELNEZ;
    unsigned MOVIncr = IsMax ? MOVN : MOVZ;

    BuildMI(loopMBB, DL, TII->get(Mips::SRAV), StoreVal)
        .addReg(OldVal)
        .addReg(ShiftAmnt);
    if (IsUnsigned) {
      const unsigned OpMask = SEOp == Mips::SEH ? 0xffff : 0xff;
      BuildMI(loopMBB, DL, TII->get(Mips::ANDi), StoreVal)
          .addReg(StoreVal)
          .addImm(OpMask);
    } else if (STI->hasMips32r2()) {
      BuildMI(loopMBB, DL, TII->get(SEOp), StoreVal).addReg(StoreVal);
    } else {
      const unsigned ShiftImm = SEOp == Mips::SEH ? 16 : 24;
      const unsigned SROp = IsUnsigned ? Mips::SRL : Mips::SRA;
      BuildMI(loopMBB, DL, TII->get(Mips::SLL), StoreVal)
          .addReg(StoreVal, RegState::Kill)
          .addImm(ShiftImm);
      BuildMI(loopMBB, DL, TII->get(SROp), StoreVal)
          .addReg(StoreVal, RegState::Kill)
          .addImm(ShiftImm);
    }
    BuildMI(loopMBB, DL, TII->get(Mips::OR), Dest)
        .addReg(Mips::ZERO)
        .addReg(StoreVal);
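    // Dest now holds the extracted (and extended) old value, so the generic
    // extraction in sinkMBB can be skipped for the min/max pseudos.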
    DestOK = true;
    BuildMI(loopMBB, DL, TII->get(Mips::SLLV), StoreVal)
        .addReg(StoreVal)
        .addReg(ShiftAmnt);

    // unsigned: sltu Scratch4, StoreVal, Incr
    // signed:   slt Scratch4, StoreVal, Incr
    BuildMI(loopMBB, DL, TII->get(SLTScratch4), Scratch4)
        .addReg(StoreVal)
        .addReg(Incr);

    if (STI->hasMips64r6() || STI->hasMips32r6()) {
      // max: seleqz BinOpRes, OldVal, Scratch4
      //      selnez Scratch4, Incr, Scratch4
      //      or BinOpRes, BinOpRes, Scratch4
      // min: selnez BinOpRes, OldVal, Scratch4
      //      seleqz Scratch4, Incr, Scratch4
      //      or BinOpRes, BinOpRes, Scratch4
      BuildMI(loopMBB, DL, TII->get(SELOldVal), BinOpRes)
          .addReg(StoreVal)
          .addReg(Scratch4);
      BuildMI(loopMBB, DL, TII->get(SELIncr), Scratch4)
          .addReg(Incr)
          .addReg(Scratch4);
      BuildMI(loopMBB, DL, TII->get(OR), BinOpRes)
          .addReg(BinOpRes)
          .addReg(Scratch4);
    } else {
      // max: move BinOpRes, StoreVal
      //      movn BinOpRes, Incr, Scratch4, BinOpRes
      // min: move BinOpRes, StoreVal
      //      movz BinOpRes, Incr, Scratch4, BinOpRes
      BuildMI(loopMBB, DL, TII->get(OR), BinOpRes)
          .addReg(StoreVal)
          .addReg(Mips::ZERO);
      BuildMI(loopMBB, DL, TII->get(MOVIncr), BinOpRes)
          .addReg(Incr)
          .addReg(Scratch4)
          .addReg(BinOpRes);
    }

    // and BinOpRes, BinOpRes, Mask
    BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
        .addReg(BinOpRes)
        .addReg(Mask);

  } else if (!IsSwap) {
    //  <binop> binopres, oldval, incr2
    //  and newval, binopres, mask
    BuildMI(loopMBB, DL, TII->get(Opcode), BinOpRes)
        .addReg(OldVal)
        .addReg(Incr);
    BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
        .addReg(BinOpRes)
        .addReg(Mask);
  } else { // atomic.swap
    //  and newval, incr2, mask
    BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
        .addReg(Incr)
        .addReg(Mask);
  }

  // and StoreVal, OldVal, Mask2
  // or StoreVal, StoreVal, BinOpRes
  // StoreVal<tied1> = sc StoreVal, 0(Ptr)
  // beq StoreVal, zero, loopMBB
  BuildMI(loopMBB, DL, TII->get(Mips::AND), StoreVal)
      .addReg(OldVal).addReg(Mask2);
  BuildMI(loopMBB, DL, TII->get(Mips::OR), StoreVal)
      .addReg(StoreVal).addReg(BinOpRes);
  BuildMI(loopMBB, DL, TII->get(SC), StoreVal)
      .addReg(StoreVal).addReg(Ptr).addImm(0);
  BuildMI(loopMBB, DL, TII->get(BEQ))
      .addReg(StoreVal).addReg(Mips::ZERO).addMBB(loopMBB);

  //  sinkMBB:
  //    and maskedoldval1, oldval, mask
  //    srl srlres, maskedoldval1, shiftamt
  //    sign_extend dest, srlres

  if (!DestOK) {
    sinkMBB->addSuccessor(exitMBB, BranchProbability::getOne());
    BuildMI(sinkMBB, DL, TII->get(Mips::AND), Dest).addReg(OldVal).addReg(Mask);
    BuildMI(sinkMBB, DL, TII->get(Mips::SRLV), Dest)
        .addReg(Dest)
        .addReg(ShiftAmnt);

    if (STI->hasMips32r2()) {
      BuildMI(sinkMBB, DL, TII->get(SEOp), Dest).addReg(Dest);
    } else {
      const unsigned ShiftImm = SEOp == Mips::SEH ? 16 : 24;
      BuildMI(sinkMBB, DL, TII->get(Mips::SLL), Dest)
          .addReg(Dest, RegState::Kill)
          .addImm(ShiftImm);
      BuildMI(sinkMBB, DL, TII->get(Mips::SRA), Dest)
          .addReg(Dest, RegState::Kill)
          .addImm(ShiftImm);
    }
  }

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *loopMBB);
  computeAndAddLiveIns(LiveRegs, *sinkMBB);
  computeAndAddLiveIns(LiveRegs, *exitMBB);

  NMBBI = BB.end();
  I->eraseFromParent();

  return true;
}

bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         MachineBasicBlock::iterator &NMBBI,
                                         unsigned Size) {
  MachineFunction *MF = BB.getParent();

  const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
  DebugLoc DL = I->getDebugLoc();

  unsigned LL, SC, ZERO, BEQ, SLT, SLTu, OR, MOVN, MOVZ, SELNEZ, SELEQZ;

  if (Size == 4) {
    if (STI->inMicroMipsMode()) {
      LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
      SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
      BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
      SLT = Mips::SLT_MM;
      SLTu = Mips::SLTu_MM;
      OR = STI->hasMips32r6() ? Mips::OR_MMR6 : Mips::OR_MM;
      MOVN = Mips::MOVN_I_MM;
      MOVZ = Mips::MOVZ_I_MM;
      SELNEZ = STI->hasMips32r6() ? Mips::SELNEZ_MMR6 : Mips::SELNEZ;
      SELEQZ = STI->hasMips32r6() ? Mips::SELEQZ_MMR6 : Mips::SELEQZ;
    } else {
      LL = STI->hasMips32r6()
               ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
               : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
      SC = STI->hasMips32r6()
               ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
               : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
      BEQ = Mips::BEQ;
      SLT = Mips::SLT;
      SLTu = Mips::SLTu;
      OR = Mips::OR;
      MOVN = Mips::MOVN_I_I;
      MOVZ = Mips::MOVZ_I_I;
      SELNEZ = Mips::SELNEZ;
      SELEQZ = Mips::SELEQZ;
    }

    ZERO = Mips::ZERO;
  } else {
    LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
    SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
    ZERO = Mips::ZERO_64;
    BEQ = Mips::BEQ64;
    SLT = Mips::SLT64;
    SLTu = Mips::SLTu64;
    OR = Mips::OR64;
    MOVN = Mips::MOVN_I64_I64;
    MOVZ = Mips::MOVZ_I64_I64;
    SELNEZ = Mips::SELNEZ64;
    SELEQZ = Mips::SELEQZ64;
  }

  Register OldVal = I->getOperand(0).getReg();
  Register Ptr = I->getOperand(1).getReg();
  Register Incr = I->getOperand(2).getReg();
  Register Scratch = I->getOperand(3).getReg();

  unsigned Opcode = 0;
  unsigned AND = 0;
  unsigned NOR = 0;

  bool IsOr = false;
  bool IsNand = false;
  bool IsMin = false;
  bool IsMax = false;
  bool IsUnsigned = false;

  switch (I->getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
    Opcode = Mips::ADDu;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
    Opcode = Mips::SUBu;
    break;
  case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
    Opcode = Mips::AND;
    break;
  case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
    Opcode = Mips::OR;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
    Opcode = Mips::XOR;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
    IsNand = true;
    AND = Mips::AND;
    NOR = Mips::NOR;
    break;
  case Mips::ATOMIC_SWAP_I32_POSTRA:
    IsOr = true;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
    Opcode = Mips::DADDu;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
    Opcode = Mips::DSUBu;
    break;
  case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
    Opcode = Mips::AND64;
    break;
  case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
    Opcode = Mips::OR64;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
    Opcode = Mips::XOR64;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
    IsNand = true;
    AND = Mips::AND64;
    NOR = Mips::NOR64;
    break;
  case Mips::ATOMIC_SWAP_I64_POSTRA:
    IsOr = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32_POSTRA:
  case Mips::ATOMIC_LOAD_UMIN_I64_POSTRA:
    IsUnsigned = true;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_MIN_I32_POSTRA:
  case Mips::ATOMIC_LOAD_MIN_I64_POSTRA:
    IsMin = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32_POSTRA:
  case Mips::ATOMIC_LOAD_UMAX_I64_POSTRA:
    IsUnsigned = true;
    [[fallthrough]];
  case Mips::ATOMIC_LOAD_MAX_I32_POSTRA:
  case Mips::ATOMIC_LOAD_MAX_I64_POSTRA:
    IsMax = true;
    break;
  default:
    llvm_unreachable("Unknown pseudo atomic!");
  }

  const BasicBlock *LLVM_BB = BB.getBasicBlock();
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB.getIterator();
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  exitMBB->splice(exitMBB->begin(), &BB, std::next(I), BB.end());
  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);

  BB.addSuccessor(loopMBB, BranchProbability::getOne());
  loopMBB->addSuccessor(exitMBB);
  loopMBB->addSuccessor(loopMBB);
  loopMBB->normalizeSuccProbs();

  BuildMI(loopMBB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
  assert((OldVal != Ptr) && "Clobbered the wrong ptr reg!");
  assert((OldVal != Incr) && "Clobbered the wrong reg!");
  if (IsMin || IsMax) {

    assert(I->getNumOperands() == 5 &&
           "Atomics min|max|umin|umax use an additional register");
    MCRegister Scratch2 = I->getOperand(4).getReg().asMCReg();

    // On Mips64 result of slt is GPR32.
    MCRegister Scratch2_32 =
        (Size == 8) ? STI->getRegisterInfo()->getSubReg(Scratch2, Mips::sub_32)
                    : Scratch2;

    unsigned SLTScratch2 = IsUnsigned ? SLTu : SLT;
    unsigned SELIncr = IsMax ? SELNEZ : SELEQZ;
    unsigned SELOldVal = IsMax ? SELEQZ : SELNEZ;
    unsigned MOVIncr = IsMax ? MOVN : MOVZ;

    // unsigned: sltu Scratch2, oldVal, Incr
    // signed:   slt Scratch2, oldVal, Incr
    BuildMI(loopMBB, DL, TII->get(SLTScratch2), Scratch2_32)
        .addReg(OldVal)
        .addReg(Incr);

    if (STI->hasMips64r6() || STI->hasMips32r6()) {
      // max: seleqz Scratch, OldVal, Scratch2
      //      selnez Scratch2, Incr, Scratch2
      //      or Scratch, Scratch, Scratch2
      // min: selnez Scratch, OldVal, Scratch2
      //      seleqz Scratch2, Incr, Scratch2
      //      or Scratch, Scratch, Scratch2
      BuildMI(loopMBB, DL, TII->get(SELOldVal), Scratch)
          .addReg(OldVal)
          .addReg(Scratch2);
      BuildMI(loopMBB, DL, TII->get(SELIncr), Scratch2)
          .addReg(Incr)
          .addReg(Scratch2);
      BuildMI(loopMBB, DL, TII->get(OR), Scratch)
          .addReg(Scratch)
          .addReg(Scratch2);
    } else {
      // max: move Scratch, OldVal
      //      movn Scratch, Incr, Scratch2, Scratch
      // min: move Scratch, OldVal
      //      movz Scratch, Incr, Scratch2, Scratch
      BuildMI(loopMBB, DL, TII->get(OR), Scratch)
          .addReg(OldVal)
          .addReg(ZERO);
      BuildMI(loopMBB, DL, TII->get(MOVIncr), Scratch)
          .addReg(Incr)
          .addReg(Scratch2)
          .addReg(Scratch);
    }

  } else if (Opcode) {
    BuildMI(loopMBB, DL, TII->get(Opcode), Scratch).addReg(OldVal).addReg(Incr);
  } else if (IsNand) {
    assert(AND && NOR &&
           "Unknown nand instruction for atomic pseudo expansion");
    BuildMI(loopMBB, DL, TII->get(AND), Scratch).addReg(OldVal).addReg(Incr);
    BuildMI(loopMBB, DL, TII->get(NOR), Scratch).addReg(ZERO).addReg(Scratch);
  } else {
    assert(IsOr && OR && "Unknown instruction for atomic pseudo expansion!");
    (void)IsOr;
    BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO);
  }

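  // Try to commit the new value: sc writes 0 into Scratch on failure, in which
  // case the whole ll/sc sequence is retried.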
  BuildMI(loopMBB, DL, TII->get(SC), Scratch)
      .addReg(Scratch)
      .addReg(Ptr)
      .addImm(0);
  BuildMI(loopMBB, DL, TII->get(BEQ))
      .addReg(Scratch)
      .addReg(ZERO)
      .addMBB(loopMBB);

  NMBBI = BB.end();
  I->eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *loopMBB);
  computeAndAddLiveIns(LiveRegs, *exitMBB);

  return true;
}

bool MipsExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                MachineBasicBlock::iterator &NMBB) {

  bool Modified = false;

  switch (MBBI->getOpcode()) {
  case Mips::ATOMIC_CMP_SWAP_I32_POSTRA:
  case Mips::ATOMIC_CMP_SWAP_I64_POSTRA:
    return expandAtomicCmpSwap(MBB, MBBI, NMBB);
  case Mips::ATOMIC_CMP_SWAP_I8_POSTRA:
  case Mips::ATOMIC_CMP_SWAP_I16_POSTRA:
    return expandAtomicCmpSwapSubword(MBB, MBBI, NMBB);
  case Mips::ATOMIC_SWAP_I8_POSTRA:
  case Mips::ATOMIC_SWAP_I16_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I8_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I16_POSTRA:
  case Mips::ATOMIC_LOAD_ADD_I8_POSTRA:
  case Mips::ATOMIC_LOAD_ADD_I16_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I8_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I16_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I8_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I16_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I8_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I16_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I8_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I16_POSTRA:
  case Mips::ATOMIC_LOAD_MIN_I8_POSTRA:
  case Mips::ATOMIC_LOAD_MIN_I16_POSTRA:
  case Mips::ATOMIC_LOAD_MAX_I8_POSTRA:
  case Mips::ATOMIC_LOAD_MAX_I16_POSTRA:
  case Mips::ATOMIC_LOAD_UMIN_I8_POSTRA:
  case Mips::ATOMIC_LOAD_UMIN_I16_POSTRA:
  case Mips::ATOMIC_LOAD_UMAX_I8_POSTRA:
  case Mips::ATOMIC_LOAD_UMAX_I16_POSTRA:
    return expandAtomicBinOpSubword(MBB, MBBI, NMBB);
  case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
  case Mips::ATOMIC_SWAP_I32_POSTRA:
  case Mips::ATOMIC_LOAD_MIN_I32_POSTRA:
  case Mips::ATOMIC_LOAD_MAX_I32_POSTRA:
  case Mips::ATOMIC_LOAD_UMIN_I32_POSTRA:
  case Mips::ATOMIC_LOAD_UMAX_I32_POSTRA:
    return expandAtomicBinOp(MBB, MBBI, NMBB, 4);
  case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
  case Mips::ATOMIC_SWAP_I64_POSTRA:
  case Mips::ATOMIC_LOAD_MIN_I64_POSTRA:
  case Mips::ATOMIC_LOAD_MAX_I64_POSTRA:
  case Mips::ATOMIC_LOAD_UMIN_I64_POSTRA:
  case Mips::ATOMIC_LOAD_UMAX_I64_POSTRA:
    return expandAtomicBinOp(MBB, MBBI, NMBB, 8);
  default:
    return Modified;
  }
}

bool MipsExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
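  // expandMI may split the block and erase MBBI, so resume from the iterator
  // it reports back through NMBBI.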
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool MipsExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<MipsSubtarget>();
  TII = STI->getInstrInfo();

  bool Modified = false;
  for (MachineBasicBlock &MBB : MF)
    Modified |= expandMBB(MBB);

  if (Modified)
    MF.RenumberBlocks();

  return Modified;
}

/// createMipsExpandPseudoPass - returns an instance of the pseudo instruction
/// expansion pass.
FunctionPass *llvm::createMipsExpandPseudoPass() {
  return new MipsExpandPseudo();
}