#define RISCV_EXPAND_ATOMIC_PSEUDO_NAME                                        \
  "RISC-V atomic pseudo instruction expansion pass"
                         bool IsMasked, int Width,
char RISCVExpandAtomicPseudo::ID = 0;
bool RISCVExpandAtomicPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = STI->getInstrInfo();

  assert(OldSize >= NewSize);
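  // The pre-/post-expansion size bookkeeping above (OldSize/NewSize, elided
  // in this listing) guards against the expansion growing the function:
  // RISCVInstrInfo::getInstSizeInBytes relies on the Size recorded for each
  // pseudo in tablegen being an upper bound on the expanded sequence.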
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadNand64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
                                NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoCmpXchg64:
    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
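
  // Each pseudo above lowers to an LR/SC retry loop. The "Masked" 32-bit
  // variants implement 8- and 16-bit atomics: they operate on the aligned
  // 32-bit word containing the value and take an extra mask (and, for the
  // signed min/max forms, a sign-extension shift amount) as operands.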
  case AtomicOrdering::Monotonic:
    return RISCV::LR_W;
  case AtomicOrdering::Acquire:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_W;
    return RISCV::LR_W_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_W;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_W;
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQ_RL;
  case AtomicOrdering::Monotonic:
    return RISCV::SC_W;
  case AtomicOrdering::Acquire:
    return RISCV::SC_W;
  case AtomicOrdering::Release:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_W;
    return RISCV::SC_W_RL;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_W;
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_RL;
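
  // Summary of the mapping implemented by the two helpers above: acquire
  // semantics go on the LR (.aq), release semantics on the SC (.rl), and
  // sequentially consistent RMWs use lr.w.aqrl / sc.w.rl. Under Ztso the
  // weaker orderings fall back to plain lr.w / sc.w, since TSO already
  // provides the required ordering.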
  case AtomicOrdering::Monotonic:
    return RISCV::LR_D;
  case AtomicOrdering::Acquire:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_D;
    return RISCV::LR_D_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_D;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_D;
    return RISCV::LR_D_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_D_AQ_RL;
  case AtomicOrdering::Monotonic:
    return RISCV::SC_D;
  case AtomicOrdering::Acquire:
    return RISCV::SC_D;
  case AtomicOrdering::Release:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_D;
    return RISCV::SC_D_RL;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_D;
    return RISCV::SC_D_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_D_RL;
    return getLRForRMW32(Ordering, Subtarget);
    return getLRForRMW64(Ordering, Subtarget);

    return getSCForRMW32(Ordering, Subtarget);
    return getSCForRMW64(Ordering, Subtarget);
  Register ScratchReg = MI.getOperand(1).getReg();

  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)), DestReg)
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)), ScratchReg)
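
  // Shape of the sequence emitted by this (unmasked) RMW expansion; register
  // names are illustrative:
  //
  // .loop:
  //   lr.[w|d] dest, (addr)
  //   <binop>  scratch, dest, incr
  //   sc.[w|d] scratch, scratch, (addr)
  //   bnez     scratch, .loop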
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");
  assert(Width == 32 &&
         "Should never need to expand masked 64-bit operations");
  Register ScratchReg = MI.getOperand(1).getReg();

  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering, STI)), DestReg)

  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,

  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), ScratchReg)
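
  // Shape of the masked (sub-word) RMW loop built above; only the bits
  // selected by the mask are updated in the aligned word:
  //
  // .loop:
  //   lr.w    dest, (alignedaddr)
  //   <binop> scratch, dest, incr
  //   xor     scratch, dest, scratch
  //   and     scratch, scratch, mask
  //   xor     scratch, dest, scratch
  //   sc.w    scratch, scratch, (alignedaddr)
  //   bnez    scratch, .loop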
bool RISCVExpandAtomicPseudo::expandAtomicBinOp(
  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width,
                           STI);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width, STI);

  MI.eraseFromParent();
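
  // The pseudo itself is erased only after the replacement blocks have been
  // populated; the expansion gives LoopMBB a back-edge to itself and falls
  // through to DoneMBB, which takes over the original block's successors.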
bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(

  assert(IsMasked == true &&
         "Should only need to expand masked atomic max/min");
  assert(Width == 32 &&
         "Should never need to expand masked 64-bit operations");
  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
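
  // Resulting CFG for the masked min/max expansion:
  //   LoopHeadMBB:   lr.w, extract/sign-extend, compare; skip the update
  //                  (branch to LoopTailMBB) when the loaded value already
  //                  satisfies the min/max condition
  //   LoopIfBodyMBB: merge the new value into the word under the mask
  //   LoopTailMBB:   sc.w; retry via LoopHeadMBB on failure, else DoneMBB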
  Register Scratch1Reg = MI.getOperand(1).getReg();
  Register Scratch2Reg = MI.getOperand(2).getReg();
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering, STI)), DestReg)

  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)

  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)

  insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());

  insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
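
  // The two insertSext calls belong to the signed Max/Min cases: the field
  // extracted into Scratch2Reg is sign-extended in place (shift left, then
  // arithmetic shift right by the amount in operand 6) so the comparison in
  // the loop head is a correct signed compare. The unsigned variants skip
  // this step.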
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);

  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), Scratch1Reg)

  MI.eraseFromParent();
  if (MBBI == E || MBBI->getOpcode() != RISCV::AND)
    return false;

  if (!(ANDOp1 == DestReg && ANDOp2 == MaskReg) &&
      !(ANDOp1 == MaskReg && ANDOp2 == DestReg))
    return false;

  DestReg = MBBI->getOperand(0).getReg();

  if (MBBI == E || MBBI->getOpcode() != RISCV::BNE)
    return false;

  if (!(BNEOp0 == DestReg && BNEOp1 == CmpValReg) &&
      !(BNEOp0 == CmpValReg && BNEOp1 == DestReg))
    return false;

  if (BNEOp0 == DestReg && !MBBI->getOperand(0).isKill())
    return false;
  if (BNEOp1 == DestReg && !MBBI->getOperand(1).isKill())
    return false;

  LoopHeadBNETarget = MBBI->getOperand(2).getMBB();

  for (auto *MI : ToErase)
    MI->eraseFromParent();
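
  // tryToFoldBNEOnCmpXchgResult: if the cmpxchg result is only consumed by a
  // following (optionally masked) AND plus a BNE against the expected value,
  // those instructions are erased and the branch target is remembered as
  // LoopHeadBNETarget, so the BNE inside the expanded loop head can branch
  // straight to the user's failure block instead of to DoneMBB.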
bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(

  Register ScratchReg = MI.getOperand(1).getReg();
  Register CmpValReg = MI.getOperand(3).getReg();
  Register NewValReg = MI.getOperand(4).getReg();
  tryToFoldBNEOnCmpXchgResult(MBB, std::next(MBBI), DestReg, CmpValReg, MaskReg,

  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopHeadMBB->addSuccessor(LoopHeadBNETarget);
  LoopTailMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
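
  // Shape of the (unmasked) cmpxchg expansion emitted below; the masked form
  // additionally ANDs the loaded word with the mask before the compare and
  // merges NewValReg under the mask before the store-conditional:
  //
  // .loophead:
  //   lr.[w|d] dest, (addr)
  //   bne      dest, cmpval, <LoopHeadBNETarget>
  // .looptail:
  //   sc.[w|d] scratch, newval, (addr)
  //   bnez     scratch, .loophead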
  // Unmasked path:
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)),

      .addMBB(LoopHeadBNETarget);

  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),

  // Masked path:
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)),

  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)

      .addMBB(LoopHeadBNETarget);

  insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                    MaskReg, ScratchReg);
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),

  MI.eraseFromParent();
  return new RISCVExpandAtomicPseudo();