#define RISCV_EXPAND_ATOMIC_PSEUDO_NAME                                        \
  "RISC-V atomic pseudo instruction expansion pass"
  bool expandAtomicBinOp(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
                         MachineBasicBlock::iterator &NextMBBI);
char RISCVExpandAtomicPseudo::ID = 0;
bool RISCVExpandAtomicPseudo::runOnMachineFunction(MachineFunction &MF) {
  assert(OldSize >= NewSize);
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, false, 32, NextMBBI);
  case RISCV::PseudoAtomicSwap64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadAdd64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadSub64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadAnd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::And, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadAnd64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::And, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadOr32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Or, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadOr64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Or, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadXor32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xor, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadXor64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xor, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadNand64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadMin64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadMax64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadUMin64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadUMax64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, false, 64, NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32, NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoCmpXchg64:
    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
  }
  return false;
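// Ordering helpers: map an IR AtomicOrdering to the LR/SC opcode variant.
// Under the Ztso extension every load already has acquire semantics and every
// store already has release semantics, so the .aq/.rl bits are only needed
// for seq_cst; without Ztso they are also needed for acquire/release
// orderings.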
  // getLRForRMW32:
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_W;
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQRL;
  // getSCForRMW32:
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_W;
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_RL;
  // getLRForRMW64:
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_D;
    return RISCV::LR_D_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_D_AQRL;
  // getSCForRMW64:
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_D;
    return RISCV::SC_D_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_D_RL;
  // getLRForRMW:
  if (Width == 32)
    return getLRForRMW32(Ordering, Subtarget);
  return getLRForRMW64(Ordering, Subtarget);
  // getSCForRMW:
  if (Width == 32)
    return getSCForRMW32(Ordering, Subtarget);
  return getSCForRMW64(Ordering, Subtarget);
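// doAtomicBinOpExpansion emits the plain (unmasked) read-modify-write loop,
// schematically:
//   .loop:
//     lr.[w|d] dest, (addr)
//     binop scratch, dest, val
//     sc.[w|d] scratch, scratch, (addr)
//     bnez scratch, loop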
  Register ScratchReg = MI.getOperand(1).getReg();
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register ScratchReg = MI.getOperand(1).getReg();
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering, STI)), DestReg)
      .addReg(AddrReg);
  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
                    ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
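// expandAtomicBinOp splits the block at the pseudo, inserts the loop block,
// rewires the CFG, delegates to the plain or masked expansion above, and
// recomputes live-ins for the new blocks.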
bool RISCVExpandAtomicPseudo::expandAtomicBinOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MF->insert(++MBB.getIterator(), LoopMBB);
  MBB.addSuccessor(LoopMBB);
  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width,
                           STI);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width, STI);
  NextMBBI = MBB.end();
  MI.eraseFromParent();
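// Min/max operations need a conditional branch inside the LR/SC loop,
// schematically:
//   .loophead:
//     lr.[w|d] dest, (addr)
//     mv scratch, dest
//     ifnochangeneeded scratch, incr, .looptail
//   .loopifbody:
//     mv scratch, incr
//   .looptail:
//     sc.[w|d] scratch, scratch, (addr)
//     bnez scratch, loophead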
static void doAtomicMinMaxOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopHeadMBB,
    MachineBasicBlock *LoopIfBodyMBB, MachineBasicBlock *LoopTailMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width,
    const RISCVSubtarget *STI) {
  Register ScratchReg = MI.getOperand(1).getReg();
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)), DestReg)
      .addReg(AddrReg);
  // mv scratch, dest
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
      .addReg(DestReg)
      .addImm(0);
  // mv scratch, incr
  BuildMI(LoopIfBodyMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
      .addReg(IncrReg)
      .addImm(0);
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
          ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
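// The masked min/max form additionally extracts the field under the mask and,
// for signed min/max, sign-extends it (using the shift amount passed in
// operand 6) before the comparison; the merged result is stored back under
// the mask.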
static void doMaskedAtomicMinMaxOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopHeadMBB,
    MachineBasicBlock *LoopIfBodyMBB, MachineBasicBlock *LoopTailMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width,
    const RISCVSubtarget *STI) {
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register Scratch1Reg = MI.getOperand(1).getReg();
  Register Scratch2Reg = MI.getOperand(2).getReg();
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering, STI)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
      .addReg(DestReg)
      .addReg(MaskReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
      .addReg(DestReg)
      .addImm(0);
  // Sign-extend the field under the mask before a signed max comparison.
  insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
  // Likewise before a signed min comparison.
  insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), Scratch1Reg)
      .addReg(AddrReg)
      .addReg(Scratch1Reg);
bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MBB.addSuccessor(LoopHeadMBB);
  if (!IsMasked)
    doAtomicMinMaxOpExpansion(TII, MI, DL, &MBB, LoopHeadMBB, LoopIfBodyMBB,
                              LoopTailMBB, DoneMBB, BinOp, Width, STI);
  else
    doMaskedAtomicMinMaxOpExpansion(TII, MI, DL, &MBB, LoopHeadMBB,
                                    LoopIfBodyMBB, LoopTailMBB, DoneMBB, BinOp,
                                    Width, STI);
  NextMBBI = MBB.end();
  MI.eraseFromParent();
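// If the only user of the cmpxchg result is a BNE against the compare value
// (preceded, for the masked form, by an AND with the mask), the comparison is
// already performed by the BNE inside the loop head, so the trailing AND/BNE
// pair can be folded away by branching from the loop head straight to the
// original branch target.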
  if (MBBI == E || MBBI->getOpcode() != RISCV::AND)
    return false;
  if (!(ANDOp1 == DestReg && ANDOp2 == MaskReg) &&
      !(ANDOp1 == MaskReg && ANDOp2 == DestReg))
    return false;
  DestReg = MBBI->getOperand(0).getReg();
  if (MBBI == E || MBBI->getOpcode() != RISCV::BNE)
    return false;
  if (!(BNEOp0 == DestReg && BNEOp1 == CmpValReg) &&
      !(BNEOp0 == CmpValReg && BNEOp1 == DestReg))
    return false;
  // The compared register must not be live out of the branch.
  if (BNEOp0 == DestReg && !MBBI->getOperand(0).isKill())
    return false;
  if (BNEOp1 == DestReg && !MBBI->getOperand(1).isKill())
    return false;
  LoopHeadBNETarget = MBBI->getOperand(2).getMBB();
  MBB.removeSuccessor(LoopHeadBNETarget);
  for (auto *MI : ToErase)
    MI->eraseFromParent();
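// expandAtomicCmpXchg emits, schematically:
//   .loophead:
//     lr.[w|d] dest, (addr)
//     bne dest, cmpval, done
//   .looptail:
//     sc.[w|d] scratch, newval, (addr)
//     bnez scratch, loophead
//   .done: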
bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
    int Width, MachineBasicBlock::iterator &NextMBBI) {
  Register ScratchReg = MI.getOperand(1).getReg();
  Register CmpValReg = MI.getOperand(3).getReg();
  Register NewValReg = MI.getOperand(4).getReg();
  tryToFoldBNEOnCmpXchgResult(MBB, std::next(MBBI), DestReg, CmpValReg,
                              MaskReg, LoopHeadBNETarget);
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MBB.addSuccessor(LoopHeadMBB);
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)),
            DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(DestReg)
        .addReg(CmpValReg)
        .addMBB(LoopHeadBNETarget);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
            ScratchReg)
        .addReg(AddrReg)
        .addReg(NewValReg);
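// The masked cmpxchg form masks the loaded word before comparing and merges
// the new value under the mask before the store-conditional, schematically:
//   .loophead:
//     lr.w dest, (alignedaddr)
//     and scratch, dest, mask
//     bne scratch, cmpval, done
//   .looptail:
//     xor scratch, dest, newval
//     and scratch, scratch, mask
//     xor scratch, dest, scratch
//     sc.w scratch, scratch, (alignedaddr)
//     bnez scratch, loophead
//   .done: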
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)),
            DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(MaskReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(CmpValReg)
        .addMBB(LoopHeadBNETarget);
    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                      MaskReg, ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
            ScratchReg)
        .addReg(AddrReg)
        .addReg(ScratchReg);
  NextMBBI = MBB.end();
  MI.eraseFromParent();
FunctionPass *createRISCVExpandAtomicPseudoPass() {
  return new RISCVExpandAtomicPseudo();
}