InOrderIssueStage.cpp (llvm-mca) - source fragments, keyed by the original line numbers.
20 #define DEBUG_TYPE "llvm-mca"
49     : STI(STI), PRF(PRF), RM(STI.getSchedModel()), CB(CB), LSU(LSU),
50       NumIssued(), CarryOver(), Bandwidth(), LastWriteBackCycle() {}
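The trailing members are value-initialized here and only become meaningful while the pipeline runs: NumIssued counts micro-ops issued in the current cycle, CarryOver holds micro-ops that did not fit into the cycle's issue budget, Bandwidth is the remaining budget, and LastWriteBackCycle tracks the pending write-back used to keep retirement in order. A minimal standalone mirror of that per-cycle state; the reset-at-cycle-start logic is an assumption based on the cycleStart()/getIssueWidth() entries in the cross-reference index later on this page, not a copy of the real method:

    // Hypothetical stand-in for the per-cycle issue state kept by the stage.
    struct IssueCycleState {
      unsigned NumIssued = 0;          // micro-ops issued so far this cycle
      unsigned CarryOver = 0;          // micro-ops left over from the previous cycle
      unsigned Bandwidth = 0;          // remaining issue budget for this cycle
      unsigned LastWriteBackCycle = 0; // relative cycle of the last pending write-back

      // Assumed reset: at the start of a cycle the budget is refilled from the
      // scheduler's issue width.
      void startCycle(unsigned IssueWidth) {
        NumIssued = 0;
        Bandwidth = IssueWidth;
      }
    };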
57   return !IssuedInst.empty() || SI.isValid() || CarriedOver;
61   if (SI.isValid() || CarriedOver)
68   if (Bandwidth < NumMicroOps && !ShouldCarryOver)
80   if (RM.checkAvailability(IR.getInstruction()->getDesc())) {
89   unsigned FirstWBCycle = IR.getInstruction()->getLatency();
90   for (const WriteState &WS : IR.getInstruction()->getDefs()) {
91     int CyclesLeft = WS.getCyclesLeft();
93       CyclesLeft = WS.getLatency();
96     FirstWBCycle = std::min(FirstWBCycle, (unsigned)CyclesLeft);
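The fragment above computes the earliest write-back cycle of an instruction: start from the instruction's overall latency, then lower it for every register definition that reports fewer cycles left, falling back to the write's static latency when its remaining cycles are still unknown. A self-contained sketch of that min-over-defs computation, using a plain struct instead of mca::WriteState (the UNKNOWN_CYCLES value is assumed to match the MCA sentinel):

    #include <algorithm>
    #include <vector>

    constexpr int UNKNOWN_CYCLES = -512; // sentinel, mirroring mca::UNKNOWN_CYCLES

    struct WriteInfo {
      int CyclesLeft; // UNKNOWN_CYCLES until the write has actually been issued
      int Latency;    // static latency from the scheduling model
    };

    unsigned findFirstWriteBack(unsigned InstrLatency,
                                const std::vector<WriteInfo> &Defs) {
      unsigned FirstWBCycle = InstrLatency;
      for (const WriteInfo &WS : Defs) {
        int CyclesLeft = WS.CyclesLeft;
        if (CyclesLeft == UNKNOWN_CYCLES)
          CyclesLeft = WS.Latency; // not issued yet: use the static latency
        FirstWBCycle = std::min(FirstWBCycle, (unsigned)CyclesLeft);
      }
      return FirstWBCycle;
    }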
106   for (const ReadState &RS : IR.getInstruction()->getUses()) {
115 bool InOrderIssueStage::canExecute(const InstRef &IR) {
117   assert(!SI.isValid() && "Should not have reached this code!");
129   if (IR.getInstruction()->isMemOp() && !LSU.isReady(IR)) {
141   if (LastWriteBackCycle) {
142     if (!IR.getInstruction()->getRetireOOO()) {
145       if (NextWriteBackCycle < LastWriteBackCycle) {
146         SI.update(IR, LastWriteBackCycle - NextWriteBackCycle,
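canExecute checks the possible stall reasons in a fixed order before letting an instruction issue: memory operands must be ready in the load/store unit, and unless the instruction may retire out of order, its write-back must not be scheduled earlier than the write-back of the previously issued instruction; when it is, the stage records a delay via SI.update(). A hedged standalone sketch of that decision order (the StallReason names and the Candidate struct are illustrative, not the MCA StallKind values):

    enum class StallReason { None, LoadStoreQueueFull, Delay };

    struct Candidate {
      bool IsMemOp;
      bool MemReady;          // would come from the load/store unit
      bool RetireOOO;         // allowed to retire out of order
      unsigned NextWriteBack; // cycle in which this instruction writes back
    };

    // Returns the stall reason and, for Delay, how many cycles to wait.
    StallReason canExecute(const Candidate &C, unsigned LastWriteBackCycle,
                           unsigned &DelayCycles) {
      if (C.IsMemOp && !C.MemReady)
        return StallReason::LoadStoreQueueFull;
      if (LastWriteBackCycle && !C.RetireOOO &&
          C.NextWriteBack < LastWriteBackCycle) {
        DelayCycles = LastWriteBackCycle - C.NextWriteBack;
        return StallReason::Delay;
      }
      return StallReason::None;
    }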
157                                   unsigned SourceIndex,
169 void InOrderIssueStage::notifyInstructionIssued(const InstRef &IR,
171   notifyEvent<HWInstructionEvent>(
173   notifyEvent<HWInstructionEvent>(HWInstructionIssuedEvent(IR, UsedRes));
178 void InOrderIssueStage::notifyInstructionDispatched(
179     const InstRef &IR, unsigned Ops, ArrayRef<unsigned> UsedRegs) {
180   notifyEvent<HWInstructionEvent>(
181       HWInstructionDispatchedEvent(IR, UsedRegs, Ops));
186 void InOrderIssueStage::notifyInstructionExecuted(const InstRef &IR) {
187   notifyEvent<HWInstructionEvent>(
192 void InOrderIssueStage::notifyInstructionRetired(const InstRef &IR,
193                                                  ArrayRef<unsigned> FreedRegs) {
194   notifyEvent<HWInstructionEvent>(HWInstructionRetiredEvent(IR, FreedRegs));
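Each notify* helper forwards a typed hardware event to every listener registered with the stage; in llvm-mca the printed views are implemented as such listeners (HWEventListener subclasses). A minimal standalone observer sketch of the same pattern, using a simplified event type rather than the real HWInstructionEvent hierarchy:

    #include <functional>
    #include <iostream>
    #include <vector>

    struct InstructionEvent {
      enum Kind { Dispatched, Issued, Executed, Retired } Type;
      unsigned SourceIndex; // which instruction in the simulated stream
    };

    class EventBus {
      std::vector<std::function<void(const InstructionEvent &)>> Listeners;

    public:
      void addListener(std::function<void(const InstructionEvent &)> L) {
        Listeners.push_back(std::move(L));
      }
      void notify(const InstructionEvent &E) const {
        for (const auto &L : Listeners)
          L(E); // every registered view sees every event
      }
    };

    int main() {
      EventBus Bus;
      Bus.addListener([](const InstructionEvent &E) {
        static const char *Names[] = {"dispatched", "issued", "executed",
                                      "retired"};
        std::cout << "instruction " << E.SourceIndex << ' ' << Names[E.Type]
                  << '\n';
      });
      Bus.notify({InstructionEvent::Issued, 0});
      Bus.notify({InstructionEvent::Retired, 0});
    }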
214   unsigned SourceIndex = IR.getSourceIndex();
217   if (!canExecute(IR)) {
231   notifyInstructionDispatched(IR, NumMicroOps, UsedRegs);
245   notifyInstructionIssued(IR, UsedResources);
247   bool ShouldCarryOver = NumMicroOps > Bandwidth;
248   if (ShouldCarryOver) {
249     CarryOver = NumMicroOps - Bandwidth;
252     NumIssued += Bandwidth;
255     NumIssued += NumMicroOps;
256     Bandwidth = IS.getEndGroup() ? 0 : Bandwidth - NumMicroOps;
264     notifyEvent<HWInstructionEvent>(
268     retireInstruction(IR);
272   IssuedInst.push_back(IR);
274   if (!IR.getInstruction()->getRetireOOO())
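The issue path splits an instruction's micro-ops against the remaining cycle budget: when the instruction needs more micro-ops than Bandwidth allows, only the available slots are consumed this cycle and the remainder is recorded in CarryOver; otherwise the whole instruction issues and the budget shrinks (dropping to zero when the instruction ends an issue group). A small worked example of that arithmetic, assuming an issue width of four; the explicit zeroing of the budget in the carry-over branch is implied by the surrounding code but not shown in the fragment:

    #include <cassert>

    int main() {
      unsigned Bandwidth = 4; // slots left in this cycle
      unsigned NumIssued = 0;
      unsigned CarryOver = 0;

      // An instruction that decodes into 6 micro-ops cannot finish this cycle.
      unsigned NumMicroOps = 6;
      bool ShouldCarryOver = NumMicroOps > Bandwidth;
      if (ShouldCarryOver) {
        CarryOver = NumMicroOps - Bandwidth; // 2 uops postponed to the next cycle
        NumIssued += Bandwidth;              // 4 uops issued now
        Bandwidth = 0;                       // cycle budget exhausted (assumed step)
      } else {
        NumIssued += NumMicroOps;
        Bandwidth -= NumMicroOps; // the real code also zeroes the budget when the
                                  // instruction ends an issue group
      }

      assert(CarryOver == 2 && NumIssued == 4 && Bandwidth == 0);
    }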
280 void InOrderIssueStage::updateIssuedInst() {
283   unsigned NumExecuted = 0;
284   for (auto I = IssuedInst.begin(), E = IssuedInst.end();
285        I != (E - NumExecuted);) {
287     Instruction &IS = *IR.getInstruction();
290     if (!IS.isExecuted()) {
292                         << " is still executing\n");
299     notifyInstructionExecuted(IR);
302     retireInstruction(*I);
304     std::iter_swap(I, E - NumExecuted);
308   IssuedInst.resize(IssuedInst.size() - NumExecuted);
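updateIssuedInst removes completed instructions from IssuedInst without disturbing the relative order of the ones still in flight: each executed entry is swapped into a growing tail region, and a single resize() drops that region at the end. The same idiom on a plain std::vector, independent of the MCA types:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      // Pretend these are instruction ids; even ones have finished executing.
      std::vector<int> Issued = {1, 2, 3, 4, 5, 6};
      auto IsExecuted = [](int Id) { return Id % 2 == 0; };

      unsigned NumExecuted = 0;
      for (auto I = Issued.begin(), E = Issued.end(); I != (E - NumExecuted);) {
        if (!IsExecuted(*I)) {
          ++I; // still executing: keep it where it is
          continue;
        }
        ++NumExecuted;
        // Park the finished entry in the tail region that resize() will drop.
        std::iter_swap(I, E - NumExecuted);
        // Do not advance I: the element swapped in has not been examined yet.
      }
      Issued.resize(Issued.size() - NumExecuted);

      assert((Issued == std::vector<int>{1, 5, 3})); // unfinished ids survive, in order
    }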
311 void InOrderIssueStage::updateCarriedOver() {
315   assert(!SI.isValid() && "A stalled instruction cannot be carried over.");
317   if (CarryOver > Bandwidth) {
318     CarryOver -= Bandwidth;
320     LLVM_DEBUG(dbgs() << "[N] Carry over (" << CarryOver << " uops left) #"
321                       << CarriedOver << " \n");
325   LLVM_DEBUG(dbgs() << "[N] Carry over (complete) #" << CarriedOver << " \n");
330   Bandwidth -= CarryOver;
332   CarriedOver = InstRef();
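When an instruction was only partially issued, updateCarriedOver drains its remaining micro-ops at the start of the following cycles: while CarryOver still exceeds the refreshed budget the whole cycle is consumed, and once it fits the leftover is subtracted and the carried-over reference is cleared. A short standalone sketch of that drain, assuming the budget is refilled to the issue width at every cycle start:

    #include <cstdio>

    int main() {
      const unsigned IssueWidth = 4;
      unsigned CarryOver = 10; // micro-ops of one instruction still to issue

      for (unsigned Cycle = 1; CarryOver; ++Cycle) {
        unsigned Bandwidth = IssueWidth; // budget refilled at cycle start
        if (CarryOver > Bandwidth) {
          CarryOver -= Bandwidth; // the whole cycle goes to the leftover uops
          Bandwidth = 0;
        } else {
          Bandwidth -= CarryOver; // leftover fits; the rest of the cycle is free
          CarryOver = 0;          // carried-over instruction is now fully issued
        }
        std::printf("cycle %u: %u uops still carried over, %u slots left\n",
                    Cycle, CarryOver, Bandwidth);
      }
    }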
336 void InOrderIssueStage::retireInstruction(InstRef &IR) {
337   Instruction &IS = *IR.getInstruction();
341   for (const WriteState &WS : IS.getDefs())
347   notifyInstructionRetired(IR, FreedRegs);
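Retirement gives back the physical registers that were allocated for the instruction's writes and then reports the freed counts together with the retired instruction. A hedged sketch of that bookkeeping against a toy register file; removeRegisterWrite here is a stand-in with an invented signature, not the RegisterFile method listed in the cross-reference index:

    #include <vector>

    struct ToyRegisterFile {
      unsigned NumRegisterFiles = 1;

      // Pretend each write frees exactly one physical register in file 0.
      void removeRegisterWrite(unsigned /*WriteID*/,
                               std::vector<unsigned> &FreedPhysRegs) {
        ++FreedPhysRegs[0];
      }
    };

    // Returns how many physical registers each register file got back.
    std::vector<unsigned> retire(ToyRegisterFile &PRF,
                                 const std::vector<unsigned> &Defs) {
      std::vector<unsigned> FreedRegs(PRF.NumRegisterFiles, 0);
      for (unsigned WriteID : Defs)
        PRF.removeRegisterWrite(WriteID, FreedRegs);
      return FreedRegs; // handed to notifyInstructionRetired in the real stage
    }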
350 void InOrderIssueStage::notifyStallEvent() {
360     notifyEvent<HWStallEvent>(
362     notifyEvent<HWPressureEvent>(
367     notifyEvent<HWStallEvent>(
369     notifyEvent<HWPressureEvent>(
374     notifyEvent<HWStallEvent>(
427 if (LastWriteBackCycle > 0)
428 --LastWriteBackCycle;
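cycleEnd simply ages the pending write-back by one cycle. A compact standalone sketch of how a stage like this is driven, with cycleStart refilling the issue budget and cycleEnd counting the write-back down; the driver loop is illustrative and not the llvm-mca Pipeline class:

    #include <cstdio>

    struct ToyInOrderStage {
      unsigned Bandwidth = 0;
      unsigned NumIssued = 0;
      unsigned LastWriteBackCycle = 0;

      void cycleStart(unsigned IssueWidth) {
        NumIssued = 0;
        Bandwidth = IssueWidth;
      }
      void issue(unsigned MicroOps, unsigned WriteBackIn) {
        NumIssued += MicroOps;
        Bandwidth = MicroOps < Bandwidth ? Bandwidth - MicroOps : 0;
        LastWriteBackCycle = WriteBackIn;
      }
      void cycleEnd() {
        if (LastWriteBackCycle > 0)
          --LastWriteBackCycle;
      }
    };

    int main() {
      ToyInOrderStage Stage;
      for (unsigned Cycle = 0; Cycle < 3; ++Cycle) {
        Stage.cycleStart(/*IssueWidth=*/2);
        if (Cycle == 0)
          Stage.issue(/*MicroOps=*/1, /*WriteBackIn=*/3);
        Stage.cycleEnd();
        std::printf("cycle %u: write-back due in %u cycle(s)\n", Cycle,
                    Stage.LastWriteBackCycle);
      }
    }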
Declarations referenced by this file, as collected by the cross-reference index:

void dispatch(unsigned RCUTokenID)
static unsigned checkRegisterHazard(const RegisterFile &PRF, const MCSubtargetInfo &STI, const InstRef &IR) - Returns the number of cycles left until the register requirements of the instruction are met.
const MCSchedModel &getSchedModel() const - Gets the machine model for this subtarget's CPU.
void removeRegisterWrite(const WriteState &WS, MutableArrayRef<unsigned> FreedPhysRegs)
StallKind getStallKind() const
RAWHazard checkRAWHazards(const MCSubtargetInfo &STI, const ReadState &RS) const
const InstRef &getInstruction() const
ResourceManager - A resource manager for processor resource units and groups.
LSUnit - Default Load/Store Unit (LS Unit) for simulated processors.
unsigned resolveResourceMask(uint64_t Mask) const
static void addRegisterReadWrite(RegisterFile &PRF, Instruction &IS, unsigned SourceIndex, const MCSubtargetInfo &STI, SmallVectorImpl<unsigned> &UsedRegs)
int getCyclesLeft() const
Instruction - An instruction propagated through the simulated instruction pipeline.
bool isEliminated() const
SmallVectorImpl<WriteState> &getDefs()
unsigned getNumRegisterFiles() const
bool hasUnknownCycles() const
raw_ostream &dbgs() - Returns a reference to a raw_ostream for debugging messages.
static const unsigned UnhandledTokenID
void execute(unsigned IID)
ReadState - Tracks register operand latency in cycles.
Error cycleEnd() override - Called once at the end of each cycle.
unsigned dispatch(const InstRef &IR) override - Allocates LS resources for instruction IR.
WriteRef - A reference to a register write.
void update(const InstRef &Inst, unsigned Cycles, StallKind SK)
const InstrDesc &getDesc() const
InstrDesc - An instruction descriptor.
CustomBehaviour - Class which can be overridden by targets to enforce instruction dependencies and behaviours that aren't ...
ErrorSuccess - Subclass of Error for the sole purpose of identifying the success path in the type system.
static bool hasResourceHazard(const ResourceManager &RM, const InstRef &IR)
void onInstructionExecuted(const InstRef &IR) override
constexpr int UNKNOWN_CYCLES
An InstRef contains both a SourceMgr index and Instruction pair.
unsigned getNumMicroOps() const
bool hasWorkToComplete() const override - Returns true if some instructions are still executing this stage.
virtual unsigned checkCustomHazard(ArrayRef<InstRef> IssuedInst, const InstRef &IR) - Before the llvm-mca pipeline dispatches an instruction, it first checks for any register or resource ... (a hedged sketch of such a hook follows the index below).
bool isAvailable(const InstRef &) const override - Returns true if it can execute IR during this cycle.
void onInstructionExecuted(Instruction *IS)
virtual void onInstructionRetired(const InstRef &IR)
bool getBeginGroup() const
virtual void onInstructionIssued(const InstRef &IR)
void addRegisterRead(ReadState &RS, const MCSubtargetInfo &STI) const
RegisterFile - Manages hardware register files, and tracks register definitions for register renaming purposes.
Error execute(InstRef &IR) override - The primary action that this stage performs on instruction IR.
Error - Lightweight error class with error context and mandatory checking.
std::pair< ResourceRef, ResourceCycles > ResourceUse
bool isReady(const InstRef &IR) const - Checks whether a previously dispatched instruction IR is now ready for execution.
void cycleEvent(SmallVectorImpl< ResourceRef > &ResourcesFreed)
void addRegisterWrite(WriteRef Write, MutableArrayRef< unsigned > UsedPhysRegs)
void invalidate() - Invalidate this reference.
SmallVectorImpl< ReadState > & getUses()
void issueInstruction(const InstrDesc &Desc, SmallVectorImpl< std::pair< ResourceRef, ResourceCycles >> &Pipes)
unsigned getCyclesLeft() const
virtual void cycleEvent()
MCSubtargetInfo - Generic base class for all target subtargets.
WriteState - Tracks uses of a register definition (e.g. ...).
unsigned getIssueWidth() const
void setLSUTokenID(unsigned LSUTok)
Error cycleStart() override - Called once at the start of each cycle.
static unsigned findFirstWriteBackCycle(const InstRef &IR)
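The checkCustomHazard hook listed above lets a target delay issue for constraints the scheduling model cannot express: it receives the instructions currently in flight plus the candidate and returns how many cycles to stall. A hedged standalone sketch of such a hook; the opcode kinds and the one-cycle penalty are invented for illustration, and a real implementation would override mca::CustomBehaviour instead:

    #include <vector>

    // Illustrative rule: an instruction of kind OpB must not issue while an
    // instruction of kind OpA is still in flight.
    enum OpKind { OpA, OpB, OpOther };

    unsigned checkCustomHazard(const std::vector<OpKind> &IssuedInst,
                               OpKind Candidate) {
      if (Candidate != OpB)
        return 0; // no custom constraint for this instruction
      for (OpKind Issued : IssuedInst)
        if (Issued == OpA)
          return 1; // ask the issue stage to stall for one cycle
      return 0;
    }

As a usage note, llvm-mca exercises this stage when the selected CPU's scheduling model describes an in-order core, for example (assuming an LLVM build with the AArch64 target and cortex-a55 as a representative in-order CPU):

    llvm-mca -mtriple=aarch64 -mcpu=cortex-a55 -timeline input.s

The dispatch, execute, and retire markers in the timeline view roughly correspond to the notify* callbacks shown above.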