33 cl::desc(
"Force the use of resource intervals in the schedule model"));
36 return EnableSchedModel && SchedModel.hasInstrSchedModel();
40 return EnableSchedItins && !InstrItins.isEmpty();
44 bool EnableSModel,
bool EnableSItins) {
48 STI->initInstrItins(InstrItins);
50 EnableSchedModel = EnableSModel;
51 EnableSchedItins = EnableSItins;
53 unsigned NumRes = SchedModel.getNumProcResourceKinds();
54 ResourceFactors.resize(NumRes);
55 ResourceLCM = SchedModel.IssueWidth;
56 for (
unsigned Idx = 0; Idx < NumRes; ++Idx) {
57 unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
59 ResourceLCM = std::lcm(ResourceLCM, NumUnits);
61 MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
62 for (
unsigned Idx = 0; Idx < NumRes; ++Idx) {
63 unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
64 ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
94 int UOps = InstrItins.getNumMicroOps(
MI->getDesc().getSchedClass());
95 return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *
MI);
103 return MI->isTransient() ? 0 : 1;
111 return Cycles >= 0 ? Cycles : 1000;
119 unsigned SchedClass =
MI->getDesc().getSchedClass();
128 assert(++NIter < 6 &&
"Variants are nested deeper than the magic number");
130 SchedClass = STI->resolveSchedClass(SchedClass,
MI,
this);
131 SCDesc = SchedModel.getSchedClassDesc(SchedClass);
143 for (
unsigned i = 0; i != DefOperIdx; ++i) {
159 for (
unsigned i = 0; i != UseOperIdx; ++i) {
173 const unsigned InstrLatency = computeInstrLatency(
DefMI);
174 const unsigned DefaultDefLatency = TII->defaultDefLatency(SchedModel, *
DefMI);
177 return DefaultDefLatency;
180 std::optional<unsigned> OperLatency;
182 OperLatency = TII->getOperandLatency(&InstrItins, *
DefMI, DefOperIdx,
186 unsigned DefClass =
DefMI->getDesc().getSchedClass();
187 OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
192 return OperLatency ? *OperLatency
193 : std::max(InstrLatency, DefaultDefLatency);
199 if (DefIdx < SCDesc->NumWriteLatencyEntries) {
202 STI->getWriteLatencyEntry(SCDesc, DefIdx);
213 int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
214 if (Advance > 0 && (
unsigned)Advance >
Latency)
221 if (SCDesc->
isValid() && !
DefMI->getOperand(DefOperIdx).isImplicit() &&
222 !
DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() &&
223 SchedModel.isComplete()) {
224 errs() <<
"DefIdx " << DefIdx <<
" exceeds machine model writes for "
225 << *
DefMI <<
" (Try with MCSchedModel.CompleteModel set to false)";
232 return DefMI->isTransient() ? 0 : DefaultDefLatency;
236TargetSchedModel::computeInstrLatency(
const MCSchedClassDesc &SCDesc)
const {
240unsigned TargetSchedModel::computeInstrLatency(
unsigned Opcode)
const {
242 unsigned SCIdx = TII->get(Opcode).getSchedClass();
243 return capLatency(SchedModel.computeInstrLatency(*STI, SCIdx));
246unsigned TargetSchedModel::computeInstrLatency(
const MCInst &Inst)
const {
248 return capLatency(SchedModel.computeInstrLatency(*STI, *TII, Inst));
249 return computeInstrLatency(Inst.
getOpcode());
254 bool UseDefaultDefLatency)
const {
259 return TII->getInstrLatency(&InstrItins, *
MI);
264 return computeInstrLatency(*SCDesc);
266 return TII->defaultDefLatency(SchedModel, *
MI);
272 if (!SchedModel.isOutOfOrder())
287 return computeInstrLatency(
DefMI);
295 *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
296 if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
307 unsigned SchedClass =
MI->getDesc().getSchedClass();
320 unsigned SchedClass = TII->get(Opcode).getSchedClass();
336 return SchedModel.getReciprocalThroughput(*STI, *TII,
MI);
344 return SchedModel.EnableIntervals;
MachineInstrBuilder & UseMI
MachineInstrBuilder & DefMI
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
Register Reg, const TargetRegisterInfo * TRI
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx)
Find the use index of this operand.
static unsigned capLatency(int Cycles)
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx)
Find the def index of this operand.
static cl::opt< bool > ForceEnableIntervals("sched-model-force-enable-intervals", cl::Hidden, cl::init(false), cl::desc("Force the use of resource intervals in the schedule model"))
Instances of this class represent a single low-level machine instruction.
unsigned getOpcode() const
const MCSchedModel & getSchedModel() const
Get the machine model for this subtarget's CPU.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
MachineOperand class - Representation of each machine instruction operand.
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Wrapper class representing virtual and physical registers.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
LLVM_ABI bool mustEndGroup(const MachineInstr *MI, const MCSchedClassDesc *SC=nullptr) const
Return true if current group must end.
LLVM_ABI bool hasInstrSchedModel() const
Return true if this machine model includes an instruction-level scheduling model.
LLVM_ABI unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *DepMI) const
Output dependency latency of a pair of defs of the same register.
LLVM_ABI bool mustBeginGroup(const MachineInstr *MI, const MCSchedClassDesc *SC=nullptr) const
Return true if new group must begin.
LLVM_ABI void init(const TargetSubtargetInfo *TSInfo, bool EnableSModel=true, bool EnableSItins=true)
Initialize the machine model for instruction scheduling.
LLVM_ABI const MCSchedClassDesc * resolveSchedClass(const MachineInstr *MI) const
Return the MCSchedClassDesc for this instruction.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
LLVM_ABI double computeReciprocalThroughput(const MachineInstr *MI) const
Compute the reciprocal throughput of the given instruction.
LLVM_ABI unsigned getNumMicroOps(const MachineInstr *MI, const MCSchedClassDesc *SC=nullptr) const
Return the number of issue slots required for this MI.
const InstrItineraryData * getInstrItineraries() const
LLVM_ABI bool enableIntervals() const
LLVM_ABI bool hasInstrItineraries() const
Return true if this machine model includes cycle-to-cycle itinerary data.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Summarize the scheduling resources required for an instruction of a particular scheduling class.
uint16_t NumReadAdvanceEntries
static LLVM_ABI int computeInstrLatency(const MCSubtargetInfo &STI, const MCSchedClassDesc &SCDesc)
Returns the latency value for the scheduling class.
static LLVM_ABI double getReciprocalThroughput(const MCSubtargetInfo &STI, const MCSchedClassDesc &SCDesc)
Specify the latency in cpu cycles for a particular scheduling class and def index.
Identify one of the processor resource kinds consumed by a particular scheduling class for the specif...