#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
#define LLVM_CODEGEN_MACHINESCHEDULER_H

class MachineDominatorTree;
class MachineFunction;
class MachineLoopInfo;
class RegisterClassInfo;
class ScheduleHazardRecognizer;
class TargetInstrInfo;
class TargetPassConfig;
class TargetRegisterInfo;
          ScheduleDAGInstrs *(*)(MachineSchedContext *)> {

  virtual void anchor();

                          unsigned NumRegionInstrs) {}

  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  unsigned NumInstrsScheduled = 0;

    return SchedImpl->doMBBSchedRegionsTopDown();
                   unsigned regioninstrs) override;

  void schedule() override;

  void finishBlock() override;

  void viewGraph(const Twine &Name, const Twine &Title) override;
  void viewGraph() override;

  void postProcessDAG();

  void updateQueues(SUnit *SU, bool IsTopNode);

  void placeDebugValues();

  void dumpSchedule() const;

  void dumpScheduleTraceTopDown() const;
  void dumpScheduleTraceBottomUp() const;

  bool checkSchedLimit();

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);
                    std::unique_ptr<MachineSchedStrategy> S)

  void computeDFSResult();

                   unsigned regioninstrs) override;

  void schedule() override;

  unsigned computeCyclicCriticalPath();

  void dump() const override;

  void buildDAGWithRegPressure();

  void scheduleMI(SUnit *SU, bool IsTopNode);

  void initRegPressure();

  void updateScheduledPressure(const SUnit *SU,
                               const std::vector<unsigned> &NewMaxPressure);

  void collectVRegUses(SUnit &SU);
  std::vector<SUnit*> Queue;

  unsigned getID() const { return ID; }

  bool empty() const { return Queue.empty(); }

  unsigned size() const { return Queue.size(); }

    (*I)->NodeQueueId &= ~ID;

    unsigned idx = I - Queue.begin();

    return Queue.begin() + idx;
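  // Illustrative usage (not part of the original header): because remove()
  // swaps the last element into the erased slot and returns an iterator to
  // that slot, a caller can filter the queue in place without restarting the
  // scan. "Q" and "ShouldDrop" are hypothetical names used only for this
  // sketch.
  //
  //   for (ReadyQueue::iterator I = Q.begin(); I != Q.end();)
  //     I = ShouldDrop(*I) ? Q.remove(I) : std::next(I);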
                                              unsigned ReleaseAtCycle) {
    return std::make_pair<long, long>((long)C - (long)ReleaseAtCycle + 1L,
                                      (long)C - (long)AcquireAtCycle + 1L);

                                           unsigned ReleaseAtCycle) {
    return std::make_pair<long, long>((long)C + (long)AcquireAtCycle,
                                      (long)C + (long)ReleaseAtCycle);
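  // Illustrative worked example (not part of the original header), assuming a
  // resource held from AcquireAtCycle = 1 to ReleaseAtCycle = 3 relative to an
  // instruction placed at cycle C = 10:
  //   getResourceIntervalBottom(10, 1, 3) == [10 - 3 + 1, 10 - 1 + 1) == [8, 10)
  //   getResourceIntervalTop(10, 1, 3)    == [10 + 1, 10 + 3)         == [11, 13)
  // i.e. the bottom-up form places the usage interval before cycle C, while
  // the top-down form places it after.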
  LLVM_ABI unsigned getFirstAvailableAt(
      unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle,
      std::function<IntervalTy(unsigned, unsigned, unsigned)> IntervalBuilder)

                                      unsigned AcquireAtCycle,
                                      unsigned ReleaseAtCycle) const {
    return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,

                                         unsigned AcquireAtCycle,
                                         unsigned ReleaseAtCycle) const {
    return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
  std::list<IntervalTy> _Intervals;

  bool empty() const { return _Intervals.empty(); }

      : _Intervals(Intervals) {

    return c1._Intervals == c2._Intervals;

    for (auto p : Segments._Intervals)
      os << "[" << p.first << ", " << p.second << "), ";
  unsigned MinReadyCycle;

  unsigned ExpectedLatency;

  unsigned DependentLatency;

  unsigned RetiredMOps;

  unsigned MaxExecutedResCount;

  unsigned ZoneCritResIdx;

  bool IsResourceLimited;

  std::map<unsigned, ResourceSegments> ReservedResourceSegments;
  std::vector<unsigned> ReservedCycles;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  unsigned MaxObservedStall;

    return std::max(ExpectedLatency, CurrCycle);

    return ExecutedResCounts[ResIdx];

    if (!ZoneCritResIdx)
      return RetiredMOps * SchedModel->getMicroOpFactor();

    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                    MaxExecutedResCount);
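  // Illustrative arithmetic (not part of the original header): with a
  // hypothetical micro-op factor of 2, RetiredMOps == 8, and no critical
  // resource recorded (ZoneCritResIdx == 0), the getCriticalCount() body
  // above yields 8 * 2 == 16. getExecutedCount() likewise scales CurrCycle by
  // the model's latency factor before taking the max with
  // MaxExecutedResCount, so both counts are expressed in the same scaled
  // units and can be compared directly.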
                                                   unsigned ReleaseAtCycle,
                                                   unsigned AcquireAtCycle);
  LLVM_ABI std::pair<unsigned, unsigned>
  getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
                       unsigned ReleaseAtCycle, unsigned AcquireAtCycle);
    return SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin &&
           !SchedModel->getProcResource(PIdx)->BufferSize;

                                  unsigned Cycles, unsigned ReadyCycle,
                                  unsigned StartAtCycle);

    return !(*this == RHS);
  bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
                           bool ComputeRemLatency, unsigned &RemLatency) const;

                      GenericSchedulerBase::SchedCandidate &TryCand,
                      GenericSchedulerBase::SchedCandidate &Cand,

                         GenericSchedulerBase::SchedCandidate &TryCand,
                         GenericSchedulerBase::SchedCandidate &Cand,

                         GenericSchedulerBase::SchedCandidate &Cand,
                         SchedBoundary &Zone);

                          const PressureChange &CandP,
                          GenericSchedulerBase::SchedCandidate &TryCand,
                          GenericSchedulerBase::SchedCandidate &Cand,

                          const TargetRegisterInfo *TRI,
                          const MachineFunction &MF);
                  unsigned NumRegionInstrs) override;

  void dumpPolicy() const override;

  SUnit *pickNode(bool &IsTopNode) override;

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void registerRoots() override;

  void registerRoots() override;

  SUnit *pickNode(bool &IsTopNode) override;

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void schedNode(SUnit *SU, bool IsTopNode) override;

  virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);

  void pickNodeFromQueue(SchedBoundary &Zone, SchedCandidate &Cand);
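  // Illustrative sketch (not part of the original header) of how a
  // target-specific strategy might hook into the virtual tryCandidate above.
  // "MyPostRAStrategy" and the height-based tie-breaker are hypothetical; the
  // helpers used (tryGreater, SUnit::getHeight, the CandReason enumerators)
  // are existing LLVM declarations.
  //
  //   class MyPostRAStrategy : public PostGenericScheduler {
  //     using PostGenericScheduler::PostGenericScheduler;
  //
  //     bool tryCandidate(SchedCandidate &Cand,
  //                       SchedCandidate &TryCand) override {
  //       // Initialize the candidate if needed, as the generic code does.
  //       if (!Cand.isValid()) {
  //         TryCand.Reason = NodeOrder;
  //         return true;
  //       }
  //       // Hypothetical highest-priority heuristic: prefer the node with
  //       // the larger height (longer remaining critical path).
  //       if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
  //                      TryCand, Cand, BotHeightReduce))
  //         return TryCand.Reason != NoCand;
  //       // Otherwise defer to the generic post-RA heuristics.
  //       return PostGenericScheduler::tryCandidate(Cand, TryCand);
  //     }
  //   };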
LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI,
                             bool ReorderWhileClustering = false);

LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI,
                              bool ReorderWhileClustering = false);

LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI);
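// Illustrative sketch (not part of the original header): a target's
// TargetPassConfig::createMachineScheduler override commonly attaches these
// mutations to the DAG it returns. "MyTargetPassConfig" is hypothetical; the
// factory functions are the declarations above, and addMutation/TII/TRI are
// existing ScheduleDAGMI/ScheduleDAG members.
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     ScheduleDAGMILive *DAG = createSchedLive(C);
//     // Cluster adjacent loads and stores before scheduling.
//     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//     DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
//     return DAG;
//   }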
template <typename Strategy = GenericScheduler>

  if (!MacroFusions.empty())

template <typename Strategy = PostGenericScheduler>

  if (!MacroFusions.empty())
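// Illustrative sketch (not part of the original header): the Strategy
// template parameter of the helpers above lets a custom MachineSchedStrategy
// reuse the default DAG construction, and MachineSchedRegistry makes it
// selectable on the command line. "MyStrategy", the registry name, and the
// description string are hypothetical; createSchedLive is the pre-RA helper
// declared in this header. Such registrations normally live in a target .cpp
// file.
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return createSchedLive<MyStrategy>(C);
//   }
//
//   static MachineSchedRegistry
//       MySchedRegistry("my-sched", "Hypothetical custom scheduler",
//                       createMySched);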
  std::unique_ptr<impl_detail::MachineSchedulerImpl> Impl;

  std::unique_ptr<impl_detail::PostMachineSchedulerImpl> Impl;