// LLVM 23.0.0git — llvm/include/llvm/CodeGen/MachineScheduler.h
// (source listing captured from the generated documentation page)
1//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides an interface for customizing the standard MachineScheduler
10// pass. Note that the entire pass may be replaced as follows:
11//
12// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
13// PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
14// ...}
15//
16// The MachineScheduler pass is only responsible for choosing the regions to be
17// scheduled. Targets can override the DAG builder and scheduler without
18// replacing the pass as follows:
19//
20// ScheduleDAGInstrs *<Target>TargetMachine::
21// createMachineScheduler(MachineSchedContext *C) {
22// return new CustomMachineScheduler(C);
23// }
24//
25// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
26// scheduling while updating the instruction stream, register pressure, and live
27// intervals. Most targets don't need to override the DAG builder and list
28// scheduler, but subtargets that require custom scheduling heuristics may
29// plugin an alternate MachineSchedStrategy. The strategy is responsible for
30// selecting the highest priority node from the list:
31//
32// ScheduleDAGInstrs *<Target>TargetMachine::
33// createMachineScheduler(MachineSchedContext *C) {
34// return new ScheduleDAGMILive(C, CustomStrategy(C));
35// }
36//
37// The DAG builder can also be customized in a sense by adding DAG mutations
38// that will run after DAG building and before list scheduling. DAG mutations
39// can adjust dependencies based on target-specific knowledge or add weak edges
40// to aid heuristics:
41//
42// ScheduleDAGInstrs *<Target>TargetMachine::
43// createMachineScheduler(MachineSchedContext *C) {
44// ScheduleDAGMI *DAG = createSchedLive(C);
45// DAG->addMutation(new CustomDAGMutation(...));
46// return DAG;
47// }
48//
49// A target that supports alternative schedulers can use the
50// MachineSchedRegistry to allow command line selection. This can be done by
51// implementing the following boilerplate:
52//
53// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
54// return new CustomMachineScheduler(C);
55// }
56// static MachineSchedRegistry
57// SchedCustomRegistry("custom", "Run my target's custom scheduler",
58// createCustomMachineSched);
59//
60//
61// Finally, subtargets that don't need to implement custom heuristics but would
62// like to configure the GenericScheduler's policy for a given scheduler region,
63// including scheduling direction and register pressure tracking policy, can do
64// this:
65//
66// void <SubTarget>Subtarget::
67// overrideSchedPolicy(MachineSchedPolicy &Policy,
68// const SchedRegion &Region) const {
69// Policy.<Flag> = true;
70// }
71//
72//===----------------------------------------------------------------------===//
73
74#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
75#define LLVM_CODEGEN_MACHINESCHEDULER_H
76
77#include "llvm/ADT/APInt.h"
78#include "llvm/ADT/ArrayRef.h"
79#include "llvm/ADT/BitVector.h"
80#include "llvm/ADT/STLExtras.h"
82#include "llvm/ADT/StringRef.h"
83#include "llvm/ADT/Twine.h"
95#include <algorithm>
96#include <cassert>
98#include <memory>
99#include <string>
100#include <vector>
101
102namespace llvm {
103namespace impl_detail {
104// FIXME: Remove these declarations once RegisterClassInfo is queryable as an
105// analysis.
108} // namespace impl_detail
109
110namespace MISched {
117} // namespace MISched
118
119LLVM_ABI extern cl::opt<MISched::Direction> PreRADirection;
121
122#ifndef NDEBUG
125#else
126LLVM_ABI extern const bool ViewMISchedDAGs;
127LLVM_ABI extern const bool PrintDAGs;
128#endif
129
130class AAResults;
131class LiveIntervals;
132class MachineDominatorTree;
133class MachineFunction;
134class MachineInstr;
135class MachineLoopInfo;
136class RegisterClassInfo;
137class SchedDFSResult;
138class ScheduleHazardRecognizer;
139class TargetInstrInfo;
140class TargetPassConfig;
141class TargetRegisterInfo;
142
143/// MachineSchedContext provides enough context from the MachineScheduler pass
144/// for the target to instantiate a scheduler.
161
162/// MachineSchedRegistry provides a selection of available machine instruction
163/// schedulers.
166 ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
167public:
169
170 // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
172
174
175 MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
177 Registry.Add(this);
178 }
179
180 ~MachineSchedRegistry() { Registry.Remove(this); }
181
182 // Accessors.
183 //
187
189 return (MachineSchedRegistry *)Registry.getList();
190 }
191
193 Registry.setListener(L);
194 }
195};
196
197class ScheduleDAGMI;
198
/// Define a generic scheduling policy for targets that don't provide their own
/// MachineSchedStrategy. This can be overriden for each scheduling region
/// before building the DAG.
struct MachineSchedPolicy {
  // Allow the scheduler to disable register pressure tracking.
  bool ShouldTrackPressure = false;
  /// Track LaneMasks to allow reordering of independent subregister writes
  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
  bool ShouldTrackLaneMasks = false;

  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
  // is true, the scheduler runs in both directions and converges.
  bool OnlyTopDown = false;
  bool OnlyBottomUp = false;

  // Disable heuristic that tries to fetch nodes from long dependency chains
  // first.
  bool DisableLatencyHeuristic = false;

  // Compute DFSResult for use in scheduling heuristics.
  bool ComputeDFSResult = false;

  MachineSchedPolicy() = default;
};
223
224/// A region of an MBB for scheduling.
226 /// RegionBegin is the first instruction in the scheduling region, and
227 /// RegionEnd is either MBB->end() or the scheduling boundary after the
228 /// last instruction in the scheduling region. These iterators cannot refer
229 /// to instructions outside of the identified scheduling region because
230 /// those may be reordered before scheduling this region.
234
238};
239
240/// MachineSchedStrategy - Interface to the scheduling algorithm used by
241/// ScheduleDAGMI.
242///
243/// Initialization sequence:
244/// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
246 virtual void anchor();
247
248public:
249 virtual ~MachineSchedStrategy() = default;
250
251 /// Optionally override the per-region scheduling policy.
254 unsigned NumRegionInstrs) {}
255
256 virtual MachineSchedPolicy getPolicy() const { return {}; }
257 virtual void dumpPolicy() const {}
258
259 /// Check if pressure tracking is needed before building the DAG and
260 /// initializing this strategy. Called after initPolicy.
261 virtual bool shouldTrackPressure() const { return true; }
262
263 /// Returns true if lanemasks should be tracked. LaneMask tracking is
264 /// necessary to reorder independent subregister defs for the same vreg.
265 /// This has to be enabled in combination with shouldTrackPressure().
266 virtual bool shouldTrackLaneMasks() const { return false; }
267
268 // If this method returns true, handling of the scheduling regions
269 // themselves (in case of a scheduling boundary in MBB) will be done
270 // beginning with the topmost region of MBB.
271 virtual bool doMBBSchedRegionsTopDown() const { return false; }
272
273 /// Initialize the strategy after building the DAG for a new region.
274 virtual void initialize(ScheduleDAGMI *DAG) = 0;
275
276 /// Tell the strategy that MBB is about to be processed.
277 virtual void enterMBB(MachineBasicBlock *MBB) {};
278
279 /// Tell the strategy that current MBB is done.
280 virtual void leaveMBB() {};
281
282 /// Notify this strategy that all roots have been released (including those
283 /// that depend on EntrySU or ExitSU).
284 virtual void registerRoots() {}
285
286 /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
287 /// schedule the node at the top of the unscheduled region. Otherwise it will
288 /// be scheduled at the bottom.
289 virtual SUnit *pickNode(bool &IsTopNode) = 0;
290
291 /// Scheduler callback to notify that a new subtree is scheduled.
292 virtual void scheduleTree(unsigned SubtreeID) {}
293
294 /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
295 /// instruction and updated scheduled/remaining flags in the DAG nodes.
296 virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
297
298 /// When all predecessor dependencies have been resolved, free this node for
299 /// top-down scheduling.
300 virtual void releaseTopNode(SUnit *SU) = 0;
301
302 /// When all successor dependencies have been resolved, free this node for
303 /// bottom-up scheduling.
304 virtual void releaseBottomNode(SUnit *SU) = 0;
305};
306
307/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
308/// schedules machine instructions according to the given MachineSchedStrategy
309/// without much extra book-keeping. This is the common functionality between
310/// PreRA and PostRA MachineScheduler.
312protected:
316 std::unique_ptr<MachineSchedStrategy> SchedImpl;
317
318 /// Ordered list of DAG postprocessing steps.
319 std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
320
321 /// The top of the unscheduled zone.
323
324 /// The bottom of the unscheduled zone.
326
327#if LLVM_ENABLE_ABI_BREAKING_CHECKS
328 /// The number of instructions scheduled so far. Used to cut off the
329 /// scheduler at the point determined by misched-cutoff.
330 unsigned NumInstrsScheduled = 0;
331#endif
332
333public:
334 ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
335 bool RemoveKillFlags)
337 LIS(C->LIS), MBFI(C->MBFI), SchedImpl(std::move(S)) {}
338
339 // Provide a vtable anchor
340 ~ScheduleDAGMI() override;
341
342 /// If this method returns true, handling of the scheduling regions
343 /// themselves (in case of a scheduling boundary in MBB) will be done
344 /// beginning with the topmost region of MBB.
345 bool doMBBSchedRegionsTopDown() const override {
346 return SchedImpl->doMBBSchedRegionsTopDown();
347 }
348
349 // Returns LiveIntervals instance for use in DAG mutators and such.
350 LiveIntervals *getLIS() const { return LIS; }
351
352 /// Return true if this DAG supports VReg liveness and RegPressure.
353 virtual bool hasVRegLiveness() const { return false; }
354
355 /// Add a postprocessing step to the DAG builder.
356 /// Mutations are applied in the order that they are added after normal DAG
357 /// building and before MachineSchedStrategy initialization.
358 ///
359 /// ScheduleDAGMI takes ownership of the Mutation object.
360 void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
361 if (Mutation)
362 Mutations.push_back(std::move(Mutation));
363 }
364
367
368 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
369 /// region. This covers all instructions in a block, while schedule() may only
370 /// cover a subset.
371 void enterRegion(MachineBasicBlock *bb,
374 unsigned regioninstrs) override;
375
376 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
377 /// reorderable instructions.
378 void schedule() override;
379
380 void startBlock(MachineBasicBlock *bb) override;
381 void finishBlock() override;
382
383 /// Change the position of an instruction within the basic block and update
384 /// live ranges and region boundary iterators.
385 void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
386
387 void viewGraph(const Twine &Name, const Twine &Title) override;
388 void viewGraph() override;
389
390protected:
391 // Top-Level entry points for the schedule() driver...
392
393 /// Apply each ScheduleDAGMutation step in order. This allows different
394 /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
395 void postProcessDAG();
396
397 /// Release ExitSU predecessors and setup scheduler queues.
398 void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
399
400 /// Update scheduler DAG and queues after scheduling an instruction.
401 void updateQueues(SUnit *SU, bool IsTopNode);
402
403 /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
404 void placeDebugValues();
405
406 /// dump the scheduled Sequence.
407 void dumpSchedule() const;
408 /// Print execution trace of the schedule top-down or bottom-up.
409 void dumpScheduleTraceTopDown() const;
410 void dumpScheduleTraceBottomUp() const;
411
412 // Lesser helpers...
413 bool checkSchedLimit();
414
415 void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
416 SmallVectorImpl<SUnit*> &BotRoots);
417
418 void releaseSucc(SUnit *SU, SDep *SuccEdge);
419 void releaseSuccessors(SUnit *SU);
420 void releasePred(SUnit *SU, SDep *PredEdge);
421 void releasePredecessors(SUnit *SU);
422};
423
424/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
425/// machine instructions while updating LiveIntervals and tracking regpressure.
427protected:
429
430 /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
431 /// will be empty.
434
436
437 /// Maps vregs to the SUnits of their uses in the current scheduling region.
439
440 // Map each SU to its summary of pressure changes. This array is updated for
441 // liveness during bottom-up scheduling. Top-down scheduling may proceed but
442 // has no affect on the pressure diffs.
444
445 /// Register pressure in this region computed by initRegPressure.
450
451 /// List of pressure sets that exceed the target's pressure limit before
452 /// scheduling, listed in increasing set ID order. Each pressure set is paired
453 /// with its max pressure in the currently scheduled regions.
454 std::vector<PressureChange> RegionCriticalPSets;
455
456 /// The top of the unscheduled zone.
459
460 /// The bottom of the unscheduled zone.
463
464public:
466 std::unique_ptr<MachineSchedStrategy> S)
467 : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
470
471 ~ScheduleDAGMILive() override;
472
473 /// Return true if this DAG supports VReg liveness and RegPressure.
474 bool hasVRegLiveness() const override { return true; }
475
476 /// Return true if register pressure tracking is enabled.
478
479 /// Get current register pressure for the top scheduled instructions.
480 const IntervalPressure &getTopPressure() const { return TopPressure; }
482
483 /// Get current register pressure for the bottom scheduled instructions.
484 const IntervalPressure &getBotPressure() const { return BotPressure; }
486
487 /// Get register pressure for the entire scheduling region before scheduling.
488 const IntervalPressure &getRegPressure() const { return RegPressure; }
489
490 const std::vector<PressureChange> &getRegionCriticalPSets() const {
491 return RegionCriticalPSets;
492 }
493
495 return SUPressureDiffs[SU->NodeNum];
496 }
497 const PressureDiff &getPressureDiff(const SUnit *SU) const {
498 return SUPressureDiffs[SU->NodeNum];
499 }
500
501 /// Compute a DFSResult after DAG building is complete, and before any
502 /// queue comparisons.
503 void computeDFSResult();
504
505 /// Return a non-null DFS result if the scheduling strategy initialized it.
506 const SchedDFSResult *getDFSResult() const { return DFSResult; }
507
509
510 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
511 /// region. This covers all instructions in a block, while schedule() may only
512 /// cover a subset.
513 void enterRegion(MachineBasicBlock *bb,
516 unsigned regioninstrs) override;
517
518 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
519 /// reorderable instructions.
520 void schedule() override;
521
522 /// Compute the cyclic critical path through the DAG.
523 unsigned computeCyclicCriticalPath();
524
525 void dump() const override;
526
527protected:
528 // Top-Level entry points for the schedule() driver...
529
530 /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
531 /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
532 /// region, TopTracker and BottomTracker will be initialized to the top and
533 /// bottom of the DAG region without covereing any unscheduled instruction.
534 void buildDAGWithRegPressure();
535
536 /// Release ExitSU predecessors and setup scheduler queues. Re-position
537 /// the Top RP tracker in case the region beginning has changed.
538 void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
539
540 /// Move an instruction and update register pressure.
541 void scheduleMI(SUnit *SU, bool IsTopNode);
542
543 // Lesser helpers...
544
545 void initRegPressure();
546
547 void updatePressureDiffs(ArrayRef<VRegMaskOrUnit> LiveUses);
548
549 void updateScheduledPressure(const SUnit *SU,
550 const std::vector<unsigned> &NewMaxPressure);
551
552 void collectVRegUses(SUnit &SU);
553};
554
555//===----------------------------------------------------------------------===//
556///
557/// Helpers for implementing custom MachineSchedStrategy classes. These take
558/// care of the book-keeping associated with list scheduling heuristics.
559///
560//===----------------------------------------------------------------------===//
561
562/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
563/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
564/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
565///
566/// This is a convenience class that may be used by implementations of
567/// MachineSchedStrategy.
569 unsigned ID;
570 std::string Name;
571 std::vector<SUnit*> Queue;
572
573public:
574 ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
575
576 unsigned getID() const { return ID; }
577
578 StringRef getName() const { return Name; }
579
580 // SU is in this queue if it's NodeQueueID is a superset of this ID.
581 bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
582
583 bool empty() const { return Queue.empty(); }
584
585 void clear() { Queue.clear(); }
586
587 unsigned size() const { return Queue.size(); }
588
589 using iterator = std::vector<SUnit*>::iterator;
590
591 iterator begin() { return Queue.begin(); }
592
593 iterator end() { return Queue.end(); }
594
595 ArrayRef<SUnit*> elements() { return Queue; }
596
597 iterator find(SUnit *SU) { return llvm::find(Queue, SU); }
598
599 void push(SUnit *SU) {
600 Queue.push_back(SU);
601 SU->NodeQueueId |= ID;
602 }
603
605 (*I)->NodeQueueId &= ~ID;
606 *I = Queue.back();
607 unsigned idx = I - Queue.begin();
608 Queue.pop_back();
609 return Queue.begin() + idx;
610 }
611
612 LLVM_ABI void dump() const;
613};
614
615/// Summarize the unscheduled region.
617 // Critical path through the DAG in expected latency.
618 unsigned CriticalPath;
620
621 // Scaled count of micro-ops left to schedule.
623
625
626 // Unscheduled resources
628
630
631 void reset() {
632 CriticalPath = 0;
633 CyclicCritPath = 0;
634 RemIssueCount = 0;
636 RemainingCounts.clear();
637 }
638
639 LLVM_ABI void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
640};
641
642/// ResourceSegments are a collection of intervals closed on the
643/// left and opened on the right:
644///
645/// list{ [a1, b1), [a2, b2), ..., [a_N, b_N) }
646///
647/// The collection has the following properties:
648///
649/// 1. The list is ordered: a_i < b_i and b_i < a_(i+1)
650///
651/// 2. The intervals in the collection do not intersect each other.
652///
653/// A \ref ResourceSegments instance represents the cycle
654/// reservation history of the instance of and individual resource.
656public:
657 /// Represents an interval of discrete integer values closed on
658 /// the left and open on the right: [a, b).
659 typedef std::pair<int64_t, int64_t> IntervalTy;
660
661 /// Adds an interval [a, b) to the collection of the instance.
662 ///
663 /// When adding [a, b[ to the collection, the operation merges the
664 /// adjacent intervals. For example
665 ///
666 /// 0 1 2 3 4 5 6 7 8 9 10
667 /// [-----) [--) [--)
668 /// + [--)
669 /// = [-----------) [--)
670 ///
671 /// To be able to debug duplicate resource usage, the function has
672 /// assertion that checks that no interval should be added if it
673 /// overlaps any of the intervals in the collection. We can
674 /// require this because by definition a \ref ResourceSegments is
675 /// attached only to an individual resource instance.
676 LLVM_ABI void add(IntervalTy A, const unsigned CutOff = 10);
677
678public:
679 /// Checks whether intervals intersect.
681
682 /// These function return the interval used by a resource in bottom and top
683 /// scheduling.
684 ///
685 /// Consider an instruction that uses resources X0, X1 and X2 as follows:
686 ///
687 /// X0 X1 X1 X2 +--------+-------------+--------------+
688 /// |Resource|AcquireAtCycle|ReleaseAtCycle|
689 /// +--------+-------------+--------------+
690 /// | X0 | 0 | 1 |
691 /// +--------+-------------+--------------+
692 /// | X1 | 1 | 3 |
693 /// +--------+-------------+--------------+
694 /// | X2 | 3 | 4 |
695 /// +--------+-------------+--------------+
696 ///
697 /// If we can schedule the instruction at cycle C, we need to
698 /// compute the interval of the resource as follows:
699 ///
700 /// # TOP DOWN SCHEDULING
701 ///
702 /// Cycles scheduling flows to the _right_, in the same direction
703 /// of time.
704 ///
705 /// C 1 2 3 4 5 ...
706 /// ------|------|------|------|------|------|----->
707 /// X0 X1 X1 X2 ---> direction of time
708 /// X0 [C, C+1)
709 /// X1 [C+1, C+3)
710 /// X2 [C+3, C+4)
711 ///
712 /// Therefore, the formula to compute the interval for a resource
713 /// of an instruction that can be scheduled at cycle C in top-down
714 /// scheduling is:
715 ///
716 /// [C+AcquireAtCycle, C+ReleaseAtCycle)
717 ///
718 ///
719 /// # BOTTOM UP SCHEDULING
720 ///
721 /// Cycles scheduling flows to the _left_, in opposite direction
722 /// of time.
723 ///
724 /// In bottom up scheduling, the scheduling happens in opposite
725 /// direction to the execution of the cycles of the
726 /// instruction. When the instruction is scheduled at cycle `C`,
727 /// the resources are allocated in the past relative to `C`:
728 ///
729 /// 2 1 C -1 -2 -3 -4 -5 ...
730 /// <-----|------|------|------|------|------|------|------|---
731 /// X0 X1 X1 X2 ---> direction of time
732 /// X0 (C+1, C]
733 /// X1 (C, C-2]
734 /// X2 (C-2, C-3]
735 ///
736 /// Therefore, the formula to compute the interval for a resource
737 /// of an instruction that can be scheduled at cycle C in bottom-up
738 /// scheduling is:
739 ///
740 /// [C-ReleaseAtCycle+1, C-AcquireAtCycle+1)
741 ///
742 ///
743 /// NOTE: In both cases, the number of cycles booked by a
744 /// resources is the value (ReleaseAtCycle - AcquireAtCycle).
745 static IntervalTy getResourceIntervalBottom(unsigned C, unsigned AcquireAtCycle,
746 unsigned ReleaseAtCycle) {
747 return std::make_pair<long, long>((long)C - (long)ReleaseAtCycle + 1L,
748 (long)C - (long)AcquireAtCycle + 1L);
749 }
750 static IntervalTy getResourceIntervalTop(unsigned C, unsigned AcquireAtCycle,
751 unsigned ReleaseAtCycle) {
752 return std::make_pair<long, long>((long)C + (long)AcquireAtCycle,
753 (long)C + (long)ReleaseAtCycle);
754 }
755
756private:
757 /// Finds the first cycle in which a resource can be allocated.
758 ///
759 /// The function uses the \param IntervalBuider [*] to build a
760 /// resource interval [a, b[ out of the input parameters \param
761 /// CurrCycle, \param AcquireAtCycle and \param ReleaseAtCycle.
762 ///
763 /// The function then loops through the intervals in the ResourceSegments
764 /// and shifts the interval [a, b[ and the ReturnCycle to the
765 /// right until there is no intersection between the intervals of
766 /// the \ref ResourceSegments instance and the new shifted [a, b[. When
767 /// this condition is met, the ReturnCycle (which
768 /// correspond to the cycle in which the resource can be
769 /// allocated) is returned.
770 ///
771 /// c = CurrCycle in input
772 /// c 1 2 3 4 5 6 7 8 9 10 ... ---> (time
773 /// flow)
774 /// ResourceSegments... [---) [-------) [-----------)
775 /// c [1 3[ -> AcquireAtCycle=1, ReleaseAtCycle=3
776 /// ++c [1 3)
777 /// ++c [1 3)
778 /// ++c [1 3)
779 /// ++c [1 3)
780 /// ++c [1 3) ---> returns c
781 /// incremented by 5 (c+5)
782 ///
783 ///
784 /// Notice that for bottom-up scheduling the diagram is slightly
785 /// different because the current cycle c is always on the right
786 /// of the interval [a, b) (see \ref
787 /// `getResourceIntervalBottom`). This is because the cycle
788 /// increments for bottom-up scheduling moved in the direction
789 /// opposite to the direction of time:
790 ///
791 /// --------> direction of time.
792 /// XXYZZZ (resource usage)
793 /// --------> direction of top-down execution cycles.
794 /// <-------- direction of bottom-up execution cycles.
795 ///
796 /// Even though bottom-up scheduling moves against the flow of
797 /// time, the algorithm used to find the first free slot in between
798 /// intervals is the same as for top-down scheduling.
799 ///
800 /// [*] See \ref `getResourceIntervalTop` and
801 /// \ref `getResourceIntervalBottom` to see how such resource intervals
802 /// are built.
803 LLVM_ABI unsigned getFirstAvailableAt(
804 unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle,
805 std::function<IntervalTy(unsigned, unsigned, unsigned)> IntervalBuilder)
806 const;
807
808public:
809 /// getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop
810 /// should be merged in a single function in which a function that
811 /// creates the `NewInterval` is passed as a parameter.
812 unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle,
813 unsigned AcquireAtCycle,
814 unsigned ReleaseAtCycle) const {
815 return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
817 }
818 unsigned getFirstAvailableAtFromTop(unsigned CurrCycle,
819 unsigned AcquireAtCycle,
820 unsigned ReleaseAtCycle) const {
821 return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
823 }
824
825private:
826 std::list<IntervalTy> _Intervals;
827 /// Merge all adjacent intervals in the collection. For all pairs
828 /// of adjacient intervals, it performs [a, b) + [b, c) -> [a, c).
829 ///
830 /// Before performing the merge operation, the intervals are
831 /// sorted with \ref sort_predicate.
832 LLVM_ABI void sortAndMerge();
833
834public:
835 // constructor for empty set
836 explicit ResourceSegments() = default;
837 bool empty() const { return _Intervals.empty(); }
838 explicit ResourceSegments(const std::list<IntervalTy> &Intervals)
839 : _Intervals(Intervals) {
840 sortAndMerge();
841 }
842
843 friend bool operator==(const ResourceSegments &c1,
844 const ResourceSegments &c2) {
845 return c1._Intervals == c2._Intervals;
846 }
848 const ResourceSegments &Segments) {
849 os << "{ ";
850 for (auto p : Segments._Intervals)
851 os << "[" << p.first << ", " << p.second << "), ";
852 os << "}\n";
853 return os;
854 }
855};
856
857/// Each Scheduling boundary is associated with ready queues. It tracks the
858/// current cycle in the direction of movement, and maintains the state
859/// of "hazards" and other interlocks at the current cycle.
861public:
862 /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
863 enum {
867 };
868
869 ScheduleDAGMI *DAG = nullptr;
870 const TargetSchedModel *SchedModel = nullptr;
871 SchedRemainder *Rem = nullptr;
872
875
877
878private:
879 /// True if the pending Q should be checked/updated before scheduling another
880 /// instruction.
881 bool CheckPending;
882
883 /// Number of cycles it takes to issue the instructions scheduled in this
884 /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
885 /// See getStalls().
886 unsigned CurrCycle;
887
888 /// Micro-ops issued in the current cycle
889 unsigned CurrMOps;
890
891 /// MinReadyCycle - Cycle of the soonest available instruction.
892 unsigned MinReadyCycle;
893
894 // The expected latency of the critical path in this scheduled zone.
895 unsigned ExpectedLatency;
896
897 // The latency of dependence chains leading into this zone.
898 // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
899 // For each cycle scheduled: DLat -= 1.
900 unsigned DependentLatency;
901
902 /// Count the scheduled (issued) micro-ops that can be retired by
903 /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
904 unsigned RetiredMOps;
905
906 // Count scheduled resources that have been executed. Resources are
907 // considered executed if they become ready in the time that it takes to
908 // saturate any resource including the one in question. Counts are scaled
909 // for direct comparison with other resources. Counts can be compared with
910 // MOps * getMicroOpFactor and Latency * getLatencyFactor.
911 SmallVector<unsigned, 16> ExecutedResCounts;
912
913 /// Cache the max count for a single resource.
914 unsigned MaxExecutedResCount;
915
916 // Cache the critical resources ID in this scheduled zone.
917 unsigned ZoneCritResIdx;
918
919 // Is the scheduled region resource limited vs. latency limited.
920 bool IsResourceLimited;
921
922public:
923private:
924 /// Record how resources have been allocated across the cycles of
925 /// the execution.
926 std::map<unsigned, ResourceSegments> ReservedResourceSegments;
927 std::vector<unsigned> ReservedCycles;
928 /// For each PIdx, stores first index into ReservedResourceSegments that
929 /// corresponds to it.
930 ///
931 /// For example, consider the following 3 resources (ResourceCount =
932 /// 3):
933 ///
934 /// +------------+--------+
935 /// |ResourceName|NumUnits|
936 /// +------------+--------+
937 /// | X | 2 |
938 /// +------------+--------+
939 /// | Y | 3 |
940 /// +------------+--------+
941 /// | Z | 1 |
942 /// +------------+--------+
943 ///
944 /// In this case, the total number of resource instances is 6. The
945 /// vector \ref ReservedResourceSegments will have a slot for each instance.
946 /// The vector \ref ReservedCyclesIndex will track at what index the first
947 /// instance of the resource is found in the vector of \ref
948 /// ReservedResourceSegments:
949 ///
950 /// Indexes of instances in
951 /// ReservedResourceSegments
952 ///
953 /// 0 1 2 3 4 5
954 /// ReservedCyclesIndex[0] = 0; [X0, X1,
955 /// ReservedCyclesIndex[1] = 2; Y0, Y1, Y2
956 /// ReservedCyclesIndex[2] = 5; Z
957 SmallVector<unsigned, 16> ReservedCyclesIndex;
958
959 // For each PIdx, stores the resource group IDs of its subunits
960 SmallVector<APInt, 16> ResourceGroupSubUnitMasks;
961
962#if LLVM_ENABLE_ABI_BREAKING_CHECKS
963 // Remember the greatest possible stall as an upper bound on the number of
964 // times we should retry the pending queue because of a hazard.
965 unsigned MaxObservedStall;
966#endif
967
968public:
969 /// Pending queues extend the ready queues with the same ID and the
970 /// PendingFlag set.
971 SchedBoundary(unsigned ID, const Twine &Name):
972 Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
973 reset();
974 }
975 SchedBoundary &operator=(const SchedBoundary &other) = delete;
976 SchedBoundary(const SchedBoundary &other) = delete;
978
979 LLVM_ABI void reset();
980
981 LLVM_ABI void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
982 SchedRemainder *rem);
983
984 bool isTop() const {
985 return Available.getID() == TopQID;
986 }
987
988 /// Number of cycles to issue the instructions scheduled in this zone.
989 unsigned getCurrCycle() const { return CurrCycle; }
990
991 /// Micro-ops issued in the current cycle
992 unsigned getCurrMOps() const { return CurrMOps; }
993
994 // The latency of dependence chains leading into this zone.
995 unsigned getDependentLatency() const { return DependentLatency; }
996
997 /// Get the number of latency cycles "covered" by the scheduled
998 /// instructions. This is the larger of the critical path within the zone
999 /// and the number of cycles required to issue the instructions.
1000 unsigned getScheduledLatency() const {
1001 return std::max(ExpectedLatency, CurrCycle);
1002 }
1003
1004 unsigned getUnscheduledLatency(SUnit *SU) const {
1005 return isTop() ? SU->getHeight() : SU->getDepth();
1006 }
1007
1008 unsigned getResourceCount(unsigned ResIdx) const {
1009 return ExecutedResCounts[ResIdx];
1010 }
1011
1012 /// Get the scaled count of scheduled micro-ops and resources, including
1013 /// executed resources.
1014 unsigned getCriticalCount() const {
1015 if (!ZoneCritResIdx)
1016 return RetiredMOps * SchedModel->getMicroOpFactor();
1017 return getResourceCount(ZoneCritResIdx);
1018 }
1019
1020 /// Get a scaled count for the minimum execution time of the scheduled
1021 /// micro-ops that are ready to execute by getExecutedCount. Notice the
1022 /// feedback loop.
1023 unsigned getExecutedCount() const {
1024 return std::max(CurrCycle * SchedModel->getLatencyFactor(),
1025 MaxExecutedResCount);
1026 }
1027
1028 unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
1029
1030 // Is the scheduled region resource limited vs. latency limited.
1031 bool isResourceLimited() const { return IsResourceLimited; }
1032
1033 /// Get the difference between the given SUnit's ready time and the current
1034 /// cycle.
1035 LLVM_ABI unsigned getLatencyStallCycles(SUnit *SU);
1036
1037 LLVM_ABI unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
1038 unsigned ReleaseAtCycle,
1039 unsigned AcquireAtCycle);
1040
1041 LLVM_ABI std::pair<unsigned, unsigned>
1042 getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
1043 unsigned ReleaseAtCycle, unsigned AcquireAtCycle);
1044
1045 bool isReservedGroup(unsigned PIdx) const {
1046 return SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin &&
1047 !SchedModel->getProcResource(PIdx)->BufferSize;
1048 }
1049
1050 LLVM_ABI bool checkHazard(SUnit *SU);
1051
1052 LLVM_ABI unsigned findMaxLatency(ArrayRef<SUnit *> ReadySUs);
1053
1054 LLVM_ABI unsigned getOtherResourceCount(unsigned &OtherCritIdx);
1055
1056 /// Release SU to make it ready. If it's not in hazard, remove it from
1057 /// pending queue (if already in) and push into available queue.
1058 /// Otherwise, push the SU into pending queue.
1059 ///
1060 /// @param SU The unit to be released.
1061 /// @param ReadyCycle Until which cycle the unit is ready.
1062 /// @param InPQueue Whether SU is already in pending queue.
1063 /// @param Idx Position offset in pending queue (if in it).
1064 LLVM_ABI void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
1065 unsigned Idx = 0);
1066
1067 LLVM_ABI void bumpCycle(unsigned NextCycle);
1068
1069 LLVM_ABI void incExecutedResources(unsigned PIdx, unsigned Count);
1070
1071 LLVM_ABI unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx,
1072 unsigned Cycles, unsigned ReadyCycle,
1073 unsigned StartAtCycle);
1074
1075 LLVM_ABI void bumpNode(SUnit *SU);
1076
1077 LLVM_ABI void releasePending();
1078
1079 LLVM_ABI void removeReady(SUnit *SU);
1080
1081 /// Call this before applying any other heuristics to the Available queue.
1082 /// Updates the Available/Pending Q's if necessary and returns the single
1083 /// available instruction, or NULL if there are multiple candidates.
1085
1086 /// Dump the state of the information that tracks resource usage.
1087 LLVM_ABI void dumpReservedCycles() const;
1088 LLVM_ABI void dumpScheduledState() const;
1089};
1090
1091/// Base class for GenericScheduler. This class maintains information about
1092/// scheduling candidates based on TargetSchedModel making it easy to implement
1093/// heuristics for either preRA or postRA scheduling.
1095public:
1096 /// Represent the type of SchedCandidate found within a single queue.
1097 /// pickNodeBidirectional depends on these listed by decreasing priority.
1117
1118#ifndef NDEBUG
1119 static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
1120#endif
1121
1122 /// Policy for scheduling the next instruction in the candidate's zone.
1123 struct CandPolicy {
1124 bool ReduceLatency = false;
1125 unsigned ReduceResIdx = 0;
1126 unsigned DemandResIdx = 0;
1127
1128 CandPolicy() = default;
1129
1130 bool operator==(const CandPolicy &RHS) const {
1131 return ReduceLatency == RHS.ReduceLatency &&
1132 ReduceResIdx == RHS.ReduceResIdx &&
1133 DemandResIdx == RHS.DemandResIdx;
1134 }
1135 bool operator!=(const CandPolicy &RHS) const {
1136 return !(*this == RHS);
1137 }
1138 };
1139
1140 /// Status of an instruction's critical resource consumption.
1142 // Count critical resources in the scheduled region required by SU.
1143 unsigned CritResources = 0;
1144
1145 // Count critical resources from another region consumed by SU.
1146 unsigned DemandedResources = 0;
1147
1149
1150 bool operator==(const SchedResourceDelta &RHS) const {
1151 return CritResources == RHS.CritResources
1152 && DemandedResources == RHS.DemandedResources;
1153 }
1154 bool operator!=(const SchedResourceDelta &RHS) const {
1155 return !operator==(RHS);
1156 }
1157 };
1158
1159 /// Store the state used by GenericScheduler heuristics, required for the
1160 /// lifetime of one invocation of pickNode().
1163
1164 // The best SUnit candidate.
1166
1167 // The reason for this candidate.
1169
1170 // Whether this candidate should be scheduled at top/bottom.
1171 bool AtTop;
1172
1173 // Register pressure values for the best candidate.
1175
1176 // Critical resource consumption of the best candidate.
1178
1181
1182 void reset(const CandPolicy &NewPolicy) {
1183 Policy = NewPolicy;
1184 SU = nullptr;
1185 Reason = NoCand;
1186 AtTop = false;
1189 }
1190
1191 bool isValid() const { return SU; }
1192
1193 // Copy the status of another candidate without changing policy.
1195 assert(Best.Reason != NoCand && "uninitialized Sched candidate");
1196 SU = Best.SU;
1197 Reason = Best.Reason;
1198 AtTop = Best.AtTop;
1199 RPDelta = Best.RPDelta;
1200 ResDelta = Best.ResDelta;
1201 }
1202
1205 };
1206
1207protected:
1210 const TargetRegisterInfo *TRI = nullptr;
1211 unsigned TopIdx = 0;
1212 unsigned BotIdx = 0;
1213 unsigned NumRegionInstrs = 0;
1214
1216
1218
1220
1221 LLVM_ABI void setPolicy(CandPolicy &Policy, bool IsPostRA,
1222 SchedBoundary &CurrZone, SchedBoundary *OtherZone);
1223
1224 MachineSchedPolicy getPolicy() const override { return RegionPolicy; }
1225
1226#ifndef NDEBUG
1227 void traceCandidate(const SchedCandidate &Cand);
1228#endif
1229
1230private:
1231 bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
1232 bool ComputeRemLatency, unsigned &RemLatency) const;
1233};
1234
1235// Utility functions used by heuristics in tryCandidate().
1236LLVM_ABI bool tryLess(int TryVal, int CandVal,
1237 GenericSchedulerBase::SchedCandidate &TryCand,
1238 GenericSchedulerBase::SchedCandidate &Cand,
1240LLVM_ABI bool tryGreater(int TryVal, int CandVal,
1241 GenericSchedulerBase::SchedCandidate &TryCand,
1242 GenericSchedulerBase::SchedCandidate &Cand,
1244LLVM_ABI bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
1245 GenericSchedulerBase::SchedCandidate &Cand,
1246 SchedBoundary &Zone);
1247LLVM_ABI bool tryPressure(const PressureChange &TryP,
1248 const PressureChange &CandP,
1249 GenericSchedulerBase::SchedCandidate &TryCand,
1250 GenericSchedulerBase::SchedCandidate &Cand,
1252 const TargetRegisterInfo *TRI,
1253 const MachineFunction &MF);
1254LLVM_ABI unsigned getWeakLeft(const SUnit *SU, bool isTop);
1255LLVM_ABI int biasPhysReg(const SUnit *SU, bool isTop);
1256
1257/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
1258/// the schedule.
1260public:
1262 GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
1263 Bot(SchedBoundary::BotQID, "BotQ") {}
1264
1265 void initPolicy(MachineBasicBlock::iterator Begin,
1267 unsigned NumRegionInstrs) override;
1268
1269 void dumpPolicy() const override;
1270
1271 bool shouldTrackPressure() const override {
1272 return RegionPolicy.ShouldTrackPressure;
1273 }
1274
1275 bool shouldTrackLaneMasks() const override {
1276 return RegionPolicy.ShouldTrackLaneMasks;
1277 }
1278
1279 void initialize(ScheduleDAGMI *dag) override;
1280
1281 SUnit *pickNode(bool &IsTopNode) override;
1282
1283 void schedNode(SUnit *SU, bool IsTopNode) override;
1284
1285 void releaseTopNode(SUnit *SU) override {
1286 if (SU->isScheduled)
1287 return;
1288
1289 Top.releaseNode(SU, SU->TopReadyCycle, false);
1290 TopCand.SU = nullptr;
1291 }
1292
1293 void releaseBottomNode(SUnit *SU) override {
1294 if (SU->isScheduled)
1295 return;
1296
1297 Bot.releaseNode(SU, SU->BotReadyCycle, false);
1298 BotCand.SU = nullptr;
1299 }
1300
1301 void registerRoots() override;
1302
1303protected:
1305
1306 // State of the top and bottom scheduled instruction boundaries.
1309
1312
1313 /// Candidate last picked from Top boundary.
1315 /// Candidate last picked from Bot boundary.
1317
1318 void checkAcyclicLatency();
1319
1320 void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
1321 const RegPressureTracker &RPTracker,
1322 RegPressureTracker &TempTracker);
1323
1324 virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
1325 SchedBoundary *Zone) const;
1326
1327 SUnit *pickNodeBidirectional(bool &IsTopNode);
1328
1330 const CandPolicy &ZonePolicy,
1331 const RegPressureTracker &RPTracker,
1332 SchedCandidate &Candidate);
1333
1334 void reschedulePhysReg(SUnit *SU, bool isTop);
1335};
1336
1337/// PostGenericScheduler - Interface to the scheduling algorithm used by
1338/// ScheduleDAGMI.
1339///
1340/// Callbacks from ScheduleDAGMI:
1341/// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
1343protected:
1344 ScheduleDAGMI *DAG = nullptr;
1347
1348 /// Candidate last picked from Top boundary.
1350 /// Candidate last picked from Bot boundary.
1352
1355
1356public:
1358 : GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
1359 Bot(SchedBoundary::BotQID, "BotQ") {}
1360
1361 ~PostGenericScheduler() override = default;
1362
1365 unsigned NumRegionInstrs) override;
1366
1367 /// PostRA scheduling does not track pressure.
1368 bool shouldTrackPressure() const override { return false; }
1369
1370 void initialize(ScheduleDAGMI *Dag) override;
1371
1372 void registerRoots() override;
1373
1374 SUnit *pickNode(bool &IsTopNode) override;
1375
1376 SUnit *pickNodeBidirectional(bool &IsTopNode);
1377
1378 void scheduleTree(unsigned SubtreeID) override {
1379 llvm_unreachable("PostRA scheduler does not support subtree analysis.");
1380 }
1381
1382 void schedNode(SUnit *SU, bool IsTopNode) override;
1383
1384 void releaseTopNode(SUnit *SU) override {
1385 if (SU->isScheduled)
1386 return;
1387 Top.releaseNode(SU, SU->TopReadyCycle, false);
1388 TopCand.SU = nullptr;
1389 }
1390
1391 void releaseBottomNode(SUnit *SU) override {
1392 if (SU->isScheduled)
1393 return;
1394 Bot.releaseNode(SU, SU->BotReadyCycle, false);
1395 BotCand.SU = nullptr;
1396 }
1397
1398protected:
1399 virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
1400
1401 void pickNodeFromQueue(SchedBoundary &Zone, SchedCandidate &Cand);
1402};
1403
1404/// If ReorderWhileClustering is set to true, no attempt will be made to
1405/// reduce reordering due to store clustering.
1406LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
1407createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1408 const TargetRegisterInfo *TRI,
1409 bool ReorderWhileClustering = false);
1410
1411/// If ReorderWhileClustering is set to true, no attempt will be made to
1412/// reduce reordering due to store clustering.
1413LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
1414createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1415 const TargetRegisterInfo *TRI,
1416 bool ReorderWhileClustering = false);
1417
1418LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
1419createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1420 const TargetRegisterInfo *TRI);
1421
1422/// Create the standard converging machine scheduler. This will be used as the
1423/// default scheduler if the target does not set a default.
1424/// Adds default DAG mutations.
1425template <typename Strategy = GenericScheduler>
1427 ScheduleDAGMILive *DAG =
1428 new ScheduleDAGMILive(C, std::make_unique<Strategy>(C));
1429 // Register DAG post-processors.
1430 //
1431 // FIXME: extend the mutation API to allow earlier mutations to instantiate
1432 // data and pass it to later mutations. Have a single mutation that gathers
1433 // the interesting nodes in one pass.
1435
1436 const TargetSubtargetInfo &STI = C->MF->getSubtarget();
1437 // Add MacroFusion mutation if fusions are not empty.
1438 const auto &MacroFusions = STI.getMacroFusions();
1439 if (!MacroFusions.empty())
1440 DAG->addMutation(createMacroFusionDAGMutation(MacroFusions));
1441 return DAG;
1442}
1443
1444/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
1445template <typename Strategy = PostGenericScheduler>
1447 ScheduleDAGMI *DAG = new ScheduleDAGMI(C, std::make_unique<Strategy>(C),
1448 /*RemoveKillFlags=*/true);
1449 const TargetSubtargetInfo &STI = C->MF->getSubtarget();
1450 // Add MacroFusion mutation if fusions are not empty.
1451 const auto &MacroFusions = STI.getMacroFusions();
1452 if (!MacroFusions.empty())
1453 DAG->addMutation(createMacroFusionDAGMutation(MacroFusions));
1454 return DAG;
1455}
1456
1457class MachineSchedulerPass : public PassInfoMixin<MachineSchedulerPass> {
1458 // FIXME: Remove this member once RegisterClassInfo is queryable as an
1459 // analysis.
1460 std::unique_ptr<impl_detail::MachineSchedulerImpl> Impl;
1461 const TargetMachine *TM;
1462
1463public:
1469};
1470
1472 : public PassInfoMixin<PostMachineSchedulerPass> {
1473 // FIXME: Remove this member once RegisterClassInfo is queryable as an
1474 // analysis.
1475 std::unique_ptr<impl_detail::PostMachineSchedulerImpl> Impl;
1476 const TargetMachine *TM;
1477
1478public:
1484};
1485} // end namespace llvm
1486
1487#endif // LLVM_CODEGEN_MACHINESCHEDULER_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
PowerPC VSX FMA Mutation
static const char * name
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, const llvm::StringTable &StandardNames, VectorLibrary VecLib)
Initialize the set of available library functions based on the specified target triple.
Value * RHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
void traceCandidate(const SchedCandidate &Cand)
LLVM_ABI void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone, SchedBoundary *OtherZone)
Set the CandPolicy given a scheduling zone given the current resources and latencies inside and outsi...
MachineSchedPolicy RegionPolicy
const TargetSchedModel * SchedModel
static const char * getReasonStr(GenericSchedulerBase::CandReason Reason)
MachineSchedPolicy getPolicy() const override
GenericSchedulerBase(const MachineSchedContext *C)
const MachineSchedContext * Context
CandReason
Represent the type of SchedCandidate found within a single queue.
const TargetRegisterInfo * TRI
void checkAcyclicLatency()
Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic critical path by more cycle...
SchedCandidate BotCand
Candidate last picked from Bot boundary.
SchedCandidate TopCand
Candidate last picked from Top boundary.
virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const
Apply a set of heuristics to a new candidate.
ScheduleDAGMILive * DAG
void releaseBottomNode(SUnit *SU) override
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop, const RegPressureTracker &RPTracker, RegPressureTracker &TempTracker)
bool shouldTrackPressure() const override
Check if pressure tracking is needed before building the DAG and initializing this strategy.
void releaseTopNode(SUnit *SU) override
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
void reschedulePhysReg(SUnit *SU, bool isTop)
void pickNodeFromQueue(SchedBoundary &Zone, const CandPolicy &ZonePolicy, const RegPressureTracker &RPTracker, SchedCandidate &Candidate)
Pick the best candidate from the queue.
bool shouldTrackLaneMasks() const override
Returns true if lanemasks should be tracked.
GenericScheduler(const MachineSchedContext *C)
SUnit * pickNodeBidirectional(bool &IsTopNode)
Pick the best candidate node from either the top or bottom queue.
MachineInstrBundleIterator< MachineInstr > iterator
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Representation of each machine instruction.
MachinePassRegistryListener - Listener to adds and removals of nodes in registration list.
MachinePassRegistryNode(const char *N, const char *D, ScheduleDAGInstrs *C)
MachinePassRegistryNode * getNext() const
MachinePassRegistry - Track the registration of machine passes.
static void setListener(MachinePassRegistryListener< FunctionPassCtor > *L)
static LLVM_ABI MachinePassRegistry< ScheduleDAGCtor > Registry
MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
static MachineSchedRegistry * getList()
ScheduleDAGInstrs *(*)(MachineSchedContext *) ScheduleDAGCtor
MachineSchedRegistry * getNext() const
MachineSchedStrategy - Interface to the scheduling algorithm used by ScheduleDAGMI.
virtual bool shouldTrackPressure() const
Check if pressure tracking is needed before building the DAG and initializing this strategy.
virtual void leaveMBB()
Tell the strategy that current MBB is done.
virtual void enterMBB(MachineBasicBlock *MBB)
Tell the strategy that MBB is about to be processed.
virtual void scheduleTree(unsigned SubtreeID)
Scheduler callback to notify that a new subtree is scheduled.
virtual void schedNode(SUnit *SU, bool IsTopNode)=0
Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an instruction and updated scheduled/rem...
virtual ~MachineSchedStrategy()=default
virtual void initialize(ScheduleDAGMI *DAG)=0
Initialize the strategy after building the DAG for a new region.
virtual MachineSchedPolicy getPolicy() const
virtual void releaseTopNode(SUnit *SU)=0
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
virtual void dumpPolicy() const
virtual bool doMBBSchedRegionsTopDown() const
virtual SUnit * pickNode(bool &IsTopNode)=0
Pick the next node to schedule, or return NULL.
virtual void initPolicy(MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, unsigned NumRegionInstrs)
Optionally override the per-region scheduling policy.
virtual void releaseBottomNode(SUnit *SU)=0
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
virtual bool shouldTrackLaneMasks() const
Returns true if lanemasks should be tracked.
virtual void registerRoots()
Notify this strategy that all roots have been released (including those that depend on EntrySU or Exi...
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI MachineSchedulerPass(const TargetMachine *TM)
LLVM_ABI MachineSchedulerPass(MachineSchedulerPass &&Other)
void initPolicy(MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, unsigned NumRegionInstrs) override
Optionally override the per-region scheduling policy.
bool shouldTrackPressure() const override
PostRA scheduling does not track pressure.
void scheduleTree(unsigned SubtreeID) override
Scheduler callback to notify that a new subtree is scheduled.
SchedCandidate BotCand
Candidate last picked from Bot boundary.
SchedCandidate TopCand
Candidate last picked from Top boundary.
void releaseTopNode(SUnit *SU) override
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
~PostGenericScheduler() override=default
void releaseBottomNode(SUnit *SU) override
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
PostGenericScheduler(const MachineSchedContext *C)
LLVM_ABI PostMachineSchedulerPass(PostMachineSchedulerPass &&Other)
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI PostMachineSchedulerPass(const TargetMachine *TM)
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
List of PressureChanges in order of increasing, unique PSetID.
Array of PressureDiffs.
Helpers for implementing custom MachineSchedStrategy classes.
void push(SUnit *SU)
iterator find(SUnit *SU)
ArrayRef< SUnit * > elements()
LLVM_ABI void dump() const
ReadyQueue(unsigned id, const Twine &name)
bool isInQueue(SUnit *SU) const
std::vector< SUnit * >::iterator iterator
StringRef getName() const
unsigned size() const
iterator remove(iterator I)
unsigned getID() const
Track the current register pressure at some position in the instruction stream, and remember the high...
LLVM_ABI void add(IntervalTy A, const unsigned CutOff=10)
Adds an interval [a, b) to the collection of the instance.
static IntervalTy getResourceIntervalBottom(unsigned C, unsigned AcquireAtCycle, unsigned ReleaseAtCycle)
These function return the interval used by a resource in bottom and top scheduling.
friend bool operator==(const ResourceSegments &c1, const ResourceSegments &c2)
static LLVM_ABI bool intersects(IntervalTy A, IntervalTy B)
Checks whether intervals intersect.
unsigned getFirstAvailableAtFromTop(unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle) const
friend llvm::raw_ostream & operator<<(llvm::raw_ostream &os, const ResourceSegments &Segments)
std::pair< int64_t, int64_t > IntervalTy
Represents an interval of discrete integer values closed on the left and open on the right: [a,...
static IntervalTy getResourceIntervalTop(unsigned C, unsigned AcquireAtCycle, unsigned ReleaseAtCycle)
ResourceSegments(const std::list< IntervalTy > &Intervals)
unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle) const
getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop should be merged in a single function in...
Scheduling dependency.
Definition ScheduleDAG.h:51
Scheduling unit. This is a node in the scheduling DAG.
unsigned NodeQueueId
Queue id of node.
unsigned TopReadyCycle
Cycle relative to start when node is ready.
unsigned NodeNum
Entry # of node in the node vector.
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
unsigned getDepth() const
Returns the depth of this node, which is the length of the maximum path up to any node which has no p...
bool isScheduled
True once scheduled.
unsigned BotReadyCycle
Cycle relative to end when node is ready.
Each Scheduling boundary is associated with ready queues.
LLVM_ABI unsigned getNextResourceCycleByInstance(unsigned InstanceIndex, unsigned ReleaseAtCycle, unsigned AcquireAtCycle)
Compute the next cycle at which the given processor resource unit can be scheduled.
LLVM_ABI void releasePending()
Release pending ready nodes in to the available queue.
unsigned getDependentLatency() const
bool isReservedGroup(unsigned PIdx) const
unsigned getScheduledLatency() const
Get the number of latency cycles "covered" by the scheduled instructions.
LLVM_ABI void incExecutedResources(unsigned PIdx, unsigned Count)
bool isResourceLimited() const
const TargetSchedModel * SchedModel
unsigned getExecutedCount() const
Get a scaled count for the minimum execution time of the scheduled micro-ops that are ready to execut...
LLVM_ABI unsigned getLatencyStallCycles(SUnit *SU)
Get the difference between the given SUnit's ready time and the current cycle.
SchedBoundary(const SchedBoundary &other)=delete
LLVM_ABI unsigned findMaxLatency(ArrayRef< SUnit * > ReadySUs)
LLVM_ABI void dumpReservedCycles() const
Dump the state of the information that tracks resource usage.
LLVM_ABI unsigned getOtherResourceCount(unsigned &OtherCritIdx)
SchedRemainder * Rem
LLVM_ABI void bumpNode(SUnit *SU)
Move the boundary of scheduled code by one SUnit.
unsigned getCriticalCount() const
Get the scaled count of scheduled micro-ops and resources, including executed resources.
LLVM_ABI SUnit * pickOnlyChoice()
Call this before applying any other heuristics to the Available queue.
LLVM_ABI void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue, unsigned Idx=0)
Release SU to make it ready.
LLVM_ABI unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx, unsigned Cycles, unsigned ReadyCycle, unsigned StartAtCycle)
Add the given processor resource to this scheduled zone.
SchedBoundary(unsigned ID, const Twine &Name)
Pending queues extend the ready queues with the same ID and the PendingFlag set.
ScheduleHazardRecognizer * HazardRec
LLVM_ABI void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem)
SchedBoundary & operator=(const SchedBoundary &other)=delete
unsigned getResourceCount(unsigned ResIdx) const
LLVM_ABI void bumpCycle(unsigned NextCycle)
Move the boundary of scheduled code by one cycle.
unsigned getCurrMOps() const
Micro-ops issued in the current cycle.
unsigned getCurrCycle() const
Number of cycles to issue the instructions scheduled in this zone.
LLVM_ABI bool checkHazard(SUnit *SU)
Does this SU have a hazard within the current instruction group.
LLVM_ABI std::pair< unsigned, unsigned > getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx, unsigned ReleaseAtCycle, unsigned AcquireAtCycle)
Compute the next cycle at which the given processor resource can be scheduled.
LLVM_ABI void dumpScheduledState() const
LLVM_ABI void removeReady(SUnit *SU)
Remove SU from the ready set for this boundary.
unsigned getZoneCritResIdx() const
unsigned getUnscheduledLatency(SUnit *SU) const
Compute the values of each DAG node for various metrics during DFS.
Definition ScheduleDFS.h:65
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGInstrs(MachineFunction &mf, const MachineLoopInfo *mli, bool RemoveKillFlags=false)
const MachineLoopInfo * MLI
bool RemoveKillFlags
True if the DAG builder should remove kill flags (in preparation for rescheduling).
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
VReg2SUnitMultiMap VRegUses
Maps vregs to the SUnits of their uses in the current scheduling region.
PressureDiff & getPressureDiff(const SUnit *SU)
SchedDFSResult * DFSResult
Information about DAG subtrees.
RegPressureTracker BotRPTracker
std::vector< PressureChange > RegionCriticalPSets
List of pressure sets that exceed the target's pressure limit before scheduling, listed in increasing...
IntervalPressure TopPressure
The top of the unscheduled zone.
const RegPressureTracker & getBotRPTracker() const
ScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S)
IntervalPressure BotPressure
The bottom of the unscheduled zone.
bool isTrackingPressure() const
Return true if register pressure tracking is enabled.
bool hasVRegLiveness() const override
Return true if this DAG supports VReg liveness and RegPressure.
RegisterClassInfo * RegClassInfo
const SchedDFSResult * getDFSResult() const
Return a non-null DFS result if the scheduling strategy initialized it.
const PressureDiff & getPressureDiff(const SUnit *SU) const
const RegPressureTracker & getTopRPTracker() const
RegPressureTracker RPTracker
bool ShouldTrackPressure
Register pressure in this region computed by initRegPressure.
const IntervalPressure & getRegPressure() const
Get register pressure for the entire scheduling region before scheduling.
const IntervalPressure & getBotPressure() const
Get current register pressure for the bottom scheduled instructions.
MachineBasicBlock::iterator LiveRegionEnd
const IntervalPressure & getTopPressure() const
Get current register pressure for the top scheduled instructions.
const std::vector< PressureChange > & getRegionCriticalPSets() const
IntervalPressure RegPressure
RegPressureTracker TopRPTracker
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
std::unique_ptr< MachineSchedStrategy > SchedImpl
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
MachineBasicBlock::iterator top() const
ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S, bool RemoveKillFlags)
MachineBasicBlock::iterator bottom() const
MachineBasicBlock::iterator CurrentBottom
The bottom of the unscheduled zone.
bool doMBBSchedRegionsTopDown() const override
If this method returns true, handling of the scheduling regions themselves (in case of a scheduling b...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
LiveIntervals * getLIS() const
~ScheduleDAGMI() override
MachineBasicBlock::iterator CurrentTop
The top of the unscheduled zone.
MachineBlockFrequencyInfo * MBFI
std::vector< std::unique_ptr< ScheduleDAGMutation > > Mutations
Ordered list of DAG postprocessing steps.
const TargetInstrInfo * TII
Target instruction information.
const TargetRegisterInfo * TRI
Target processor register info.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual std::vector< MacroFusionPredTy > getMacroFusions() const
Get the list of MacroFusion predicates.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
Impl class for MachineScheduler.
Impl class for PostMachineScheduler.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
template class LLVM_TEMPLATE_ABI opt< bool >
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
ScheduleDAGMILive * createSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1763
LLVM_ABI unsigned getWeakLeft(const SUnit *SU, bool isTop)
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createMacroFusionDAGMutation(ArrayRef< MacroFusionPredTy > Predicates, bool BranchOnly=false)
Create a DAG scheduling mutation to pair instructions back to back for instructions that benefit acco...
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI bool tryPressure(const PressureChange &TryP, const PressureChange &CandP, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason, const TargetRegisterInfo *TRI, const MachineFunction &MF)
SparseMultiSet< VReg2SUnit, Register, VirtReg2IndexFunctor > VReg2SUnitMultiMap
Track local uses of virtual registers.
ScheduleDAGMI * createSchedPostRA(MachineSchedContext *C)
Create a generic scheduler with no vreg liveness or DAG mutation passes.
cl::opt< bool > ViewMISchedDAGs
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI cl::opt< bool > VerifyScheduling
LLVM_ABI bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, SchedBoundary &Zone)
@ Other
Any other memory.
Definition ModRef.h:68
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI bool tryGreater(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
LLVM_ABI bool tryLess(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
Return true if this heuristic determines order.
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createCopyConstrainDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
LLVM_ABI cl::opt< MISched::Direction > PreRADirection
LLVM_ABI int biasPhysReg(const SUnit *SU, bool isTop)
Minimize physical register live ranges.
cl::opt< bool > PrintDAGs
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define N
Policy for scheduling the next instruction in the candidate's zone.
bool operator==(const CandPolicy &RHS) const
bool operator!=(const CandPolicy &RHS) const
Store the state used by GenericScheduler heuristics, required for the lifetime of one invocation of p...
void reset(const CandPolicy &NewPolicy)
LLVM_ABI void initResourceDelta(const ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
Status of an instruction's critical resource consumption.
bool operator!=(const SchedResourceDelta &RHS) const
bool operator==(const SchedResourceDelta &RHS) const
RegisterPressure computed within a region of instructions delimited by TopIdx and BottomIdx.
Summarize the scheduling resources required for an instruction of a particular scheduling class.
Definition MCSchedule.h:123
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
const MachineDominatorTree * MDT
RegisterClassInfo * RegClassInfo
MachineBlockFrequencyInfo * MBFI
const MachineLoopInfo * MLI
const TargetMachine * TM
MachineSchedContext & operator=(const MachineSchedContext &other)=delete
MachineSchedContext(const MachineSchedContext &other)=delete
Define a generic scheduling policy for targets that don't provide their own MachineSchedStrategy.
bool ShouldTrackLaneMasks
Track LaneMasks to allow reordering of independent subregister writes of the same vreg.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
Store the effects of a change in pressure on things that MI scheduler cares about.
MachineBasicBlock::iterator RegionBegin
RegionBegin is the first instruction in the scheduling region, and RegionEnd is either MBB->end() or ...
MachineBasicBlock::iterator RegionEnd
SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, unsigned N)
Summarize the unscheduled region.
LLVM_ABI void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
SmallVector< unsigned, 16 > RemainingCounts