Line data Source code
1 : //===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 : //
10 : /// \file Implements the ScheduleDAG class, which is used as the common base
11 : /// class for instruction schedulers. This encapsulates the scheduling DAG,
12 : /// which is shared between SelectionDAG and MachineInstr scheduling.
13 : //
14 : //===----------------------------------------------------------------------===//
15 :
16 : #ifndef LLVM_CODEGEN_SCHEDULEDAG_H
17 : #define LLVM_CODEGEN_SCHEDULEDAG_H
18 :
19 : #include "llvm/ADT/BitVector.h"
20 : #include "llvm/ADT/GraphTraits.h"
21 : #include "llvm/ADT/PointerIntPair.h"
22 : #include "llvm/ADT/SmallVector.h"
23 : #include "llvm/ADT/iterator.h"
24 : #include "llvm/CodeGen/MachineInstr.h"
25 : #include "llvm/CodeGen/TargetLowering.h"
26 : #include "llvm/Support/ErrorHandling.h"
27 : #include <cassert>
28 : #include <cstddef>
29 : #include <iterator>
30 : #include <string>
31 : #include <vector>
32 :
33 : namespace llvm {
34 :
35 : template<class Graph> class GraphWriter;
36 : class MachineFunction;
37 : class MachineRegisterInfo;
38 : class MCInstrDesc;
39 : struct MCSchedClassDesc;
40 : class ScheduleDAG;
41 : class SDNode;
42 : class SUnit;
43 : class TargetInstrInfo;
44 : class TargetMachine;
45 : class TargetRegisterClass;
46 : class TargetRegisterInfo;
47 :
48 : /// Scheduling dependency. This represents one direction of an edge in the
49 : /// scheduling DAG.
50 : class SDep {
51 : public:
52 : /// These are the different kinds of scheduling dependencies.
53 : enum Kind {
54 : Data, ///< Regular data dependence (aka true-dependence).
55 : Anti, ///< A register anti-dependence (aka WAR).
56 : Output, ///< A register output-dependence (aka WAW).
57 : Order ///< Any other ordering dependency.
58 : };
59 :
60 : // Strong dependencies must be respected by the scheduler. Artificial
61 : // dependencies may be removed only if they are redundant with another
62 : // strong dependence.
63 : //
64 : // Weak dependencies may be violated by the scheduling strategy, but only if
65 : // the strategy can prove it is correct to do so.
66 : //
67 : // Strong OrderKinds must occur before "Weak".
68 : // Weak OrderKinds must occur after "Weak".
69 : enum OrderKind {
70 : Barrier, ///< An unknown scheduling barrier.
71 : MayAliasMem, ///< Nonvolatile load/store instructions that may alias.
72 : MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
73 : Artificial, ///< Arbitrary strong DAG edge (no real dependence).
74 : Weak, ///< Arbitrary weak DAG edge.
75 : Cluster ///< Weak DAG edge linking a chain of clustered instrs.
76 : };
77 :
78 : private:
79 : /// A pointer to the depending/depended-on SUnit, and an enum
80 : /// indicating the kind of the dependency.
81 : PointerIntPair<SUnit *, 2, Kind> Dep;
82 :
83 : /// A union discriminated by the dependence kind.
84 : union {
85 : /// For Data, Anti, and Output dependencies, the associated register. For
86 : /// Data dependencies that don't currently have a register assigned, this
87 : /// is set to zero.
88 : unsigned Reg;
89 :
90 : /// Additional information about Order dependencies.
91 : unsigned OrdKind; // enum OrderKind
92 : } Contents;
93 :
94 : /// The time associated with this edge. Often this is just the value of the
95 : /// Latency field of the predecessor; however, advanced models may provide
96 : /// additional information about specific edges.
97 : unsigned Latency;
98 :
99 : public:
100 : /// Constructs a null SDep. This is only for use by container classes which
101 : /// require default constructors. SUnits may not have null SDep edges.
102 2420907 : SDep() : Dep(nullptr, Data) {}
103 :
104 : /// Constructs an SDep with the specified values.
105 : SDep(SUnit *S, Kind kind, unsigned Reg)
106 12770428 : : Dep(S, kind), Contents() {
107 : switch (kind) {
108 : default:
109 : llvm_unreachable("Reg given for non-register dependence!");
110 : case Anti:
111 : case Output:
112 : assert(Reg != 0 &&
113 : "SDep::Anti and SDep::Output must use a non-zero Reg!");
114 1974768 : Contents.Reg = Reg;
115 1973532 : Latency = 0;
116 : break;
117 : case Data:
118 10795388 : Contents.Reg = Reg;
119 1736002 : Latency = 1;
120 : break;
121 : }
122 : }
123 :
124 : SDep(SUnit *S, OrderKind kind)
125 14591142 : : Dep(S, Order), Contents(), Latency(0) {
126 11929898 : Contents.OrdKind = kind;
127 : }
128 :
129 : /// Returns true if the specified SDep is equivalent except for latency.
130 : bool overlaps(const SDep &Other) const;
131 :
132 12581 : bool operator==(const SDep &Other) const {
133 6608 : return overlaps(Other) && Latency == Other.Latency;
134 : }
135 :
136 : bool operator!=(const SDep &Other) const {
137 : return !operator==(Other);
138 : }
139 :
140 : /// Returns the latency value for this edge, which roughly means the
141 : /// minimum number of cycles that must elapse between the predecessor and
142 : /// the successor, given that they have this edge between them.
143 0 : unsigned getLatency() const {
144 0 : return Latency;
145 : }
146 :
147 : /// Sets the latency for this edge.
148 0 : void setLatency(unsigned Lat) {
149 28958865 : Latency = Lat;
150 0 : }
151 :
152 : /// Returns the SUnit to which this edge points.
153 : SUnit *getSUnit() const;
154 :
155 : /// Assigns the SUnit to which this edge points.
156 : void setSUnit(SUnit *SU);
157 :
158 : /// Returns an enum value representing the kind of the dependence.
159 : Kind getKind() const;
160 :
161 : /// Shorthand for getKind() != SDep::Data.
162 : bool isCtrl() const {
163 : return getKind() != Data;
164 : }
165 :
166 : /// Tests if this is an Order dependence between two memory accesses
167 : /// where both sides of the dependence access memory in non-volatile and
168 : /// fully modeled ways.
169 : bool isNormalMemory() const {
170 0 : return getKind() == Order && (Contents.OrdKind == MayAliasMem
171 0 : || Contents.OrdKind == MustAliasMem);
172 : }
173 :
174 : /// Tests if this is an Order dependence that is marked as a barrier.
175 : bool isBarrier() const {
176 0 : return getKind() == Order && Contents.OrdKind == Barrier;
177 : }
178 :
179 : /// Tests if this could be any kind of memory dependence.
180 : bool isNormalMemoryOrBarrier() const {
181 : return (isNormalMemory() || isBarrier());
182 : }
183 :
184 : /// Tests if this is an Order dependence that is marked as
185 : /// "must alias", meaning that the SUnits at either end of the edge have a
186 : /// memory dependence on a known memory location.
187 : bool isMustAlias() const {
188 : return getKind() == Order && Contents.OrdKind == MustAliasMem;
189 : }
190 :
191 : /// Tests if this is a weak dependence. Weak dependencies are considered DAG
192 : /// edges for height computation and other heuristics, but do not force
193 : /// ordering. Breaking a weak edge may require the scheduler to compensate,
194 : /// for example by inserting a copy.
195 : bool isWeak() const {
196 58886705 : return getKind() == Order && Contents.OrdKind >= Weak;
197 : }
198 :
199 : /// Tests if this is an Order dependence that is marked as
200 : /// "artificial", meaning it isn't necessary for correctness.
201 : bool isArtificial() const {
202 258003 : return getKind() == Order && Contents.OrdKind == Artificial;
203 : }
204 :
205 : /// Tests if this is an Order dependence that is marked as "cluster",
206 : /// meaning it is artificial and wants to be adjacent.
207 : bool isCluster() const {
208 161053 : return getKind() == Order && Contents.OrdKind == Cluster;
209 : }
210 :
211 : /// Tests if this is a Data dependence that is associated with a register.
212 : bool isAssignedRegDep() const {
213 41879702 : return getKind() == Data && Contents.Reg != 0;
214 : }
215 :
216 : /// Returns the register associated with this edge. This is only valid on
217 : /// Data, Anti, and Output edges. On Data edges, this value may be zero,
218 : /// meaning there is no associated register.
219 0 : unsigned getReg() const {
220 : assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
221 : "getReg called on non-register dependence edge!");
222 0 : return Contents.Reg;
223 : }
224 :
225 : /// Assigns the associated register for this edge. This is only valid on
226 : /// Data, Anti, and Output edges. On Anti and Output edges, this value must
227 : /// not be zero. On Data edges, the value may be zero, which would mean that
228 : /// no specific register is associated with this edge.
229 : void setReg(unsigned Reg) {
230 : assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
231 : "setReg called on non-register dependence edge!");
232 : assert((getKind() != Anti || Reg != 0) &&
233 : "SDep::Anti edge cannot use the zero register!");
234 : assert((getKind() != Output || Reg != 0) &&
235 : "SDep::Output edge cannot use the zero register!");
236 : Contents.Reg = Reg;
237 : }
238 :
239 : void dump(const TargetRegisterInfo *TRI = nullptr) const;
240 : };
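// Editorial sketch, not part of the original header: constructing the two
// flavours of SDep edge declared above. `DefSU` stands for a hypothetical
// predecessor SUnit owned by a scheduler.
//
//   SDep DataEdge(DefSU, SDep::Data, /*Reg=*/0); // true dependence; latency defaults to 1
//   DataEdge.setLatency(2);                      // e.g. a model-provided operand latency
//
//   SDep MemEdge(DefSU, SDep::MayAliasMem);      // Order edge between memory accesses
//   assert(MemEdge.isNormalMemory() && !MemEdge.isWeak());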
241 :
242 : template <>
243 : struct isPodLike<SDep> { static const bool value = true; };
244 :
245 : /// Scheduling unit. This is a node in the scheduling DAG.
246 : class SUnit {
247 : private:
248 : enum : unsigned { BoundaryID = ~0u };
249 :
250 : SDNode *Node = nullptr; ///< Representative node.
251 : MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.
252 :
253 : public:
254 : SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
255 : /// was cloned. (SD scheduling only)
256 :
257 : const MCSchedClassDesc *SchedClass =
258 : nullptr; ///< nullptr or resolved SchedClass.
259 :
260 : SmallVector<SDep, 4> Preds; ///< All sunit predecessors.
261 : SmallVector<SDep, 4> Succs; ///< All sunit successors.
262 :
263 : typedef SmallVectorImpl<SDep>::iterator pred_iterator;
264 : typedef SmallVectorImpl<SDep>::iterator succ_iterator;
265 : typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
266 : typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;
267 :
268 : unsigned NodeNum = BoundaryID; ///< Entry # of node in the node vector.
269 : unsigned NodeQueueId = 0; ///< Queue id of node.
270 : unsigned NumPreds = 0; ///< # of SDep::Data preds.
271 : unsigned NumSuccs = 0; ///< # of SDep::Data succs.
272 : unsigned NumPredsLeft = 0; ///< # of preds not scheduled.
273 : unsigned NumSuccsLeft = 0; ///< # of succs not scheduled.
274 : unsigned WeakPredsLeft = 0; ///< # of weak preds not scheduled.
275 : unsigned WeakSuccsLeft = 0; ///< # of weak succs not scheduled.
276 : unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
277 : unsigned short Latency = 0; ///< Node latency.
278 : bool isVRegCycle : 1; ///< May use and def the same vreg.
279 : bool isCall : 1; ///< Is a function call.
280 : bool isCallOp : 1; ///< Is a function call operand.
281 : bool isTwoAddress : 1; ///< Is a two-address instruction.
282 : bool isCommutable : 1; ///< Is a commutable instruction.
283 : bool hasPhysRegUses : 1; ///< Has physreg uses.
284 : bool hasPhysRegDefs : 1; ///< Has physreg defs that are being used.
285 : bool hasPhysRegClobbers : 1; ///< Has any physreg defs, used or not.
286 : bool isPending : 1; ///< True once pending.
287 : bool isAvailable : 1; ///< True once available.
288 : bool isScheduled : 1; ///< True once scheduled.
289 : bool isScheduleHigh : 1; ///< True if preferable to schedule high.
290 : bool isScheduleLow : 1; ///< True if preferable to schedule low.
291 : bool isCloned : 1; ///< True if this node has been cloned.
292 : bool isUnbuffered : 1; ///< Uses an unbuffered resource.
293 : bool hasReservedResource : 1; ///< Uses a reserved resource.
294 : Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.
295 :
296 : private:
297 : bool isDepthCurrent : 1; ///< True if Depth is current.
298 : bool isHeightCurrent : 1; ///< True if Height is current.
299 : unsigned Depth = 0; ///< Node depth.
300 : unsigned Height = 0; ///< Node height.
301 :
302 : public:
303 : unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
304 : unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.
305 :
306 : const TargetRegisterClass *CopyDstRC =
307 : nullptr; ///< Is a special copy node if != nullptr.
308 : const TargetRegisterClass *CopySrcRC = nullptr;
309 :
310 : /// Constructs an SUnit for pre-regalloc scheduling to represent an
311 : /// SDNode and any nodes flagged to it.
312 16310482 : SUnit(SDNode *node, unsigned nodenum)
313 16310482 : : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
314 : isCallOp(false), isTwoAddress(false), isCommutable(false),
315 : hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
316 : isPending(false), isAvailable(false), isScheduled(false),
317 : isScheduleHigh(false), isScheduleLow(false), isCloned(false),
318 : isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
319 32620964 : isHeightCurrent(false) {}
320 :
321 : /// Constructs an SUnit for post-regalloc scheduling to represent a
322 : /// MachineInstr.
323 3819338 : SUnit(MachineInstr *instr, unsigned nodenum)
324 3819338 : : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
325 : isCallOp(false), isTwoAddress(false), isCommutable(false),
326 : hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
327 : isPending(false), isAvailable(false), isScheduled(false),
328 : isScheduleHigh(false), isScheduleLow(false), isCloned(false),
329 : isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
330 7638676 : isHeightCurrent(false) {}
331 :
332 : /// Constructs a placeholder SUnit.
333 7354172 : SUnit()
334 7354172 : : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
335 : isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
336 : hasPhysRegClobbers(false), isPending(false), isAvailable(false),
337 : isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
338 : isCloned(false), isUnbuffered(false), hasReservedResource(false),
339 14708344 : isDepthCurrent(false), isHeightCurrent(false) {}
340 :
341 : /// Boundary nodes are placeholders for the boundary of the
342 : /// scheduling region.
343 : ///
344 : /// BoundaryNodes can have DAG edges, including Data edges, but they do not
345 : /// correspond to schedulable entities (e.g. instructions) and do not have a
346 : /// valid ID. Consequently, always check for boundary nodes before accessing
347 : /// an associative data structure keyed on node ID.
348 0 : bool isBoundaryNode() const { return NodeNum == BoundaryID; }
349 :
350 : /// Assigns the representative SDNode for this SUnit. This may be used
351 : /// during pre-regalloc scheduling.
352 0 : void setNode(SDNode *N) {
353 : assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
354 16309808 : Node = N;
355 0 : }
356 :
357 : /// Returns the representative SDNode for this SUnit. This may be used
358 : /// during pre-regalloc scheduling.
359 0 : SDNode *getNode() const {
360 : assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
361 0 : return Node;
362 : }
363 :
364 : /// Returns true if this SUnit refers to a machine instruction as
365 : /// opposed to an SDNode.
366 0 : bool isInstr() const { return Instr; }
367 :
368 : /// Assigns the instruction for the SUnit. This may be used during
369 : /// post-regalloc scheduling.
370 0 : void setInstr(MachineInstr *MI) {
371 : assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
372 895309 : Instr = MI;
373 0 : }
374 :
375 : /// Returns the representative MachineInstr for this SUnit. This may be used
376 : /// during post-regalloc scheduling.
377 0 : MachineInstr *getInstr() const {
378 : assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
379 0 : return Instr;
380 : }
381 :
382 : /// Adds the specified edge as a pred of the current node if it is not already
383 : /// present. It also adds the current node as a successor of the specified node.
384 : bool addPred(const SDep &D, bool Required = true);
385 :
386 : /// Adds a barrier edge to SU by calling addPred(), with latency 0
387 : /// generally or latency 1 for a store followed by a load.
388 152301 : bool addPredBarrier(SUnit *SU) {
389 : SDep Dep(SU, SDep::Barrier);
390 : unsigned TrueMemOrderLatency =
391 152301 : ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
392 : Dep.setLatency(TrueMemOrderLatency);
393 152301 : return addPred(Dep);
394 : }
395 :
396 : /// Removes the specified edge as a pred of the current node if it exists.
397 : /// It also removes the current node as a successor of the specified node.
398 : void removePred(const SDep &D);
399 :
400 : /// Returns the depth of this node, which is the length of the maximum path
401 : /// up to any node which has no predecessors.
402 : unsigned getDepth() const {
403 24275627 : if (!isDepthCurrent)
404 4129312 : const_cast<SUnit *>(this)->ComputeDepth();
405 24052291 : return Depth;
406 : }
407 :
408 : /// Returns the height of this node, which is the length of the
409 : /// maximum path down to any node which has no successors.
410 : unsigned getHeight() const {
411 77518259 : if (!isHeightCurrent)
412 18885513 : const_cast<SUnit *>(this)->ComputeHeight();
413 63182512 : return Height;
414 : }
415 :
416 : /// If NewDepth is greater than this node's depth value, sets it to
417 : /// be the new depth value. This also recursively marks successor nodes
418 : /// dirty.
419 : void setDepthToAtLeast(unsigned NewDepth);
420 :
421 : /// If NewHeight is greater than this node's height value, sets it to be
422 : /// the new height value. This also recursively marks predecessor nodes
423 : /// dirty.
424 : void setHeightToAtLeast(unsigned NewHeight);
425 :
426 : /// Sets a flag in this node to indicate that its stored Depth value
427 : /// will require recomputation the next time getDepth() is called.
428 : void setDepthDirty();
429 :
430 : /// Sets a flag in this node to indicate that its stored Height value
431 : /// will require recomputation the next time getHeight() is called.
432 : void setHeightDirty();
433 :
434 : /// Tests if node N is a predecessor of this node.
435 : bool isPred(const SUnit *N) const {
436 20638 : for (const SDep &Pred : Preds)
437 17260 : if (Pred.getSUnit() == N)
438 : return true;
439 : return false;
440 : }
441 :
442 : /// Tests if node N is a successor of this node.
443 : bool isSucc(const SUnit *N) const {
444 344085 : for (const SDep &Succ : Succs)
445 263081 : if (Succ.getSUnit() == N)
446 : return true;
447 : return false;
448 : }
449 :
450 0 : bool isTopReady() const {
451 0 : return NumPredsLeft == 0;
452 : }
453 0 : bool isBottomReady() const {
454 0 : return NumSuccsLeft == 0;
455 : }
456 :
457 : /// Orders this node's predecessor edges such that the critical path
458 : /// edge occurs first.
459 : void biasCriticalPath();
460 :
461 : void dumpAttributes() const;
462 :
463 : private:
464 : void ComputeDepth();
465 : void ComputeHeight();
466 : };
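// Editorial sketch, not part of the original header: typical SUnit edge
// bookkeeping. `DefSU`, `UseSU`, and `NumAddedEdges` are hypothetical names;
// addPred() returns true only when a new edge was actually recorded.
//
//   if (UseSU->addPred(SDep(DefSU, SDep::Data, /*Reg=*/0)))
//     ++NumAddedEdges;                          // UseSU now also appears in DefSU->Succs
//   unsigned PathThroughEdge =
//       DefSU->getDepth() + UseSU->getHeight(); // both values are computed lazily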
467 :
468 : /// Returns true if the specified SDep is equivalent except for latency.
469 : inline bool SDep::overlaps(const SDep &Other) const {
470 76237170 : if (Dep != Other.Dep)
471 : return false;
472 3096352 : switch (Dep.getInt()) {
473 930505 : case Data:
474 : case Anti:
475 : case Output:
476 930505 : return Contents.Reg == Other.Contents.Reg;
477 2165847 : case Order:
478 2165847 : return Contents.OrdKind == Other.Contents.OrdKind;
479 : }
480 0 : llvm_unreachable("Invalid dependency kind!");
481 : }
482 :
483 : /// Returns the SUnit to which this edge points.
484 156829021 : inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }
485 :
486 : /// Assigns the SUnit to which this edge points.
487 : inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }
488 :
489 : /// Returns an enum value representing the kind of the dependence.
490 178669307 : inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }
491 :
492 : //===--------------------------------------------------------------------===//
493 :
494 : /// This interface is used to plug different priority computation
495 : /// algorithms into the list scheduler. It implements the interface of a
496 : /// standard priority queue, where nodes are inserted in arbitrary order and
497 : /// returned in priority order. The computation of the priority and the
498 : /// representation of the queue are totally up to the implementation to
499 : /// decide.
500 : class SchedulingPriorityQueue {
501 : virtual void anchor();
502 :
503 : unsigned CurCycle = 0;
504 : bool HasReadyFilter;
505 :
506 : public:
507 1306402 : SchedulingPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}
508 :
509 0 : virtual ~SchedulingPriorityQueue() = default;
510 :
511 : virtual bool isBottomUp() const = 0;
512 :
513 : virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
514 : virtual void addNode(const SUnit *SU) = 0;
515 : virtual void updateNode(const SUnit *SU) = 0;
516 : virtual void releaseState() = 0;
517 :
518 : virtual bool empty() const = 0;
519 :
520 0 : bool hasReadyFilter() const { return HasReadyFilter; }
521 :
522 0 : virtual bool tracksRegPressure() const { return false; }
523 :
524 0 : virtual bool isReady(SUnit *) const {
525 : assert(!HasReadyFilter && "The ready filter must override isReady()");
526 0 : return true;
527 : }
528 :
529 : virtual void push(SUnit *U) = 0;
530 :
531 : void push_all(const std::vector<SUnit *> &Nodes) {
532 : for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
533 53453 : E = Nodes.end(); I != E; ++I)
534 35686 : push(*I);
535 : }
536 :
537 : virtual SUnit *pop() = 0;
538 :
539 : virtual void remove(SUnit *SU) = 0;
540 :
541 0 : virtual void dump(ScheduleDAG *) const {}
542 :
543 : /// As each node is scheduled, this method is invoked. This allows the
544 : /// priority function to adjust the priority of related unscheduled nodes,
545 : /// for example.
546 0 : virtual void scheduledNode(SUnit *) {}
547 :
548 0 : virtual void unscheduledNode(SUnit *) {}
549 :
550 0 : void setCurCycle(unsigned Cycle) {
551 16287476 : CurCycle = Cycle;
552 0 : }
553 :
554 0 : unsigned getCurCycle() const {
555 0 : return CurCycle;
556 : }
557 : };
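// Editorial sketch, not part of the original header: a minimal FIFO
// implementation of the SchedulingPriorityQueue interface, assuming
// <algorithm> is available for std::find. A real queue would order SUnits by
// a priority heuristic rather than insertion order.
//
//   struct FIFOQueue : public SchedulingPriorityQueue {
//     std::vector<SUnit *> Q;
//     bool isBottomUp() const override { return false; }
//     void initNodes(std::vector<SUnit> &) override {}
//     void addNode(const SUnit *) override {}
//     void updateNode(const SUnit *) override {}
//     void releaseState() override { Q.clear(); }
//     bool empty() const override { return Q.empty(); }
//     void push(SUnit *U) override { Q.push_back(U); }
//     SUnit *pop() override {
//       if (Q.empty()) return nullptr;
//       SUnit *SU = Q.front();
//       Q.erase(Q.begin());
//       return SU;
//     }
//     void remove(SUnit *SU) override {
//       auto I = std::find(Q.begin(), Q.end(), SU);
//       if (I != Q.end()) Q.erase(I);
//     }
//   };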
558 :
559 1503384 : class ScheduleDAG {
560 : public:
561 : const TargetMachine &TM; ///< Target processor
562 : const TargetInstrInfo *TII; ///< Target instruction information
563 : const TargetRegisterInfo *TRI; ///< Target processor register info
564 : MachineFunction &MF; ///< Machine function
565 : MachineRegisterInfo &MRI; ///< Virtual/real register map
566 : std::vector<SUnit> SUnits; ///< The scheduling units.
567 : SUnit EntrySU; ///< Special node for the region entry.
568 : SUnit ExitSU; ///< Special node for the region exit.
569 :
570 : #ifdef NDEBUG
571 : static const bool StressSched = false;
572 : #else
573 : bool StressSched;
574 : #endif
575 :
576 : explicit ScheduleDAG(MachineFunction &mf);
577 :
578 : virtual ~ScheduleDAG();
579 :
580 : /// Clears the DAG state (between regions).
581 : void clearDAG();
582 :
583 : /// Returns the MCInstrDesc of this SUnit.
584 : /// Returns NULL for SDNodes without a machine opcode.
585 : const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
586 762756 : if (SU->isInstr()) return &SU->getInstr()->getDesc();
587 341509 : return getNodeDesc(SU->getNode());
588 : }
589 :
590 : /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
591 : virtual void viewGraph(const Twine &Name, const Twine &Title);
592 : virtual void viewGraph();
593 :
594 : virtual void dumpNode(const SUnit &SU) const = 0;
595 : virtual void dump() const = 0;
596 : void dumpNodeName(const SUnit &SU) const;
597 :
598 : /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
599 : virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
600 :
601 : /// Returns a label for the region of code covered by the DAG.
602 : virtual std::string getDAGName() const = 0;
603 :
604 : /// Adds custom features for a visualization of the ScheduleDAG.
605 0 : virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
606 :
607 : #ifndef NDEBUG
608 : /// Verifies that all SUnits were scheduled and that their state is
609 : /// consistent. Returns the number of scheduled SUnits.
610 : unsigned VerifyScheduledDAG(bool isBottomUp);
611 : #endif
612 :
613 : protected:
614 : void dumpNodeAll(const SUnit &SU) const;
615 :
616 : private:
617 : /// Returns the MCInstrDesc of this SDNode or NULL.
618 : const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
619 : };
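// Editorial sketch, not part of the original header: walking a built DAG.
// `DAG` is any ScheduleDAG-derived object and `PerNodeData` a hypothetical
// container indexed by NodeNum; boundary nodes (EntrySU/ExitSU) are skipped
// before indexing, as the SUnit documentation above requires.
//
//   for (const SUnit &SU : DAG.SUnits)
//     for (const SDep &Pred : SU.Preds)
//       if (!Pred.getSUnit()->isBoundaryNode())
//         PerNodeData[Pred.getSUnit()->NodeNum] += Pred.getLatency();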
620 :
621 : class SUnitIterator : public std::iterator<std::forward_iterator_tag,
622 : SUnit, ptrdiff_t> {
623 : SUnit *Node;
624 : unsigned Operand;
625 :
626 : SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
627 :
628 : public:
629 : bool operator==(const SUnitIterator& x) const {
630 : return Operand == x.Operand;
631 : }
632 : bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
633 :
634 : pointer operator*() const {
635 : return Node->Preds[Operand].getSUnit();
636 : }
637 : pointer operator->() const { return operator*(); }
638 :
639 : SUnitIterator& operator++() { // Preincrement
640 : ++Operand;
641 : return *this;
642 : }
643 : SUnitIterator operator++(int) { // Postincrement
644 : SUnitIterator tmp = *this; ++*this; return tmp;
645 : }
646 :
647 : static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
648 : static SUnitIterator end (SUnit *N) {
649 : return SUnitIterator(N, (unsigned)N->Preds.size());
650 : }
651 :
652 : unsigned getOperand() const { return Operand; }
653 : const SUnit *getNode() const { return Node; }
654 :
655 : /// Tests if this is not an SDep::Data dependence.
656 : bool isCtrlDep() const {
657 : return getSDep().isCtrl();
658 : }
659 : bool isArtificialDep() const {
660 : return getSDep().isArtificial();
661 : }
662 : const SDep &getSDep() const {
663 : return Node->Preds[Operand];
664 : }
665 : };
666 :
667 : template <> struct GraphTraits<SUnit*> {
668 : typedef SUnit *NodeRef;
669 : typedef SUnitIterator ChildIteratorType;
670 : static NodeRef getEntryNode(SUnit *N) { return N; }
671 : static ChildIteratorType child_begin(NodeRef N) {
672 : return SUnitIterator::begin(N);
673 : }
674 : static ChildIteratorType child_end(NodeRef N) {
675 : return SUnitIterator::end(N);
676 : }
677 : };
678 :
679 : template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
680 : typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
681 : static nodes_iterator nodes_begin(ScheduleDAG *G) {
682 : return nodes_iterator(G->SUnits.begin());
683 : }
684 : static nodes_iterator nodes_end(ScheduleDAG *G) {
685 : return nodes_iterator(G->SUnits.end());
686 : }
687 : };
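// Editorial sketch, not part of the original header: the GraphTraits
// specializations let generic graph utilities walk the DAG. For example,
// assuming "llvm/ADT/DepthFirstIterator.h" is included, a depth-first walk
// starting at a hypothetical `SomeSU` looks like:
//
//   for (SUnit *N : depth_first(SomeSU))  // child edges follow N->Preds
//     visit(N);                           // `visit` is a hypothetical callback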
688 :
689 : /// This class can compute a topological ordering for SUnits and provides
690 : /// methods for dynamically updating the ordering as new edges are added.
691 : ///
692 : /// This allows a very fast implementation of IsReachable, for example.
693 : class ScheduleDAGTopologicalSort {
694 : /// A reference to the ScheduleDAG's SUnits.
695 : std::vector<SUnit> &SUnits;
696 : SUnit *ExitSU;
697 :
698 : /// Maps topological index to the node number.
699 : std::vector<int> Index2Node;
700 : /// Maps the node number to its topological index.
701 : std::vector<int> Node2Index;
702 : /// A set of nodes visited during a DFS traversal.
703 : BitVector Visited;
704 :
705 : /// Performs a DFS traversal and marks all nodes affected by the edge insertion.
706 : /// These nodes will later get new topological indexes by means of the Shift
707 : /// method.
708 : void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
709 :
710 : /// Reassigns topological indexes for the nodes in the DAG to
711 : /// preserve the topological ordering.
712 : void Shift(BitVector& Visited, int LowerBound, int UpperBound);
713 :
714 : /// Assigns the topological index to the node n.
715 : void Allocate(int n, int index);
716 :
717 : public:
718 : ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
719 :
720 : /// Creates the initial topological ordering from the DAG to be scheduled.
721 : void InitDAGTopologicalSorting();
722 :
723 : /// Returns an array of SUs that are both in the successor
724 : /// subtree of StartSU and in the predecessor subtree of TargetSU.
725 : /// StartSU and TargetSU are not in the array.
726 : /// Success is false if TargetSU is not in the successor subtree of
727 : /// StartSU, else it is true.
728 : std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
729 : bool &Success);
730 :
731 : /// Checks if \p SU is reachable from \p TargetSU.
732 : bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
733 :
734 : /// Returns true if addPred(TargetSU, SU) creates a cycle.
735 : bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
736 :
737 : /// Updates the topological ordering to accommodate an edge to be
738 : /// added from SUnit \p X to SUnit \p Y.
739 : void AddPred(SUnit *Y, SUnit *X);
740 :
741 : /// Updates the topological ordering to accommodate an edge to be
742 : /// removed from the specified node \p N, a predecessor of the
743 : /// current node \p M.
744 : void RemovePred(SUnit *M, SUnit *N);
745 :
746 : typedef std::vector<int>::iterator iterator;
747 : typedef std::vector<int>::const_iterator const_iterator;
748 : iterator begin() { return Index2Node.begin(); }
749 : const_iterator begin() const { return Index2Node.begin(); }
750 : iterator end() { return Index2Node.end(); }
751 : const_iterator end() const { return Index2Node.end(); }
752 :
753 : typedef std::vector<int>::reverse_iterator reverse_iterator;
754 : typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
755 : reverse_iterator rbegin() { return Index2Node.rbegin(); }
756 : const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
757 : reverse_iterator rend() { return Index2Node.rend(); }
758 : const_reverse_iterator rend() const { return Index2Node.rend(); }
759 : };
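// Editorial sketch, not part of the original header: keeping the topological
// order consistent while adding an artificial edge. `SUnits`, `ExitSU`,
// `PredSU`, and `SuccSU` are hypothetical objects owned by a scheduler.
//
//   ScheduleDAGTopologicalSort Topo(SUnits, &ExitSU);
//   Topo.InitDAGTopologicalSorting();
//   if (!Topo.WillCreateCycle(SuccSU, PredSU)) {
//     SuccSU->addPred(SDep(PredSU, SDep::Artificial));
//     Topo.AddPred(SuccSU, PredSU);       // record the new edge in the ordering
//   }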
760 :
761 : } // end namespace llvm
762 :
763 : #endif // LLVM_CODEGEN_SCHEDULEDAG_H