Bug Summary

File: lib/CodeGen/MachinePipeliner.cpp
Warning: line 2665, column 11
Value stored to 'NewReg' is never read

Annotated Source Code

1//===-- MachinePipeliner.cpp - Machine Software Pipeliner Pass ------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// An implementation of the Swing Modulo Scheduling (SMS) software pipeliner.
11//
12// Software pipelining (SWP) is an instruction scheduling technique for loops
13// that overlaps loop iterations and exploits ILP via a compiler transformation.
14//
15// Swing Modulo Scheduling is an implementation of software pipelining
16// that generates schedules that are near optimal in terms of initiation
17// interval, register requirements, and stage count. See the papers:
18//
19// "Swing Modulo Scheduling: A Lifetime-Sensitive Approach", by J. Llosa,
20// A. Gonzalez, E. Ayguade, and M. Valero. In PACT '96 Proceedings of the 1996
21// Conference on Parallel Architectures and Compilation Techniques.
22//
23// "Lifetime-Sensitive Modulo Scheduling in a Production Environment", by J.
24// Llosa, E. Ayguade, A. Gonzalez, M. Valero, and J. Eckhardt. In IEEE
25// Transactions on Computers, Vol. 50, No. 3, 2001.
26//
27// "An Implementation of Swing Modulo Scheduling With Extensions for
28// Superblocks", by T. Lattner, Master's Thesis, University of Illinois at
29// Urbana-Champaign, 2005.
30//
31//
32// The SMS algorithm consists of three main steps after computing the minimal
33// initiation interval (MII).
34// 1) Analyze the dependence graph and compute information about each
35// instruction in the graph.
36// 2) Order the nodes (instructions) by priority based upon the heuristics
37// described in the algorithm.
38// 3) Attempt to schedule the nodes in the specified order using the MII.
39//
40// This SMS implementation is a target-independent back-end pass. When enabled,
41// the pass runs just prior to the register allocation pass, while the machine
42// IR is in SSA form. If software pipelining is successful, then the original
43// loop is replaced by the optimized loop. The optimized loop contains one or
44// more prolog blocks, the pipelined kernel, and one or more epilog blocks. If
45// the instructions cannot be scheduled in a given MII, we increase the MII by
46// one and try again.
47//
48// The SMS implementation is an extension of the ScheduleDAGInstrs class. We
49// represent loop carried dependences in the DAG as order edges to the Phi
50// nodes. We also perform several passes over the DAG to eliminate unnecessary
51// edges that inhibit the ability to pipeline. The implementation uses the
52// DFAPacketizer class to compute the minimum initiation interval and to check
53// where an instruction may be inserted in the pipelined schedule.
54//
55// In order for the SMS pass to work, several target specific hooks need to be
56// implemented to get information about the loop structure and to rewrite
57// instructions.
58//
59//===----------------------------------------------------------------------===//
60
61#include "llvm/ADT/ArrayRef.h"
62#include "llvm/ADT/BitVector.h"
63#include "llvm/ADT/DenseMap.h"
64#include "llvm/ADT/iterator_range.h"
65#include "llvm/ADT/MapVector.h"
66#include "llvm/ADT/PriorityQueue.h"
67#include "llvm/ADT/SetVector.h"
68#include "llvm/ADT/SmallPtrSet.h"
69#include "llvm/ADT/SmallSet.h"
70#include "llvm/ADT/SmallVector.h"
71#include "llvm/ADT/Statistic.h"
72#include "llvm/Analysis/AliasAnalysis.h"
73#include "llvm/Analysis/MemoryLocation.h"
74#include "llvm/Analysis/ValueTracking.h"
75#include "llvm/CodeGen/DFAPacketizer.h"
76#include "llvm/CodeGen/LiveIntervalAnalysis.h"
77#include "llvm/CodeGen/MachineBasicBlock.h"
78#include "llvm/CodeGen/MachineDominators.h"
79#include "llvm/CodeGen/MachineFunction.h"
80#include "llvm/CodeGen/MachineFunctionPass.h"
81#include "llvm/CodeGen/MachineInstr.h"
82#include "llvm/CodeGen/MachineInstrBuilder.h"
83#include "llvm/CodeGen/MachineInstrBundle.h"
84#include "llvm/CodeGen/MachineLoopInfo.h"
85#include "llvm/CodeGen/MachineMemOperand.h"
86#include "llvm/CodeGen/MachineOperand.h"
87#include "llvm/CodeGen/MachineRegisterInfo.h"
88#include "llvm/CodeGen/RegisterClassInfo.h"
89#include "llvm/CodeGen/RegisterPressure.h"
90#include "llvm/CodeGen/ScheduleDAG.h"
91#include "llvm/CodeGen/ScheduleDAGInstrs.h"
92#include "llvm/CodeGen/ScheduleDAGMutation.h"
93#include "llvm/IR/Attributes.h"
94#include "llvm/IR/DebugLoc.h"
95#include "llvm/MC/MCInstrItineraries.h"
96#include "llvm/PassAnalysisSupport.h"
97#include "llvm/PassRegistry.h"
98#include "llvm/PassSupport.h"
99#include "llvm/Support/CommandLine.h"
100#include "llvm/Support/Debug.h"
101#include "llvm/Support/MathExtras.h"
102#include "llvm/Support/raw_ostream.h"
103#include "llvm/Target/TargetInstrInfo.h"
104#include "llvm/Target/TargetRegisterInfo.h"
105#include "llvm/Target/TargetSubtargetInfo.h"
106#include <algorithm>
107#include <cassert>
108#include <climits>
109#include <cstdint>
110#include <deque>
111#include <functional>
112#include <iterator>
113#include <map>
114#include <tuple>
115#include <utility>
116#include <vector>
117
118using namespace llvm;
119
120#define DEBUG_TYPE "pipeliner"
121
122STATISTIC(NumTrytoPipeline, "Number of loops that we attempt to pipeline");
123STATISTIC(NumPipelined, "Number of loops software pipelined");
124
125/// A command line option to turn software pipelining on or off.
126static cl::opt<bool> EnableSWP("enable-pipeliner", cl::Hidden, cl::init(true),
127 cl::ZeroOrMore,
128 cl::desc("Enable Software Pipelining"));
129
130/// A command line option to enable SWP at -Os.
131static cl::opt<bool> EnableSWPOptSize("enable-pipeliner-opt-size",
132 cl::desc("Enable SWP at Os."), cl::Hidden,
133 cl::init(false));
134
135/// A command line argument to limit the minimum initiation interval for pipelining.
136static cl::opt<int> SwpMaxMii("pipeliner-max-mii",
137 cl::desc("Size limit for the the MII."),
138 cl::Hidden, cl::init(27));
139
140/// A command line argument to limit the number of stages in the pipeline.
141static cl::opt<int>
142 SwpMaxStages("pipeliner-max-stages",
143 cl::desc("Maximum stages allowed in the generated scheduled."),
144 cl::Hidden, cl::init(3));
145
146/// A command line option to disable the pruning of chain dependences due to
147/// an unrelated Phi.
148static cl::opt<bool>
149 SwpPruneDeps("pipeliner-prune-deps",
150 cl::desc("Prune dependences between unrelated Phi nodes."),
151 cl::Hidden, cl::init(true));
152
153/// A command line option to disable the pruning of loop carried order
154/// dependences.
155static cl::opt<bool>
156 SwpPruneLoopCarried("pipeliner-prune-loop-carried",
157 cl::desc("Prune loop carried order dependences."),
158 cl::Hidden, cl::init(true));
159
160#ifndef NDEBUG
161static cl::opt<int> SwpLoopLimit("pipeliner-max", cl::Hidden, cl::init(-1));
162#endif
163
164static cl::opt<bool> SwpIgnoreRecMII("pipeliner-ignore-recmii",
165 cl::ReallyHidden, cl::init(false),
166 cl::ZeroOrMore, cl::desc("Ignore RecMII"));
167
168namespace {
169
170class NodeSet;
171class SMSchedule;
172class SwingSchedulerDAG;
173
174/// The main class in the implementation of the target independent
175/// software pipeliner pass.
176class MachinePipeliner : public MachineFunctionPass {
177public:
178 MachineFunction *MF = nullptr;
179 const MachineLoopInfo *MLI = nullptr;
180 const MachineDominatorTree *MDT = nullptr;
181 const InstrItineraryData *InstrItins;
182 const TargetInstrInfo *TII = nullptr;
183 RegisterClassInfo RegClassInfo;
184
185#ifndef NDEBUG
186 static int NumTries;
187#endif
188 /// Cache the target analysis information about the loop.
189 struct LoopInfo {
190 MachineBasicBlock *TBB = nullptr;
191 MachineBasicBlock *FBB = nullptr;
192 SmallVector<MachineOperand, 4> BrCond;
193 MachineInstr *LoopInductionVar = nullptr;
194 MachineInstr *LoopCompare = nullptr;
195 };
196 LoopInfo LI;
197
198 static char ID;
199 MachinePipeliner() : MachineFunctionPass(ID) {
200 initializeMachinePipelinerPass(*PassRegistry::getPassRegistry());
201 }
202
203 bool runOnMachineFunction(MachineFunction &MF) override;
204
205 void getAnalysisUsage(AnalysisUsage &AU) const override {
206 AU.addRequired<AAResultsWrapperPass>();
207 AU.addPreserved<AAResultsWrapperPass>();
208 AU.addRequired<MachineLoopInfo>();
209 AU.addRequired<MachineDominatorTree>();
210 AU.addRequired<LiveIntervals>();
211 MachineFunctionPass::getAnalysisUsage(AU);
212 }
213
214private:
215 bool canPipelineLoop(MachineLoop &L);
216 bool scheduleLoop(MachineLoop &L);
217 bool swingModuloScheduler(MachineLoop &L);
218};
219
220/// This class builds the dependence graph for the instructions in a loop,
221/// and attempts to schedule the instructions using the SMS algorithm.
222class SwingSchedulerDAG : public ScheduleDAGInstrs {
223 MachinePipeliner &Pass;
224 /// The minimum initiation interval between iterations for this schedule.
225 unsigned MII;
226 /// Set to true if a valid pipelined schedule is found for the loop.
227 bool Scheduled;
228 MachineLoop &Loop;
229 LiveIntervals &LIS;
230 const RegisterClassInfo &RegClassInfo;
231
232 /// A topological ordering of the SUnits, which is needed for changing
233 /// dependences and iterating over the SUnits.
234 ScheduleDAGTopologicalSort Topo;
235
236 struct NodeInfo {
237 int ASAP;
238 int ALAP;
239 NodeInfo() : ASAP(0), ALAP(0) {}
240 };
241 /// Computed properties for each node in the graph.
242 std::vector<NodeInfo> ScheduleInfo;
243
244 enum OrderKind { BottomUp = 0, TopDown = 1 };
245 /// Computed node ordering for scheduling.
246 SetVector<SUnit *> NodeOrder;
247
248 typedef SmallVector<NodeSet, 8> NodeSetType;
249 typedef DenseMap<unsigned, unsigned> ValueMapTy;
250 typedef SmallVectorImpl<MachineBasicBlock *> MBBVectorTy;
251 typedef DenseMap<MachineInstr *, MachineInstr *> InstrMapTy;
252
253 /// Instructions to change when emitting the final schedule.
254 DenseMap<SUnit *, std::pair<unsigned, int64_t>> InstrChanges;
255
256 /// We may create a new instruction, so remember it because it
257 /// must be deleted when the pass is finished.
258 SmallPtrSet<MachineInstr *, 4> NewMIs;
259
260 /// Ordered list of DAG postprocessing steps.
261 std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
262
263 /// Helper class to implement Johnson's circuit finding algorithm.
264 class Circuits {
265 std::vector<SUnit> &SUnits;
266 SetVector<SUnit *> Stack;
267 BitVector Blocked;
268 SmallVector<SmallPtrSet<SUnit *, 4>, 10> B;
269 SmallVector<SmallVector<int, 4>, 16> AdjK;
270 unsigned NumPaths;
271 static unsigned MaxPaths;
272
273 public:
274 Circuits(std::vector<SUnit> &SUs)
275 : SUnits(SUs), Stack(), Blocked(SUs.size()), B(SUs.size()),
276 AdjK(SUs.size()) {}
277 /// Reset the data structures used in the circuit algorithm.
278 void reset() {
279 Stack.clear();
280 Blocked.reset();
281 B.assign(SUnits.size(), SmallPtrSet<SUnit *, 4>());
282 NumPaths = 0;
283 }
284 void createAdjacencyStructure(SwingSchedulerDAG *DAG);
285 bool circuit(int V, int S, NodeSetType &NodeSets, bool HasBackedge = false);
286 void unblock(int U);
287 };
288
289public:
290 SwingSchedulerDAG(MachinePipeliner &P, MachineLoop &L, LiveIntervals &lis,
291 const RegisterClassInfo &rci)
292 : ScheduleDAGInstrs(*P.MF, P.MLI, false), Pass(P), MII(0),
293 Scheduled(false), Loop(L), LIS(lis), RegClassInfo(rci),
294 Topo(SUnits, &ExitSU) {
295 P.MF->getSubtarget().getSMSMutations(Mutations);
296 }
297
298 void schedule() override;
299 void finishBlock() override;
300
301 /// Return true if the loop kernel has been scheduled.
302 bool hasNewSchedule() { return Scheduled; }
303
304 /// Return the earliest time an instruction may be scheduled.
305 int getASAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ASAP; }
306
307 /// Return the latest time an instruction may be scheduled.
308 int getALAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ALAP; }
309
310 /// The mobility function, which is the number of slots in which
311 /// an instruction may be scheduled.
312 int getMOV(SUnit *Node) { return getALAP(Node) - getASAP(Node); }
313
314 /// The depth, in the dependence graph, for a node.
315 int getDepth(SUnit *Node) { return Node->getDepth(); }
316
317 /// The height, in the dependence graph, for a node.
318 int getHeight(SUnit *Node) { return Node->getHeight(); }
319
320 /// Return true if the dependence is a back-edge in the data dependence graph.
321 /// Since the DAG doesn't contain cycles, we represent a cycle in the graph
322 /// using an anti dependence from a Phi to an instruction.
323 bool isBackedge(SUnit *Source, const SDep &Dep) {
324 if (Dep.getKind() != SDep::Anti)
325 return false;
326 return Source->getInstr()->isPHI() || Dep.getSUnit()->getInstr()->isPHI();
327 }
328
329 /// Return true if the dependence is an order dependence between non-Phis.
330 static bool isOrder(SUnit *Source, const SDep &Dep) {
331 if (Dep.getKind() != SDep::Order)
332 return false;
333 return (!Source->getInstr()->isPHI() &&
334 !Dep.getSUnit()->getInstr()->isPHI());
335 }
336
337 bool isLoopCarriedOrder(SUnit *Source, const SDep &Dep, bool isSucc = true);
338
339 /// The latency of the dependence.
340 unsigned getLatency(SUnit *Source, const SDep &Dep) {
341 // Anti dependences represent recurrences, so use the latency of the
342 // instruction on the back-edge.
343 if (Dep.getKind() == SDep::Anti) {
344 if (Source->getInstr()->isPHI())
345 return Dep.getSUnit()->Latency;
346 if (Dep.getSUnit()->getInstr()->isPHI())
347 return Source->Latency;
348 return Dep.getLatency();
349 }
350 return Dep.getLatency();
351 }
352
353 /// The distance function, which indicates that operation V of iteration I
354 /// depends on operation U of iteration I-distance.
355 unsigned getDistance(SUnit *U, SUnit *V, const SDep &Dep) {
356 // Instructions that feed a Phi have a distance of 1. Computing larger
357 // values for arrays requires data dependence information.
358 if (V->getInstr()->isPHI() && Dep.getKind() == SDep::Anti)
359 return 1;
360 return 0;
361 }
362
363 /// Set the Minimum Initiation Interval for this schedule attempt.
364 void setMII(unsigned mii) { MII = mii; }
365
366 MachineInstr *applyInstrChange(MachineInstr *MI, SMSchedule &Schedule,
367 bool UpdateDAG = false);
368
369 /// Return the new base register that was stored away for the changed
370 /// instruction.
371 unsigned getInstrBaseReg(SUnit *SU) {
372 DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
373 InstrChanges.find(SU);
374 if (It != InstrChanges.end())
375 return It->second.first;
376 return 0;
377 }
378
379 void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
380 Mutations.push_back(std::move(Mutation));
381 }
382
383private:
384 void addLoopCarriedDependences(AliasAnalysis *AA);
385 void updatePhiDependences();
386 void changeDependences();
387 unsigned calculateResMII();
388 unsigned calculateRecMII(NodeSetType &RecNodeSets);
389 void findCircuits(NodeSetType &NodeSets);
390 void fuseRecs(NodeSetType &NodeSets);
391 void removeDuplicateNodes(NodeSetType &NodeSets);
392 void computeNodeFunctions(NodeSetType &NodeSets);
393 void registerPressureFilter(NodeSetType &NodeSets);
394 void colocateNodeSets(NodeSetType &NodeSets);
395 void checkNodeSets(NodeSetType &NodeSets);
396 void groupRemainingNodes(NodeSetType &NodeSets);
397 void addConnectedNodes(SUnit *SU, NodeSet &NewSet,
398 SetVector<SUnit *> &NodesAdded);
399 void computeNodeOrder(NodeSetType &NodeSets);
400 bool schedulePipeline(SMSchedule &Schedule);
401 void generatePipelinedLoop(SMSchedule &Schedule);
402 void generateProlog(SMSchedule &Schedule, unsigned LastStage,
403 MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
404 MBBVectorTy &PrologBBs);
405 void generateEpilog(SMSchedule &Schedule, unsigned LastStage,
406 MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
407 MBBVectorTy &EpilogBBs, MBBVectorTy &PrologBBs);
408 void generateExistingPhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
409 MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
410 SMSchedule &Schedule, ValueMapTy *VRMap,
411 InstrMapTy &InstrMap, unsigned LastStageNum,
412 unsigned CurStageNum, bool IsLast);
413 void generatePhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
414 MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
415 SMSchedule &Schedule, ValueMapTy *VRMap,
416 InstrMapTy &InstrMap, unsigned LastStageNum,
417 unsigned CurStageNum, bool IsLast);
418 void removeDeadInstructions(MachineBasicBlock *KernelBB,
419 MBBVectorTy &EpilogBBs);
420 void splitLifetimes(MachineBasicBlock *KernelBB, MBBVectorTy &EpilogBBs,
421 SMSchedule &Schedule);
422 void addBranches(MBBVectorTy &PrologBBs, MachineBasicBlock *KernelBB,
423 MBBVectorTy &EpilogBBs, SMSchedule &Schedule,
424 ValueMapTy *VRMap);
425 bool computeDelta(MachineInstr &MI, unsigned &Delta);
426 void updateMemOperands(MachineInstr &NewMI, MachineInstr &OldMI,
427 unsigned Num);
428 MachineInstr *cloneInstr(MachineInstr *OldMI, unsigned CurStageNum,
429 unsigned InstStageNum);
430 MachineInstr *cloneAndChangeInstr(MachineInstr *OldMI, unsigned CurStageNum,
431 unsigned InstStageNum,
432 SMSchedule &Schedule);
433 void updateInstruction(MachineInstr *NewMI, bool LastDef,
434 unsigned CurStageNum, unsigned InstStageNum,
435 SMSchedule &Schedule, ValueMapTy *VRMap);
436 MachineInstr *findDefInLoop(unsigned Reg);
437 unsigned getPrevMapVal(unsigned StageNum, unsigned PhiStage, unsigned LoopVal,
438 unsigned LoopStage, ValueMapTy *VRMap,
439 MachineBasicBlock *BB);
440 void rewritePhiValues(MachineBasicBlock *NewBB, unsigned StageNum,
441 SMSchedule &Schedule, ValueMapTy *VRMap,
442 InstrMapTy &InstrMap);
443 void rewriteScheduledInstr(MachineBasicBlock *BB, SMSchedule &Schedule,
444 InstrMapTy &InstrMap, unsigned CurStageNum,
445 unsigned PhiNum, MachineInstr *Phi,
446 unsigned OldReg, unsigned NewReg,
447 unsigned PrevReg = 0);
448 bool canUseLastOffsetValue(MachineInstr *MI, unsigned &BasePos,
449 unsigned &OffsetPos, unsigned &NewBase,
450 int64_t &NewOffset);
451 void postprocessDAG();
452};
453
454/// A NodeSet contains a set of SUnit DAG nodes with additional information
455/// that assigns a priority to the set.
456class NodeSet {
457 SetVector<SUnit *> Nodes;
458 bool HasRecurrence;
459 unsigned RecMII = 0;
460 int MaxMOV = 0;
461 int MaxDepth = 0;
462 unsigned Colocate = 0;
463 SUnit *ExceedPressure = nullptr;
464
465public:
466 typedef SetVector<SUnit *>::const_iterator iterator;
467
468 NodeSet() : Nodes(), HasRecurrence(false) {}
469
470 NodeSet(iterator S, iterator E) : Nodes(S, E), HasRecurrence(true) {}
471
472 bool insert(SUnit *SU) { return Nodes.insert(SU); }
473
474 void insert(iterator S, iterator E) { Nodes.insert(S, E); }
475
476 template <typename UnaryPredicate> bool remove_if(UnaryPredicate P) {
477 return Nodes.remove_if(P);
478 }
479
480 unsigned count(SUnit *SU) const { return Nodes.count(SU); }
481
482 bool hasRecurrence() { return HasRecurrence; };
483
484 unsigned size() const { return Nodes.size(); }
485
486 bool empty() const { return Nodes.empty(); }
487
488 SUnit *getNode(unsigned i) const { return Nodes[i]; };
489
490 void setRecMII(unsigned mii) { RecMII = mii; };
491
492 void setColocate(unsigned c) { Colocate = c; };
493
494 void setExceedPressure(SUnit *SU) { ExceedPressure = SU; }
495
496 bool isExceedSU(SUnit *SU) { return ExceedPressure == SU; }
497
498 int compareRecMII(NodeSet &RHS) { return RecMII - RHS.RecMII; }
499
500 int getRecMII() { return RecMII; }
501
502 /// Summarize node functions for the entire node set.
503 void computeNodeSetInfo(SwingSchedulerDAG *SSD) {
504 for (SUnit *SU : *this) {
505 MaxMOV = std::max(MaxMOV, SSD->getMOV(SU));
506 MaxDepth = std::max(MaxDepth, SSD->getDepth(SU));
507 }
508 }
509
510 void clear() {
511 Nodes.clear();
512 RecMII = 0;
513 HasRecurrence = false;
514 MaxMOV = 0;
515 MaxDepth = 0;
516 Colocate = 0;
517 ExceedPressure = nullptr;
518 }
519
520 operator SetVector<SUnit *> &() { return Nodes; }
521
522 /// Sort the node sets by importance. First, rank them by recurrence MII,
523 /// then by mobility (least mobile done first), and finally by depth.
524 /// Each node set may contain a colocate value which is used as the first
525 /// tie breaker, if it's set.
526 bool operator>(const NodeSet &RHS) const {
527 if (RecMII == RHS.RecMII) {
528 if (Colocate != 0 && RHS.Colocate != 0 && Colocate != RHS.Colocate)
529 return Colocate < RHS.Colocate;
530 if (MaxMOV == RHS.MaxMOV)
531 return MaxDepth > RHS.MaxDepth;
532 return MaxMOV < RHS.MaxMOV;
533 }
534 return RecMII > RHS.RecMII;
535 }
536
537 bool operator==(const NodeSet &RHS) const {
538 return RecMII == RHS.RecMII && MaxMOV == RHS.MaxMOV &&
539 MaxDepth == RHS.MaxDepth;
540 }
541
542 bool operator!=(const NodeSet &RHS) const { return !operator==(RHS); }
543
544 iterator begin() { return Nodes.begin(); }
545 iterator end() { return Nodes.end(); }
546
547 void print(raw_ostream &os) const {
548 os << "Num nodes " << size() << " rec " << RecMII << " mov " << MaxMOV
549 << " depth " << MaxDepth << " col " << Colocate << "\n";
550 for (const auto &I : Nodes)
551 os << " SU(" << I->NodeNum << ") " << *(I->getInstr());
552 os << "\n";
553 }
554
555#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
556 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
557#endif
558};
559
560/// This class represents the scheduled code. The main data structure is a
561/// map from scheduled cycle to instructions. During scheduling, the
562/// data structure explicitly represents all stages/iterations. When
563/// the algorithm finishes, the schedule is collapsed into a single stage,
564/// which represents instructions from different loop iterations.
565///
566/// The SMS algorithm allows negative values for cycles, so the first cycle
567/// in the schedule is the smallest cycle value.
568class SMSchedule {
569private:
570 /// Map from execution cycle to instructions.
571 DenseMap<int, std::deque<SUnit *>> ScheduledInstrs;
572
573 /// Map from instruction to execution cycle.
574 std::map<SUnit *, int> InstrToCycle;
575
576 /// Map each register to the max difference between its uses and def.
577 /// The first element in the pair is the max difference in stages. The
578 /// second is true if the register defines a Phi value and loop value is
579 /// scheduled before the Phi.
580 std::map<unsigned, std::pair<unsigned, bool>> RegToStageDiff;
581
582 /// Keep track of the first cycle value in the schedule. It starts
583 /// as zero, but the algorithm allows negative values.
584 int FirstCycle;
585
586 /// Keep track of the last cycle value in the schedule.
587 int LastCycle;
588
589 /// The initiation interval (II) for the schedule.
590 int InitiationInterval;
591
592 /// Target machine information.
593 const TargetSubtargetInfo &ST;
594
595 /// Virtual register information.
596 MachineRegisterInfo &MRI;
597
598 std::unique_ptr<DFAPacketizer> Resources;
599
600public:
601 SMSchedule(MachineFunction *mf)
602 : ST(mf->getSubtarget()), MRI(mf->getRegInfo()),
603 Resources(ST.getInstrInfo()->CreateTargetScheduleState(ST)) {
604 FirstCycle = 0;
605 LastCycle = 0;
606 InitiationInterval = 0;
607 }
608
609 void reset() {
610 ScheduledInstrs.clear();
611 InstrToCycle.clear();
612 RegToStageDiff.clear();
613 FirstCycle = 0;
614 LastCycle = 0;
615 InitiationInterval = 0;
616 }
617
618 /// Set the initiation interval for this schedule.
619 void setInitiationInterval(int ii) { InitiationInterval = ii; }
620
621 /// Return the first cycle in the completed schedule. This
622 /// can be a negative value.
623 int getFirstCycle() const { return FirstCycle; }
624
625 /// Return the last cycle in the finalized schedule.
626 int getFinalCycle() const { return FirstCycle + InitiationInterval - 1; }
627
628 /// Return the cycle of the earliest scheduled instruction in the dependence
629 /// chain.
630 int earliestCycleInChain(const SDep &Dep);
631
632 /// Return the cycle of the latest scheduled instruction in the dependence
633 /// chain.
634 int latestCycleInChain(const SDep &Dep);
635
636 void computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
637 int *MinEnd, int *MaxStart, int II, SwingSchedulerDAG *DAG);
638 bool insert(SUnit *SU, int StartCycle, int EndCycle, int II);
639
640 /// Iterators for the cycle to instruction map.
641 typedef DenseMap<int, std::deque<SUnit *>>::iterator sched_iterator;
642 typedef DenseMap<int, std::deque<SUnit *>>::const_iterator
643 const_sched_iterator;
644
645 /// Return true if the instruction is scheduled at the specified stage.
646 bool isScheduledAtStage(SUnit *SU, unsigned StageNum) {
647 return (stageScheduled(SU) == (int)StageNum);
648 }
649
650 /// Return the stage for a scheduled instruction. Return -1 if
651 /// the instruction has not been scheduled.
652 int stageScheduled(SUnit *SU) const {
653 std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
654 if (it == InstrToCycle.end())
655 return -1;
656 return (it->second - FirstCycle) / InitiationInterval;
657 }
658
659 /// Return the cycle for a scheduled instruction. This function normalizes
660 /// the first cycle to be 0.
661 unsigned cycleScheduled(SUnit *SU) const {
662 std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
663 assert(it != InstrToCycle.end() && "Instruction hasn't been scheduled.");
664 return (it->second - FirstCycle) % InitiationInterval;
665 }
666
667 /// Return the maximum stage count needed for this schedule.
668 unsigned getMaxStageCount() {
669 return (LastCycle - FirstCycle) / InitiationInterval;
670 }
671
672 /// Return the max. number of stages/iterations that can occur between a
673 /// register definition and its uses.
674 unsigned getStagesForReg(int Reg, unsigned CurStage) {
675 std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
676 if (CurStage > getMaxStageCount() && Stages.first == 0 && Stages.second)
677 return 1;
678 return Stages.first;
679 }
680
681 /// The number of stages for a Phi is a little different than other
682 /// instructions. The minimum value computed in RegToStageDiff is 1
683 /// because we assume the Phi is needed for at least 1 iteration.
684 /// This is not the case if the loop value is scheduled prior to the
685 /// Phi in the same stage. This function returns the number of stages
686 /// or iterations needed between the Phi definition and any uses.
687 unsigned getStagesForPhi(int Reg) {
688 std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
689 if (Stages.second)
690 return Stages.first;
691 return Stages.first - 1;
692 }
693
694 /// Return the instructions that are scheduled at the specified cycle.
695 std::deque<SUnit *> &getInstructions(int cycle) {
696 return ScheduledInstrs[cycle];
697 }
698
699 bool isValidSchedule(SwingSchedulerDAG *SSD);
700 void finalizeSchedule(SwingSchedulerDAG *SSD);
701 bool orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
702 std::deque<SUnit *> &Insts);
703 bool isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi);
704 bool isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD, MachineInstr *Inst,
705 MachineOperand &MO);
706 void print(raw_ostream &os) const;
707 void dump() const;
708};
709
710} // end anonymous namespace
711
712unsigned SwingSchedulerDAG::Circuits::MaxPaths = 5;
713char MachinePipeliner::ID = 0;
714#ifndef NDEBUG
715int MachinePipeliner::NumTries = 0;
716#endif
717char &llvm::MachinePipelinerID = MachinePipeliner::ID;
718INITIALIZE_PASS_BEGIN(MachinePipeliner, "pipeliner",
719 "Modulo Software Pipelining", false, false)
720INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
721INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
722INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
723INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
724INITIALIZE_PASS_END(MachinePipeliner, "pipeliner",
725 "Modulo Software Pipelining", false, false)
726
727/// The "main" function for implementing Swing Modulo Scheduling.
728bool MachinePipeliner::runOnMachineFunction(MachineFunction &mf) {
729 if (skipFunction(*mf.getFunction()))
730 return false;
731
732 if (!EnableSWP)
733 return false;
734
735 if (mf.getFunction()->getAttributes().hasAttribute(
736 AttributeList::FunctionIndex, Attribute::OptimizeForSize) &&
737 !EnableSWPOptSize.getPosition())
738 return false;
739
740 MF = &mf;
741 MLI = &getAnalysis<MachineLoopInfo>();
742 MDT = &getAnalysis<MachineDominatorTree>();
743 TII = MF->getSubtarget().getInstrInfo();
744 RegClassInfo.runOnMachineFunction(*MF);
745
746 for (auto &L : *MLI)
747 scheduleLoop(*L);
748
749 return false;
750}
751
752/// Attempt to perform the SMS algorithm on the specified loop. This function is
753/// the main entry point for the algorithm. The function identifies candidate
754/// loops, calculates the minimum initiation interval, and attempts to schedule
755/// the loop.
756bool MachinePipeliner::scheduleLoop(MachineLoop &L) {
757 bool Changed = false;
758 for (auto &InnerLoop : L)
759 Changed |= scheduleLoop(*InnerLoop);
760
761#ifndef NDEBUG
762 // Stop trying after reaching the limit (if any).
763 int Limit = SwpLoopLimit;
764 if (Limit >= 0) {
765 if (NumTries >= SwpLoopLimit)
766 return Changed;
767 NumTries++;
768 }
769#endif
770
771 if (!canPipelineLoop(L))
772 return Changed;
773
774 ++NumTrytoPipeline;
775
776 Changed = swingModuloScheduler(L);
777
778 return Changed;
779}
780
781/// Return true if the loop can be software pipelined. The algorithm is
782/// restricted to loops with a single basic block. Make sure that the
783/// branch in the loop can be analyzed.
784bool MachinePipeliner::canPipelineLoop(MachineLoop &L) {
785 if (L.getNumBlocks() != 1)
786 return false;
787
788 // Check whether the branch can be analyzed; we can't do pipelining
789 // if the branch can't be understood.
790 LI.TBB = nullptr;
791 LI.FBB = nullptr;
792 LI.BrCond.clear();
793 if (TII->analyzeBranch(*L.getHeader(), LI.TBB, LI.FBB, LI.BrCond))
794 return false;
795
796 LI.LoopInductionVar = nullptr;
797 LI.LoopCompare = nullptr;
798 if (TII->analyzeLoop(L, LI.LoopInductionVar, LI.LoopCompare))
799 return false;
800
801 if (!L.getLoopPreheader())
802 return false;
803
804 // If any of the Phis contain subregs, then we can't pipeline
805 // because we don't know how to maintain subreg information in the
806 // VMap structure.
807 MachineBasicBlock *MBB = L.getHeader();
808 for (MachineBasicBlock::iterator BBI = MBB->instr_begin(),
809 BBE = MBB->getFirstNonPHI();
810 BBI != BBE; ++BBI)
811 for (unsigned i = 1; i != BBI->getNumOperands(); i += 2)
812 if (BBI->getOperand(i).getSubReg() != 0)
813 return false;
814
815 return true;
816}
817
818/// The SMS algorithm consists of the following main steps:
819/// 1. Computation and analysis of the dependence graph.
820/// 2. Ordering of the nodes (instructions).
821/// 3. Attempt to Schedule the loop.
822bool MachinePipeliner::swingModuloScheduler(MachineLoop &L) {
823 assert(L.getBlocks().size() == 1 && "SMS works on single blocks only.");
824
825 SwingSchedulerDAG SMS(*this, L, getAnalysis<LiveIntervals>(), RegClassInfo);
826
827 MachineBasicBlock *MBB = L.getHeader();
828 // The kernel should not include any terminator instructions. These
829 // will be added back later.
830 SMS.startBlock(MBB);
831
832 // Compute the number of 'real' instructions in the basic block by
833 // ignoring terminators.
834 unsigned size = MBB->size();
835 for (MachineBasicBlock::iterator I = MBB->getFirstTerminator(),
836 E = MBB->instr_end();
837 I != E; ++I, --size)
838 ;
839
840 SMS.enterRegion(MBB, MBB->begin(), MBB->getFirstTerminator(), size);
841 SMS.schedule();
842 SMS.exitRegion();
843
844 SMS.finishBlock();
845 return SMS.hasNewSchedule();
846}
847
848/// We override the schedule function in ScheduleDAGInstrs to implement the
849/// scheduling part of the Swing Modulo Scheduling algorithm.
850void SwingSchedulerDAG::schedule() {
851 AliasAnalysis *AA = &Pass.getAnalysis<AAResultsWrapperPass>().getAAResults();
852 buildSchedGraph(AA);
853 addLoopCarriedDependences(AA);
854 updatePhiDependences();
855 Topo.InitDAGTopologicalSorting();
856 postprocessDAG();
857 changeDependences();
858 DEBUG({
859 for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
860 SUnits[su].dumpAll(this);
861 });
863 NodeSetType NodeSets;
864 findCircuits(NodeSets);
865
866 // Calculate the MII.
867 unsigned ResMII = calculateResMII();
868 unsigned RecMII = calculateRecMII(NodeSets);
869
870 fuseRecs(NodeSets);
871
872 // This flag is used for testing and can cause correctness problems.
873 if (SwpIgnoreRecMII)
874 RecMII = 0;
875
876 MII = std::max(ResMII, RecMII);
877 DEBUG(dbgs() << "MII = " << MII << " (rec=" << RecMII << ", res=" << ResMII
878 << ")\n");
879
880 // Can't schedule a loop without a valid MII.
881 if (MII == 0)
882 return;
883
884 // Don't pipeline large loops.
885 if (SwpMaxMii != -1 && (int)MII > SwpMaxMii)
886 return;
887
888 computeNodeFunctions(NodeSets);
889
890 registerPressureFilter(NodeSets);
891
892 colocateNodeSets(NodeSets);
893
894 checkNodeSets(NodeSets);
895
896 DEBUG({
897 for (auto &I : NodeSets) {
898 dbgs() << " Rec NodeSet ";
899 I.dump();
900 }
901 });
902
903 std::sort(NodeSets.begin(), NodeSets.end(), std::greater<NodeSet>());
904
905 groupRemainingNodes(NodeSets);
906
907 removeDuplicateNodes(NodeSets);
908
909 DEBUG({
910 for (auto &I : NodeSets) {
911 dbgs() << " NodeSet ";
912 I.dump();
913 }
914 });
915
916 computeNodeOrder(NodeSets);
917
918 SMSchedule Schedule(Pass.MF);
919 Scheduled = schedulePipeline(Schedule);
920
921 if (!Scheduled)
922 return;
923
924 unsigned numStages = Schedule.getMaxStageCount();
925 // No need to generate pipeline if there are no overlapped iterations.
926 if (numStages == 0)
927 return;
928
929 // Check that the maximum stage count is less than user-defined limit.
930 if (SwpMaxStages > -1 && (int)numStages > SwpMaxStages)
931 return;
932
933 generatePipelinedLoop(Schedule);
934 ++NumPipelined;
935}
936
937/// Clean up after the software pipeliner runs.
938void SwingSchedulerDAG::finishBlock() {
939 for (MachineInstr *I : NewMIs)
940 MF.DeleteMachineInstr(I);
941 NewMIs.clear();
942
943 // Call the superclass.
944 ScheduleDAGInstrs::finishBlock();
945}
946
947/// Return the register values for the operands of a Phi instruction.
948/// This function assumes the instruction is a Phi.
949static void getPhiRegs(MachineInstr &Phi, MachineBasicBlock *Loop,
950 unsigned &InitVal, unsigned &LoopVal) {
951 assert(Phi.isPHI() && "Expecting a Phi.");
952
953 InitVal = 0;
954 LoopVal = 0;
955 for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
956 if (Phi.getOperand(i + 1).getMBB() != Loop)
957 InitVal = Phi.getOperand(i).getReg();
958 else
959 LoopVal = Phi.getOperand(i).getReg();
960
961 assert(InitVal != 0 && LoopVal != 0 && "Unexpected Phi structure.");
962}
963
964/// Return the Phi register value that comes from the incoming block.
965static unsigned getInitPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) {
966 for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
967 if (Phi.getOperand(i + 1).getMBB() != LoopBB)
968 return Phi.getOperand(i).getReg();
969 return 0;
970}
971
972/// Return the Phi register value that comes from the loop block.
973static unsigned getLoopPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) {
974 for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
975 if (Phi.getOperand(i + 1).getMBB() == LoopBB)
976 return Phi.getOperand(i).getReg();
977 return 0;
978}
979
980/// Return true if SUb can be reached from SUa following the chain edges.
981static bool isSuccOrder(SUnit *SUa, SUnit *SUb) {
982 SmallPtrSet<SUnit *, 8> Visited;
983 SmallVector<SUnit *, 8> Worklist;
984 Worklist.push_back(SUa);
985 while (!Worklist.empty()) {
986 const SUnit *SU = Worklist.pop_back_val();
987 for (auto &SI : SU->Succs) {
988 SUnit *SuccSU = SI.getSUnit();
989 if (SI.getKind() == SDep::Order) {
990 if (Visited.count(SuccSU))
991 continue;
992 if (SuccSU == SUb)
993 return true;
994 Worklist.push_back(SuccSU);
995 Visited.insert(SuccSU);
996 }
997 }
998 }
999 return false;
1000}
1001
1002/// Return true if the instruction causes a chain between memory
1003/// references before and after it.
1004static bool isDependenceBarrier(MachineInstr &MI, AliasAnalysis *AA) {
1005 return MI.isCall() || MI.hasUnmodeledSideEffects() ||
1006 (MI.hasOrderedMemoryRef() &&
1007 (!MI.mayLoad() || !MI.isDereferenceableInvariantLoad(AA)));
1008}
1009
1010/// Return the underlying objects for the memory references of an instruction.
1011/// This function calls the code in ValueTracking, but first checks that the
1012/// instruction has a memory operand.
1013static void getUnderlyingObjects(MachineInstr *MI,
1014 SmallVectorImpl<Value *> &Objs,
1015 const DataLayout &DL) {
1016 if (!MI->hasOneMemOperand())
1017 return;
1018 MachineMemOperand *MM = *MI->memoperands_begin();
1019 if (!MM->getValue())
1020 return;
1021 GetUnderlyingObjects(const_cast<Value *>(MM->getValue()), Objs, DL);
1022}
1023
1024/// Add a chain edge between a load and store if the store can be an
1025/// alias of the load on a subsequent iteration, i.e., a loop carried
1026/// dependence. This code is very similar to the code in ScheduleDAGInstrs
1027/// but that code doesn't create loop carried dependences.
1028void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
1029 MapVector<Value *, SmallVector<SUnit *, 4>> PendingLoads;
1030 for (auto &SU : SUnits) {
1031 MachineInstr &MI = *SU.getInstr();
1032 if (isDependenceBarrier(MI, AA))
1033 PendingLoads.clear();
1034 else if (MI.mayLoad()) {
1035 SmallVector<Value *, 4> Objs;
1036 getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
1037 for (auto V : Objs) {
1038 SmallVector<SUnit *, 4> &SUs = PendingLoads[V];
1039 SUs.push_back(&SU);
1040 }
1041 } else if (MI.mayStore()) {
1042 SmallVector<Value *, 4> Objs;
1043 getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
1044 for (auto V : Objs) {
1045 MapVector<Value *, SmallVector<SUnit *, 4>>::iterator I =
1046 PendingLoads.find(V);
1047 if (I == PendingLoads.end())
1048 continue;
1049 for (auto Load : I->second) {
1050 if (isSuccOrder(Load, &SU))
1051 continue;
1052 MachineInstr &LdMI = *Load->getInstr();
1053 // First, perform the cheaper check that compares the base registers.
1054 // If they are the same and the load offset is less than the store
1055 // offset, then potentially mark the dependence as loop carried.
1056 unsigned BaseReg1, BaseReg2;
1057 int64_t Offset1, Offset2;
1058 if (!TII->getMemOpBaseRegImmOfs(LdMI, BaseReg1, Offset1, TRI) ||
1059 !TII->getMemOpBaseRegImmOfs(MI, BaseReg2, Offset2, TRI)) {
1060 SU.addPred(SDep(Load, SDep::Barrier));
1061 continue;
1062 }
1063 if (BaseReg1 == BaseReg2 && (int)Offset1 < (int)Offset2) {
1064 assert(TII->areMemAccessesTriviallyDisjoint(LdMI, MI, AA) &&
1065 "What happened to the chain edge?");
1066 SU.addPred(SDep(Load, SDep::Barrier));
1067 continue;
1068 }
1069 // Second, the more expensive check that uses alias analysis on the
1070 // base registers. If they alias, and the load offset is less than
1071 // the store offset, then mark the dependence as loop carried.
1072 if (!AA) {
1073 SU.addPred(SDep(Load, SDep::Barrier));
1074 continue;
1075 }
1076 MachineMemOperand *MMO1 = *LdMI.memoperands_begin();
1077 MachineMemOperand *MMO2 = *MI.memoperands_begin();
1078 if (!MMO1->getValue() || !MMO2->getValue()) {
1079 SU.addPred(SDep(Load, SDep::Barrier));
1080 continue;
1081 }
1082 if (MMO1->getValue() == MMO2->getValue() &&
1083 MMO1->getOffset() <= MMO2->getOffset()) {
1084 SU.addPred(SDep(Load, SDep::Barrier));
1085 continue;
1086 }
1087 AliasResult AAResult = AA->alias(
1088 MemoryLocation(MMO1->getValue(), MemoryLocation::UnknownSize,
1089 MMO1->getAAInfo()),
1090 MemoryLocation(MMO2->getValue(), MemoryLocation::UnknownSize,
1091 MMO2->getAAInfo()));
1092
1093 if (AAResult != NoAlias)
1094 SU.addPred(SDep(Load, SDep::Barrier));
1095 }
1096 }
1097 }
1098 }
1099}
1100
1101/// Update the phi dependences to the DAG because ScheduleDAGInstrs no longer
1102/// processes dependences for PHIs. This function adds true dependences
1103/// from a PHI to a use, and a loop carried dependence from the use to the
1104/// PHI. The loop carried dependence is represented as an anti dependence
1105/// edge. This function also removes chain dependences between unrelated
1106/// PHIs.
1107void SwingSchedulerDAG::updatePhiDependences() {
1108 SmallVector<SDep, 4> RemoveDeps;
1109 const TargetSubtargetInfo &ST = MF.getSubtarget<TargetSubtargetInfo>();
1110
1111 // Iterate over each DAG node.
1112 for (SUnit &I : SUnits) {
1113 RemoveDeps.clear();
1114 // Set to true if the instruction has an operand defined by a Phi.
1115 unsigned HasPhiUse = 0;
1116 unsigned HasPhiDef = 0;
1117 MachineInstr *MI = I.getInstr();
1118 // Iterate over each operand and process the definitions.
1119 for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
1120 MOE = MI->operands_end();
1121 MOI != MOE; ++MOI) {
1122 if (!MOI->isReg())
1123 continue;
1124 unsigned Reg = MOI->getReg();
1125 if (MOI->isDef()) {
1126 // If the register is used by a Phi, then create an anti dependence.
1127 for (MachineRegisterInfo::use_instr_iterator
1128 UI = MRI.use_instr_begin(Reg),
1129 UE = MRI.use_instr_end();
1130 UI != UE; ++UI) {
1131 MachineInstr *UseMI = &*UI;
1132 SUnit *SU = getSUnit(UseMI);
1133 if (SU != nullptr && UseMI->isPHI()) {
1134 if (!MI->isPHI()) {
1135 SDep Dep(SU, SDep::Anti, Reg);
1136 I.addPred(Dep);
1137 } else {
1138 HasPhiDef = Reg;
1139 // Add a chain edge to a dependent Phi that isn't an existing
1140 // predecessor.
1141 if (SU->NodeNum < I.NodeNum && !I.isPred(SU))
1142 I.addPred(SDep(SU, SDep::Barrier));
1143 }
1144 }
1145 }
1146 } else if (MOI->isUse()) {
1147 // If the register is defined by a Phi, then create a true dependence.
1148 MachineInstr *DefMI = MRI.getUniqueVRegDef(Reg);
1149 if (DefMI == nullptr)
1150 continue;
1151 SUnit *SU = getSUnit(DefMI);
1152 if (SU != nullptr && DefMI->isPHI()) {
1153 if (!MI->isPHI()) {
1154 SDep Dep(SU, SDep::Data, Reg);
1155 Dep.setLatency(0);
1156 ST.adjustSchedDependency(SU, &I, Dep);
1157 I.addPred(Dep);
1158 } else {
1159 HasPhiUse = Reg;
1160 // Add a chain edge to a dependent Phi that isn't an existing
1161 // predecessor.
1162 if (SU->NodeNum < I.NodeNum && !I.isPred(SU))
1163 I.addPred(SDep(SU, SDep::Barrier));
1164 }
1165 }
1166 }
1167 }
1168 // Remove order dependences from an unrelated Phi.
1169 if (!SwpPruneDeps)
1170 continue;
1171 for (auto &PI : I.Preds) {
1172 MachineInstr *PMI = PI.getSUnit()->getInstr();
1173 if (PMI->isPHI() && PI.getKind() == SDep::Order) {
1174 if (I.getInstr()->isPHI()) {
1175 if (PMI->getOperand(0).getReg() == HasPhiUse)
1176 continue;
1177 if (getLoopPhiReg(*PMI, PMI->getParent()) == HasPhiDef)
1178 continue;
1179 }
1180 RemoveDeps.push_back(PI);
1181 }
1182 }
1183 for (int i = 0, e = RemoveDeps.size(); i != e; ++i)
1184 I.removePred(RemoveDeps[i]);
1185 }
1186}
1187
1188/// Iterate over each DAG node and see if we can change any dependences
1189/// in order to reduce the recurrence MII.
1190void SwingSchedulerDAG::changeDependences() {
1191 // See if an instruction can use a value from the previous iteration.
1192 // If so, we update the base and offset of the instruction and change
1193 // the dependences.
1194 for (SUnit &I : SUnits) {
1195 unsigned BasePos = 0, OffsetPos = 0, NewBase = 0;
1196 int64_t NewOffset = 0;
1197 if (!canUseLastOffsetValue(I.getInstr(), BasePos, OffsetPos, NewBase,
1198 NewOffset))
1199 continue;
1200
1201 // Get the MI and SUnit for the instruction that defines the original base.
1202 unsigned OrigBase = I.getInstr()->getOperand(BasePos).getReg();
1203 MachineInstr *DefMI = MRI.getUniqueVRegDef(OrigBase);
1204 if (!DefMI)
1205 continue;
1206 SUnit *DefSU = getSUnit(DefMI);
1207 if (!DefSU)
1208 continue;
1209 // Get the MI and SUnit for the instruction that defines the new base.
1210 MachineInstr *LastMI = MRI.getUniqueVRegDef(NewBase);
1211 if (!LastMI)
1212 continue;
1213 SUnit *LastSU = getSUnit(LastMI);
1214 if (!LastSU)
1215 continue;
1216
1217 if (Topo.IsReachable(&I, LastSU))
1218 continue;
1219
1220 // Remove the dependence. The value now depends on a prior iteration.
1221 SmallVector<SDep, 4> Deps;
1222 for (SUnit::pred_iterator P = I.Preds.begin(), E = I.Preds.end(); P != E;
1223 ++P)
1224 if (P->getSUnit() == DefSU)
1225 Deps.push_back(*P);
1226 for (int i = 0, e = Deps.size(); i != e; i++) {
1227 Topo.RemovePred(&I, Deps[i].getSUnit());
1228 I.removePred(Deps[i]);
1229 }
1230 // Remove the chain dependence between the instructions.
1231 Deps.clear();
1232 for (auto &P : LastSU->Preds)
1233 if (P.getSUnit() == &I && P.getKind() == SDep::Order)
1234 Deps.push_back(P);
1235 for (int i = 0, e = Deps.size(); i != e; i++) {
1236 Topo.RemovePred(LastSU, Deps[i].getSUnit());
1237 LastSU->removePred(Deps[i]);
1238 }
1239
1240 // Add a dependence between the new instruction and the instruction
1241 // that defines the new base.
1242 SDep Dep(&I, SDep::Anti, NewBase);
1243 LastSU->addPred(Dep);
1244
1245 // Remember the base and offset information so that we can update the
1246 // instruction during code generation.
1247 InstrChanges[&I] = std::make_pair(NewBase, NewOffset);
1248 }
1249}
1250
1251namespace {
1252
1253// FuncUnitSorter - Comparison operator used to sort instructions by
1254// the number of functional unit choices.
1255struct FuncUnitSorter {
1256 const InstrItineraryData *InstrItins;
1257 DenseMap<unsigned, unsigned> Resources;
1258
1259 // Compute the number of functional unit alternatives needed
1260 // at each stage, and take the minimum value. We prioritize the
1261 // instructions by the least number of choices first.
1262 unsigned minFuncUnits(const MachineInstr *Inst, unsigned &F) const {
1263 unsigned schedClass = Inst->getDesc().getSchedClass();
1264 unsigned min = UINT_MAX;
1265 for (const InstrStage *IS = InstrItins->beginStage(schedClass),
1266 *IE = InstrItins->endStage(schedClass);
1267 IS != IE; ++IS) {
1268 unsigned funcUnits = IS->getUnits();
1269 unsigned numAlternatives = countPopulation(funcUnits);
1270 if (numAlternatives < min) {
1271 min = numAlternatives;
1272 F = funcUnits;
1273 }
1274 }
1275 return min;
1276 }
1277
1278 // Compute the critical resources needed by the instruction. This
1279 // function records the functional units needed by instructions that
1280 // must use only one functional unit. We use this as a tie breaker
1281 // for computing the resource MII. The instructions that require
1282 // the same, highly used, functional unit have high priority.
1283 void calcCriticalResources(MachineInstr &MI) {
1284 unsigned SchedClass = MI.getDesc().getSchedClass();
1285 for (const InstrStage *IS = InstrItins->beginStage(SchedClass),
1286 *IE = InstrItins->endStage(SchedClass);
1287 IS != IE; ++IS) {
1288 unsigned FuncUnits = IS->getUnits();
1289 if (countPopulation(FuncUnits) == 1)
1290 Resources[FuncUnits]++;
1291 }
1292 }
1293
1294 FuncUnitSorter(const InstrItineraryData *IID) : InstrItins(IID) {}
1295 /// Return true if IS1 has less priority than IS2.
1296 bool operator()(const MachineInstr *IS1, const MachineInstr *IS2) const {
1297 unsigned F1 = 0, F2 = 0;
1298 unsigned MFUs1 = minFuncUnits(IS1, F1);
1299 unsigned MFUs2 = minFuncUnits(IS2, F2);
1300 if (MFUs1 == 1 && MFUs2 == 1)
1301 return Resources.lookup(F1) < Resources.lookup(F2);
1302 return MFUs1 > MFUs2;
1303 }
1304};
1305
1306} // end anonymous namespace
1307
1308/// Calculate the resource constrained minimum initiation interval for the
1309/// specified loop. We use the DFA to model the resources needed for
1310/// each instruction, and we ignore dependences. A different DFA is created
1311/// for each cycle that is required. When adding a new instruction, we attempt
1312/// to add it to each existing DFA, until a legal space is found. If the
1313/// instruction cannot be reserved in an existing DFA, we create a new one.
1314unsigned SwingSchedulerDAG::calculateResMII() {
1315 SmallVector<DFAPacketizer *, 8> Resources;
1316 MachineBasicBlock *MBB = Loop.getHeader();
1317 Resources.push_back(TII->CreateTargetScheduleState(MF.getSubtarget()));
1318
1319 // Sort the instructions by the number of available choices for scheduling,
1320 // least to most. Use the number of critical resources as the tie breaker.
1321 FuncUnitSorter FUS =
1322 FuncUnitSorter(MF.getSubtarget().getInstrItineraryData());
1323 for (MachineBasicBlock::iterator I = MBB->getFirstNonPHI(),
1324 E = MBB->getFirstTerminator();
1325 I != E; ++I)
1326 FUS.calcCriticalResources(*I);
1327 PriorityQueue<MachineInstr *, std::vector<MachineInstr *>, FuncUnitSorter>
1328 FuncUnitOrder(FUS);
1329
1330 for (MachineBasicBlock::iterator I = MBB->getFirstNonPHI(),
1331 E = MBB->getFirstTerminator();
1332 I != E; ++I)
1333 FuncUnitOrder.push(&*I);
1334
1335 while (!FuncUnitOrder.empty()) {
1336 MachineInstr *MI = FuncUnitOrder.top();
1337 FuncUnitOrder.pop();
1338 if (TII->isZeroCost(MI->getOpcode()))
1339 continue;
1340 // Attempt to reserve the instruction in an existing DFA. At least one
1341 // DFA is needed for each cycle.
1342 unsigned NumCycles = getSUnit(MI)->Latency;
1343 unsigned ReservedCycles = 0;
1344 SmallVectorImpl<DFAPacketizer *>::iterator RI = Resources.begin();
1345 SmallVectorImpl<DFAPacketizer *>::iterator RE = Resources.end();
1346 for (unsigned C = 0; C < NumCycles; ++C)
1347 while (RI != RE) {
1348 if ((*RI++)->canReserveResources(*MI)) {
1349 ++ReservedCycles;
1350 break;
1351 }
1352 }
1353 // Start reserving resources using existing DFAs.
1354 for (unsigned C = 0; C < ReservedCycles; ++C) {
1355 --RI;
1356 (*RI)->reserveResources(*MI);
1357 }
1358 // Add new DFAs, if needed, to reserve resources.
1359 for (unsigned C = ReservedCycles; C < NumCycles; ++C) {
1360 DFAPacketizer *NewResource =
1361 TII->CreateTargetScheduleState(MF.getSubtarget());
1362 assert(NewResource->canReserveResources(*MI) && "Reserve error.");
1363 NewResource->reserveResources(*MI);
1364 Resources.push_back(NewResource);
1365 }
1366 }
1367 int Resmii = Resources.size();
1368 // Delete the memory for each of the DFAs that were created earlier.
1369 for (DFAPacketizer *RI : Resources) {
1370 DFAPacketizer *D = RI;
1371 delete D;
1372 }
1373 Resources.clear();
1374 return Resmii;
1375}
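// A minimal standalone sketch of the same greedy reservation idea over an
// abstract resource table rather than the DFAPacketizer: each row stands for
// one cycle, and ResMII is the number of rows needed to place every
// instruction. Each instruction reserves a single cycle here (the pass above
// reserves one DFA per latency cycle), and the functional-unit classes and
// instruction mix below are illustrative only.

#include <cassert>
#include <map>
#include <string>
#include <vector>

// One "cycle" of an abstract reservation table: how many slots of each
// functional-unit class are still free in that cycle.
struct CycleRow {
  std::map<std::string, int> FreeSlots;
  bool canReserve(const std::string &FU) const {
    auto It = FreeSlots.find(FU);
    return It != FreeSlots.end() && It->second > 0;
  }
  void reserve(const std::string &FU) { --FreeSlots[FU]; }
};

// Place every instruction in the first cycle row that still has a free slot
// of its functional-unit class, adding rows on demand; the final row count is
// the resource-constrained MII for this toy machine model.
int computeResMII(const std::vector<std::string> &Instrs,
                  const std::map<std::string, int> &SlotsPerCycle) {
  std::vector<CycleRow> Rows(1, CycleRow{SlotsPerCycle});
  for (const std::string &FU : Instrs) {
    bool Placed = false;
    for (CycleRow &Row : Rows)
      if (Row.canReserve(FU)) {
        Row.reserve(FU);
        Placed = true;
        break;
      }
    if (!Placed) {
      Rows.push_back(CycleRow{SlotsPerCycle});
      assert(Rows.back().canReserve(FU) && "Reserve error.");
      Rows.back().reserve(FU);
    }
  }
  return static_cast<int>(Rows.size());
}

int main() {
  // Four memory operations and two ALU operations on a machine with one
  // load/store slot and two ALU slots per cycle: the memory side alone
  // forces a ResMII of 4.
  std::map<std::string, int> Machine = {{"LS", 1}, {"ALU", 2}};
  std::vector<std::string> LoopBody = {"LS", "LS", "LS", "LS", "ALU", "ALU"};
  return computeResMII(LoopBody, Machine) == 4 ? 0 : 1;
}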
1376
1377/// Calculate the recurrence-constrained minimum initiation interval.
1378/// Iterate over each circuit. Compute the delay(c) and distance(c)
1379/// for each circuit. The II needs to satisfy the inequality
1380/// delay(c) - II*distance(c) <= 0. For each circuit, choose the smallest
1381/// II that satisfies the inequality, and the RecMII is the maximum
1382/// of those values.
1383unsigned SwingSchedulerDAG::calculateRecMII(NodeSetType &NodeSets) {
1384 unsigned RecMII = 0;
1385
1386 for (NodeSet &Nodes : NodeSets) {
1387 if (Nodes.size() == 0)
1388 continue;
1389
1390 unsigned Delay = Nodes.size() - 1;
1391 unsigned Distance = 1;
1392
1393 // ii = ceil(delay / distance)
1394 unsigned CurMII = (Delay + Distance - 1) / Distance;
1395 Nodes.setRecMII(CurMII);
1396 if (CurMII > RecMII)
1397 RecMII = CurMII;
1398 }
1399
1400 return RecMII;
1401}
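// The node-sets produced by the circuit search give every circuit
// delay = size - 1 and distance = 1, so CurMII above is simply the circuit
// size minus one. A standalone sketch of the general formula, with made-up
// delay and distance values for two circuits:

#include <cstdio>
#include <vector>

struct Circuit { unsigned Delay; unsigned Distance; };

// RecMII = max over circuits of ceil(delay(c) / distance(c)), the smallest II
// with delay(c) - II * distance(c) <= 0 for every circuit.
unsigned computeRecMII(const std::vector<Circuit> &Circuits) {
  unsigned RecMII = 0;
  for (const Circuit &C : Circuits) {
    unsigned CurMII = (C.Delay + C.Distance - 1) / C.Distance; // ceil
    if (CurMII > RecMII)
      RecMII = CurMII;
  }
  return RecMII;
}

int main() {
  // A recurrence carried over one iteration (delay 5, distance 1) and a
  // shorter one carried over two iterations (delay 4, distance 2).
  std::vector<Circuit> Circuits = {{5, 1}, {4, 2}};
  std::printf("RecMII = %u\n", computeRecMII(Circuits)); // prints 5
  return 0;
}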
1402
1403/// Swap all the anti dependences in the DAG. That means it is no longer a DAG,
1404/// but we do this to find the circuits, and then change them back.
1405static void swapAntiDependences(std::vector<SUnit> &SUnits) {
1406 SmallVector<std::pair<SUnit *, SDep>, 8> DepsAdded;
1407 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1408 SUnit *SU = &SUnits[i];
1409 for (SUnit::pred_iterator IP = SU->Preds.begin(), EP = SU->Preds.end();
1410 IP != EP; ++IP) {
1411 if (IP->getKind() != SDep::Anti)
1412 continue;
1413 DepsAdded.push_back(std::make_pair(SU, *IP));
1414 }
1415 }
1416 for (SmallVector<std::pair<SUnit *, SDep>, 8>::iterator I = DepsAdded.begin(),
1417 E = DepsAdded.end();
1418 I != E; ++I) {
1419 // Remove this anti dependency and add one in the reverse direction.
1420 SUnit *SU = I->first;
1421 SDep &D = I->second;
1422 SUnit *TargetSU = D.getSUnit();
1423 unsigned Reg = D.getReg();
1424 unsigned Lat = D.getLatency();
1425 SU->removePred(D);
1426 SDep Dep(SU, SDep::Anti, Reg);
1427 Dep.setLatency(Lat);
1428 TargetSU->addPred(Dep);
1429 }
1430}
1431
1432/// Create the adjacency structure of the nodes in the graph.
1433void SwingSchedulerDAG::Circuits::createAdjacencyStructure(
1434 SwingSchedulerDAG *DAG) {
1435 BitVector Added(SUnits.size());
1436 for (int i = 0, e = SUnits.size(); i != e; ++i) {
1437 Added.reset();
1438 // Add any successor to the adjacency matrix and exclude duplicates.
1439 for (auto &SI : SUnits[i].Succs) {
1440      // Do not process a boundary node; process a back-edge only
1441      // if it goes to a Phi.
1442 if (SI.getSUnit()->isBoundaryNode() ||
1443 (SI.getKind() == SDep::Anti && !SI.getSUnit()->getInstr()->isPHI()))
1444 continue;
1445 int N = SI.getSUnit()->NodeNum;
1446 if (!Added.test(N)) {
1447 AdjK[i].push_back(N);
1448 Added.set(N);
1449 }
1450 }
1451 // A chain edge between a store and a load is treated as a back-edge in the
1452 // adjacency matrix.
1453 for (auto &PI : SUnits[i].Preds) {
1454 if (!SUnits[i].getInstr()->mayStore() ||
1455 !DAG->isLoopCarriedOrder(&SUnits[i], PI, false))
1456 continue;
1457 if (PI.getKind() == SDep::Order && PI.getSUnit()->getInstr()->mayLoad()) {
1458 int N = PI.getSUnit()->NodeNum;
1459 if (!Added.test(N)) {
1460 AdjK[i].push_back(N);
1461 Added.set(N);
1462 }
1463 }
1464 }
1465 }
1466}
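// A toy version of the adjacency structure with plain integers in place of
// SUnits: ordinary successor edges point forward, and an extra edge from a
// store to a may-alias load records the loop-carried chain dependence so the
// circuit search can see it as a cycle. The three-node loop body is
// illustrative only.

#include <vector>

int main() {
  const int Load = 0, Add = 1, Store = 2;
  std::vector<std::vector<int>> Adj(3);
  Adj[Load].push_back(Add);    // load  -> add   (data edge)
  Adj[Add].push_back(Store);   // add   -> store (data edge)
  Adj[Store].push_back(Load);  // store -> load  (loop-carried chain edge)
  return Adj[Store].front() == Load ? 0 : 1;
}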
1467
1468/// Identify an elementary circuit in the dependence graph starting at the
1469/// specified node.
1470bool SwingSchedulerDAG::Circuits::circuit(int V, int S, NodeSetType &NodeSets,
1471 bool HasBackedge) {
1472 SUnit *SV = &SUnits[V];
1473 bool F = false;
1474 Stack.insert(SV);
1475 Blocked.set(V);
1476
1477 for (auto W : AdjK[V]) {
1478 if (NumPaths > MaxPaths)
1479 break;
1480 if (W < S)
1481 continue;
1482 if (W == S) {
1483 if (!HasBackedge)
1484 NodeSets.push_back(NodeSet(Stack.begin(), Stack.end()));
1485 F = true;
1486 ++NumPaths;
1487 break;
1488 } else if (!Blocked.test(W)) {
1489 if (circuit(W, S, NodeSets, W < V ? true : HasBackedge))
1490 F = true;
1491 }
1492 }
1493
1494 if (F)
1495 unblock(V);
1496 else {
1497 for (auto W : AdjK[V]) {
1498 if (W < S)
1499 continue;
1500 if (B[W].count(SV) == 0)
1501 B[W].insert(SV);
1502 }
1503 }
1504 Stack.pop_back();
1505 return F;
1506}
1507
1508/// Unblock a node in the circuit finding algorithm.
1509void SwingSchedulerDAG::Circuits::unblock(int U) {
1510 Blocked.reset(U);
1511 SmallPtrSet<SUnit *, 4> &BU = B[U];
1512 while (!BU.empty()) {
1513 SmallPtrSet<SUnit *, 4>::iterator SI = BU.begin();
1514 assert(SI != BU.end() && "Invalid B set.")((SI != BU.end() && "Invalid B set.") ? static_cast<
void> (0) : __assert_fail ("SI != BU.end() && \"Invalid B set.\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/CodeGen/MachinePipeliner.cpp"
, 1514, __PRETTY_FUNCTION__))
;
1515 SUnit *W = *SI;
1516 BU.erase(W);
1517 if (Blocked.test(W->NodeNum))
1518 unblock(W->NodeNum);
1519 }
1520}
1521
1522/// Identify all the elementary circuits in the dependence graph using
1523/// Johnson's circuit algorithm.
1524void SwingSchedulerDAG::findCircuits(NodeSetType &NodeSets) {
1525 // Swap all the anti dependences in the DAG. That means it is no longer a DAG,
1526 // but we do this to find the circuits, and then change them back.
1527 swapAntiDependences(SUnits);
1528
1529 Circuits Cir(SUnits);
1530 // Create the adjacency structure.
1531 Cir.createAdjacencyStructure(this);
1532 for (int i = 0, e = SUnits.size(); i != e; ++i) {
1533 Cir.reset();
1534 Cir.circuit(i, i, NodeSets);
1535 }
1536
1537 // Change the dependences back so that we've created a DAG again.
1538 swapAntiDependences(SUnits);
1539}
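// A compact sketch of elementary-circuit enumeration on adjacency lists,
// rooted at each node in turn and restricted to nodes numbered no smaller
// than the root, which is how each circuit is reported exactly once. It
// omits Johnson's blocked-set bookkeeping and the MaxPaths cutoff used
// above, so it only illustrates the search and does not scale the same way.

#include <vector>

using Graph = std::vector<std::vector<int>>;

// Depth-first search that closes a circuit whenever it returns to the root S,
// visiting only nodes >= S so every elementary circuit is found once.
static void circuitsFrom(int V, int S, const Graph &Adj, std::vector<int> &Path,
                         std::vector<bool> &OnPath,
                         std::vector<std::vector<int>> &Found) {
  Path.push_back(V);
  OnPath[V] = true;
  for (int W : Adj[V]) {
    if (W < S)
      continue;
    if (W == S)
      Found.push_back(Path);               // closed an elementary circuit
    else if (!OnPath[W])
      circuitsFrom(W, S, Adj, Path, OnPath, Found);
  }
  OnPath[V] = false;
  Path.pop_back();
}

int main() {
  // 0 -> 1 -> 2 -> 0 plus a second recurrence 1 -> 3 -> 1.
  Graph Adj = {{1}, {2, 3}, {0}, {1}};
  const int N = static_cast<int>(Adj.size());
  std::vector<std::vector<int>> Found;
  for (int S = 0; S < N; ++S) {
    std::vector<int> Path;
    std::vector<bool> OnPath(N, false);
    circuitsFrom(S, S, Adj, Path, OnPath, Found);
  }
  return Found.size() == 2 ? 0 : 1;        // {0,1,2} and {1,3}
}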
1540
1541/// Return true for DAG nodes that we ignore when computing the cost functions.
1542/// We ignore the back-edge recurrence in order to avoid unbounded recursion
1543/// in the calculation of the ASAP, ALAP, etc. functions.
1544static bool ignoreDependence(const SDep &D, bool isPred) {
1545 if (D.isArtificial())
1546 return true;
1547 return D.getKind() == SDep::Anti && isPred;
1548}
1549
1550/// Compute several functions needed to order the nodes for scheduling.
1551/// ASAP - Earliest time to schedule a node.
1552/// ALAP - Latest time to schedule a node.
1553/// MOV - Mobility function, difference between ALAP and ASAP.
1554/// D - Depth of each node.
1555/// H - Height of each node.
1556void SwingSchedulerDAG::computeNodeFunctions(NodeSetType &NodeSets) {
1557
1558 ScheduleInfo.resize(SUnits.size());
1559
1560  DEBUG({
1561    for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(),
1562                                                    E = Topo.end();
1563         I != E; ++I) {
1564      SUnit *SU = &SUnits[*I];
1565      SU->dump(this);
1566    }
1567  });
1568
1569 int maxASAP = 0;
1570 // Compute ASAP.
1571 for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(),
1572 E = Topo.end();
1573 I != E; ++I) {
1574 int asap = 0;
1575 SUnit *SU = &SUnits[*I];
1576 for (SUnit::const_pred_iterator IP = SU->Preds.begin(),
1577 EP = SU->Preds.end();
1578 IP != EP; ++IP) {
1579 if (ignoreDependence(*IP, true))
1580 continue;
1581 SUnit *pred = IP->getSUnit();
1582 asap = std::max(asap, (int)(getASAP(pred) + getLatency(SU, *IP) -
1583 getDistance(pred, SU, *IP) * MII));
1584 }
1585 maxASAP = std::max(maxASAP, asap);
1586 ScheduleInfo[*I].ASAP = asap;
1587 }
1588
1589 // Compute ALAP and MOV.
1590 for (ScheduleDAGTopologicalSort::const_reverse_iterator I = Topo.rbegin(),
1591 E = Topo.rend();
1592 I != E; ++I) {
1593 int alap = maxASAP;
1594 SUnit *SU = &SUnits[*I];
1595 for (SUnit::const_succ_iterator IS = SU->Succs.begin(),
1596 ES = SU->Succs.end();
1597 IS != ES; ++IS) {
1598 if (ignoreDependence(*IS, true))
1599 continue;
1600 SUnit *succ = IS->getSUnit();
1601 alap = std::min(alap, (int)(getALAP(succ) - getLatency(SU, *IS) +
1602 getDistance(SU, succ, *IS) * MII));
1603 }
1604
1605 ScheduleInfo[*I].ALAP = alap;
1606 }
1607
1608 // After computing the node functions, compute the summary for each node set.
1609 for (NodeSet &I : NodeSets)
1610 I.computeNodeSetInfo(this);
1611
1612  DEBUG({
1613    for (unsigned i = 0; i < SUnits.size(); i++) {
1614      dbgs() << "\tNode " << i << ":\n";
1615      dbgs() << "\t ASAP = " << getASAP(&SUnits[i]) << "\n";
1616      dbgs() << "\t ALAP = " << getALAP(&SUnits[i]) << "\n";
1617      dbgs() << "\t MOV = " << getMOV(&SUnits[i]) << "\n";
1618      dbgs() << "\t D = " << getDepth(&SUnits[i]) << "\n";
1619      dbgs() << "\t H = " << getHeight(&SUnits[i]) << "\n";
1620    }
1621  });
1622}
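// A self-contained toy computation of ASAP, ALAP, and the mobility MOV on a
// four-node diamond, ignoring the loop-carried Distance * MII term the pass
// subtracts above; the edges and latencies are made up for illustration.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Edge { int From, To, Latency; };

int main() {
  const int N = 4;
  // A small diamond, listed in topological order:
  // 0 -> 1 (lat 2), 0 -> 2 (lat 1), 1 -> 3 (lat 1), 2 -> 3 (lat 3).
  std::vector<Edge> Edges = {{0, 1, 2}, {0, 2, 1}, {1, 3, 1}, {2, 3, 3}};

  std::vector<int> ASAP(N, 0), ALAP(N, 0);
  for (const Edge &E : Edges)                       // forward pass over preds
    ASAP[E.To] = std::max(ASAP[E.To], ASAP[E.From] + E.Latency);
  int MaxASAP = *std::max_element(ASAP.begin(), ASAP.end());

  std::fill(ALAP.begin(), ALAP.end(), MaxASAP);
  for (auto I = Edges.rbegin(); I != Edges.rend(); ++I) // backward pass
    ALAP[I->From] = std::min(ALAP[I->From], ALAP[I->To] - I->Latency);

  for (int V = 0; V < N; ++V)                       // MOV is ALAP - ASAP
    std::printf("node %d: ASAP=%d ALAP=%d MOV=%d\n", V, ASAP[V], ALAP[V],
                ALAP[V] - ASAP[V]);
  return 0;
}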
1623
1624/// Compute the Pred_L(O) set, as defined in the paper. The set is defined
1625/// as the predecessors of the elements of NodeOrder that are not also in
1626/// NodeOrder.
1627static bool pred_L(SetVector<SUnit *> &NodeOrder,
1628 SmallSetVector<SUnit *, 8> &Preds,
1629 const NodeSet *S = nullptr) {
1630 Preds.clear();
1631 for (SetVector<SUnit *>::iterator I = NodeOrder.begin(), E = NodeOrder.end();
1632 I != E; ++I) {
1633 for (SUnit::pred_iterator PI = (*I)->Preds.begin(), PE = (*I)->Preds.end();
1634 PI != PE; ++PI) {
1635 if (S && S->count(PI->getSUnit()) == 0)
1636 continue;
1637 if (ignoreDependence(*PI, true))
1638 continue;
1639 if (NodeOrder.count(PI->getSUnit()) == 0)
1640 Preds.insert(PI->getSUnit());
1641 }
1642 // Back-edges are predecessors with an anti-dependence.
1643 for (SUnit::const_succ_iterator IS = (*I)->Succs.begin(),
1644 ES = (*I)->Succs.end();
1645 IS != ES; ++IS) {
1646 if (IS->getKind() != SDep::Anti)
1647 continue;
1648 if (S && S->count(IS->getSUnit()) == 0)
1649 continue;
1650 if (NodeOrder.count(IS->getSUnit()) == 0)
1651 Preds.insert(IS->getSUnit());
1652 }
1653 }
1654 return Preds.size() > 0;
1655}
1656
1657/// Compute the Succ_L(O) set, as defined in the paper. The set is defined
1658/// as the successors of the elements of NodeOrder that are not also in
1659/// NodeOrder.
1660static bool succ_L(SetVector<SUnit *> &NodeOrder,
1661 SmallSetVector<SUnit *, 8> &Succs,
1662 const NodeSet *S = nullptr) {
1663 Succs.clear();
1664 for (SetVector<SUnit *>::iterator I = NodeOrder.begin(), E = NodeOrder.end();
1665 I != E; ++I) {
1666 for (SUnit::succ_iterator SI = (*I)->Succs.begin(), SE = (*I)->Succs.end();
1667 SI != SE; ++SI) {
1668 if (S && S->count(SI->getSUnit()) == 0)
1669 continue;
1670 if (ignoreDependence(*SI, false))
1671 continue;
1672 if (NodeOrder.count(SI->getSUnit()) == 0)
1673 Succs.insert(SI->getSUnit());
1674 }
1675 for (SUnit::const_pred_iterator PI = (*I)->Preds.begin(),
1676 PE = (*I)->Preds.end();
1677 PI != PE; ++PI) {
1678 if (PI->getKind() != SDep::Anti)
1679 continue;
1680 if (S && S->count(PI->getSUnit()) == 0)
1681 continue;
1682 if (NodeOrder.count(PI->getSUnit()) == 0)
1683 Succs.insert(PI->getSUnit());
1684 }
1685 }
1686 return Succs.size() > 0;
1687}
1688
1689/// Return true if there is a path from the specified node to any of the nodes
1690/// in DestNodes. Keep track and return the nodes in any path.
1691static bool computePath(SUnit *Cur, SetVector<SUnit *> &Path,
1692 SetVector<SUnit *> &DestNodes,
1693 SetVector<SUnit *> &Exclude,
1694 SmallPtrSet<SUnit *, 8> &Visited) {
1695 if (Cur->isBoundaryNode())
1696 return false;
1697 if (Exclude.count(Cur) != 0)
1698 return false;
1699 if (DestNodes.count(Cur) != 0)
1700 return true;
1701 if (!Visited.insert(Cur).second)
1702 return Path.count(Cur) != 0;
1703 bool FoundPath = false;
1704 for (auto &SI : Cur->Succs)
1705 FoundPath |= computePath(SI.getSUnit(), Path, DestNodes, Exclude, Visited);
1706 for (auto &PI : Cur->Preds)
1707 if (PI.getKind() == SDep::Anti)
1708 FoundPath |=
1709 computePath(PI.getSUnit(), Path, DestNodes, Exclude, Visited);
1710 if (FoundPath)
1711 Path.insert(Cur);
1712 return FoundPath;
1713}
1714
1715/// Return true if Set1 is a subset of Set2.
1716template <class S1Ty, class S2Ty> static bool isSubset(S1Ty &Set1, S2Ty &Set2) {
1717 for (typename S1Ty::iterator I = Set1.begin(), E = Set1.end(); I != E; ++I)
1718 if (Set2.count(*I) == 0)
1719 return false;
1720 return true;
1721}
1722
1723/// Compute the live-out registers for the instructions in a node-set.
1724/// The live-out registers are those that are defined in the node-set,
1725/// but not used, except for the use operands of Phis.
1726static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker,
1727 NodeSet &NS) {
1728 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1729 MachineRegisterInfo &MRI = MF.getRegInfo();
1730 SmallVector<RegisterMaskPair, 8> LiveOutRegs;
1731 SmallSet<unsigned, 4> Uses;
1732 for (SUnit *SU : NS) {
1733 const MachineInstr *MI = SU->getInstr();
1734 if (MI->isPHI())
1735 continue;
1736 for (const MachineOperand &MO : MI->operands())
1737 if (MO.isReg() && MO.isUse()) {
1738 unsigned Reg = MO.getReg();
1739 if (TargetRegisterInfo::isVirtualRegister(Reg))
1740 Uses.insert(Reg);
1741 else if (MRI.isAllocatable(Reg))
1742 for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
1743 Uses.insert(*Units);
1744 }
1745 }
1746 for (SUnit *SU : NS)
1747 for (const MachineOperand &MO : SU->getInstr()->operands())
1748 if (MO.isReg() && MO.isDef() && !MO.isDead()) {
1749 unsigned Reg = MO.getReg();
1750 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
1751 if (!Uses.count(Reg))
1752 LiveOutRegs.push_back(RegisterMaskPair(Reg,
1753 LaneBitmask::getNone()));
1754 } else if (MRI.isAllocatable(Reg)) {
1755 for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
1756 if (!Uses.count(*Units))
1757 LiveOutRegs.push_back(RegisterMaskPair(*Units,
1758 LaneBitmask::getNone()));
1759 }
1760 }
1761 RPTracker.addLiveRegs(LiveOutRegs);
1762}
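// A set-arithmetic sketch of the same rule with virtual registers modeled as
// integers: a value is live out of the node-set if some member defines it and
// no non-Phi member uses it. Dead defs and physical register units, which the
// code above also handles, are left out; the tiny instruction encoding is
// illustrative.

#include <set>
#include <vector>

struct Inst {
  bool IsPHI;
  std::vector<int> Defs;
  std::vector<int> Uses;
};

// Registers defined somewhere in the node-set but not used by any non-Phi
// member are live out of the set.
std::set<int> liveOuts(const std::vector<Inst> &NodeSet) {
  std::set<int> Uses, LiveOut;
  for (const Inst &I : NodeSet)
    if (!I.IsPHI)                        // Phi uses do not count as uses here
      Uses.insert(I.Uses.begin(), I.Uses.end());
  for (const Inst &I : NodeSet)
    for (int D : I.Defs)
      if (!Uses.count(D))
        LiveOut.insert(D);
  return LiveOut;
}

int main() {
  // v1 = phi(...); v2 = v1 + 1; v3 = use of v2  ->  only v3 is live out.
  std::vector<Inst> NS = {{true, {1}, {}}, {false, {2}, {1}}, {false, {3}, {2}}};
  return liveOuts(NS) == std::set<int>{3} ? 0 : 1;
}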
1763
1764/// A heuristic to filter nodes in recurrent node-sets if the register
1765/// pressure of a set is too high.
1766void SwingSchedulerDAG::registerPressureFilter(NodeSetType &NodeSets) {
1767 for (auto &NS : NodeSets) {
1768 // Skip small node-sets since they won't cause register pressure problems.
1769 if (NS.size() <= 2)
1770 continue;
1771 IntervalPressure RecRegPressure;
1772 RegPressureTracker RecRPTracker(RecRegPressure);
1773 RecRPTracker.init(&MF, &RegClassInfo, &LIS, BB, BB->end(), false, true);
1774 computeLiveOuts(MF, RecRPTracker, NS);
1775 RecRPTracker.closeBottom();
1776
1777 std::vector<SUnit *> SUnits(NS.begin(), NS.end());
1778 std::sort(SUnits.begin(), SUnits.end(), [](const SUnit *A, const SUnit *B) {
1779 return A->NodeNum > B->NodeNum;
1780 });
1781
1782 for (auto &SU : SUnits) {
1783 // Since we're computing the register pressure for a subset of the
1784 // instructions in a block, we need to set the tracker for each
1785 // instruction in the node-set. The tracker is set to the instruction
1786 // just after the one we're interested in.
1787 MachineBasicBlock::const_iterator CurInstI = SU->getInstr();
1788 RecRPTracker.setPos(std::next(CurInstI));
1789
1790 RegPressureDelta RPDelta;
1791 ArrayRef<PressureChange> CriticalPSets;
1792 RecRPTracker.getMaxUpwardPressureDelta(SU->getInstr(), nullptr, RPDelta,
1793 CriticalPSets,
1794 RecRegPressure.MaxSetPressure);
1795 if (RPDelta.Excess.isValid()) {
1796        DEBUG(dbgs() << "Excess register pressure: SU(" << SU->NodeNum << ") "
1797                     << TRI->getRegPressureSetName(RPDelta.Excess.getPSet())
1798                     << ":" << RPDelta.Excess.getUnitInc());
1799 NS.setExceedPressure(SU);
1800 break;
1801 }
1802 RecRPTracker.recede();
1803 }
1804 }
1805}
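// The heuristic above asks the RegPressureTracker whether any pressure set is
// exceeded while walking the node-set bottom-up. A much smaller stand-in for
// that question: given (def, last-use) cycle intervals for the values in a
// recurrence, does the maximum number of simultaneously live values exceed a
// register budget? The intervals and the budget below are illustrative.

#include <algorithm>
#include <utility>
#include <vector>

// Maximum number of simultaneously live values, from their (def, last-use)
// cycle intervals, by a sweep over the interval endpoints.
int maxPressure(const std::vector<std::pair<int, int>> &Intervals) {
  std::vector<std::pair<int, int>> Events; // (cycle, +1 at def / -1 after use)
  for (const auto &I : Intervals) {
    Events.push_back({I.first, +1});
    Events.push_back({I.second + 1, -1});
  }
  std::sort(Events.begin(), Events.end());
  int Cur = 0, Max = 0;
  for (const auto &E : Events) {
    Cur += E.second;
    Max = std::max(Max, Cur);
  }
  return Max;
}

int main() {
  // Three values all live across cycle 2 exceed a two-register budget.
  std::vector<std::pair<int, int>> LiveRanges = {{0, 3}, {1, 4}, {2, 5}};
  const int RegBudget = 2;                 // illustrative limit
  return maxPressure(LiveRanges) > RegBudget ? 0 : 1;
}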
1806
1807/// A heuristic to colocate node sets that have the same set of
1808/// successors.
1809void SwingSchedulerDAG::colocateNodeSets(NodeSetType &NodeSets) {
1810 unsigned Colocate = 0;
1811 for (int i = 0, e = NodeSets.size(); i < e; ++i) {
1812 NodeSet &N1 = NodeSets[i];
1813 SmallSetVector<SUnit *, 8> S1;
1814 if (N1.empty() || !succ_L(N1, S1))
1815 continue;
1816 for (int j = i + 1; j < e; ++j) {
1817 NodeSet &N2 = NodeSets[j];
1818 if (N1.compareRecMII(N2) != 0)
1819 continue;
1820 SmallSetVector<SUnit *, 8> S2;
1821 if (N2.empty() || !succ_L(N2, S2))
1822 continue;
1823 if (isSubset(S1, S2) && S1.size() == S2.size()) {
1824 N1.setColocate(++Colocate);
1825 N2.setColocate(Colocate);
1826 break;
1827 }
1828 }
1829 }
1830}
1831
1832/// Check if the existing node-sets are profitable. If not, then ignore the
1833/// recurrent node-sets, and attempt to schedule all nodes together. This is
1834/// a heuristic. If the MII is large and there is a non-recurrent node with
1835/// a large depth compared to the MII, then it's best to try and schedule
1836/// all instructions together instead of starting with the recurrent node-sets.
1837void SwingSchedulerDAG::checkNodeSets(NodeSetType &NodeSets) {
1838 // Look for loops with a large MII.
1839 if (MII <= 20)
1840 return;
1841 // Check if the node-set contains only a simple add recurrence.
1842 for (auto &NS : NodeSets)
1843 if (NS.size() > 2)
1844 return;
1845 // If the depth of any instruction is significantly larger than the MII, then
1846 // ignore the recurrent node-sets and treat all instructions equally.
1847 for (auto &SU : SUnits)
1848 if (SU.getDepth() > MII * 1.5) {
1849 NodeSets.clear();
1850      DEBUG(dbgs() << "Clear recurrence node-sets\n");
1851 return;
1852 }
1853}
1854
1855/// Add the nodes that do not belong to a recurrence set into groups
1856/// based upon connected components.
1857void SwingSchedulerDAG::groupRemainingNodes(NodeSetType &NodeSets) {
1858 SetVector<SUnit *> NodesAdded;
1859 SmallPtrSet<SUnit *, 8> Visited;
1860 // Add the nodes that are on a path between the previous node sets and
1861 // the current node set.
1862 for (NodeSet &I : NodeSets) {
1863 SmallSetVector<SUnit *, 8> N;
1864 // Add the nodes from the current node set to the previous node set.
1865 if (succ_L(I, N)) {
1866 SetVector<SUnit *> Path;
1867 for (SUnit *NI : N) {
1868 Visited.clear();
1869 computePath(NI, Path, NodesAdded, I, Visited);
1870 }
1871 if (Path.size() > 0)
1872 I.insert(Path.begin(), Path.end());
1873 }
1874 // Add the nodes from the previous node set to the current node set.
1875 N.clear();
1876 if (succ_L(NodesAdded, N)) {
1877 SetVector<SUnit *> Path;
1878 for (SUnit *NI : N) {
1879 Visited.clear();
1880 computePath(NI, Path, I, NodesAdded, Visited);
1881 }
1882 if (Path.size() > 0)
1883 I.insert(Path.begin(), Path.end());
1884 }
1885 NodesAdded.insert(I.begin(), I.end());
1886 }
1887
1888 // Create a new node set with the connected nodes of any successor of a node
1889 // in a recurrent set.
1890 NodeSet NewSet;
1891 SmallSetVector<SUnit *, 8> N;
1892 if (succ_L(NodesAdded, N))
1893 for (SUnit *I : N)
1894 addConnectedNodes(I, NewSet, NodesAdded);
1895 if (NewSet.size() > 0)
1896 NodeSets.push_back(NewSet);
1897
1898 // Create a new node set with the connected nodes of any predecessor of a node
1899 // in a recurrent set.
1900 NewSet.clear();
1901 if (pred_L(NodesAdded, N))
1902 for (SUnit *I : N)
1903 addConnectedNodes(I, NewSet, NodesAdded);
1904 if (NewSet.size() > 0)
1905 NodeSets.push_back(NewSet);
1906
1907  // Create new node sets with the connected nodes of any remaining node that
1908 // has no predecessor.
1909 for (unsigned i = 0; i < SUnits.size(); ++i) {
1910 SUnit *SU = &SUnits[i];
1911 if (NodesAdded.count(SU) == 0) {
1912 NewSet.clear();
1913 addConnectedNodes(SU, NewSet, NodesAdded);
1914 if (NewSet.size() > 0)
1915 NodeSets.push_back(NewSet);
1916 }
1917 }
1918}
1919
1920/// Add the node to the set, and add all of its connected nodes to the set.
1921void SwingSchedulerDAG::addConnectedNodes(SUnit *SU, NodeSet &NewSet,
1922 SetVector<SUnit *> &NodesAdded) {
1923 NewSet.insert(SU);
1924 NodesAdded.insert(SU);
1925 for (auto &SI : SU->Succs) {
1926 SUnit *Successor = SI.getSUnit();
1927 if (!SI.isArtificial() && NodesAdded.count(Successor) == 0)
1928 addConnectedNodes(Successor, NewSet, NodesAdded);
1929 }
1930 for (auto &PI : SU->Preds) {
1931 SUnit *Predecessor = PI.getSUnit();
1932 if (!PI.isArtificial() && NodesAdded.count(Predecessor) == 0)
1933 addConnectedNodes(Predecessor, NewSet, NodesAdded);
1934 }
1935}
1936
1937/// Return true if Set1 contains elements in Set2. The elements in common
1938/// are returned in a different container.
1939static bool isIntersect(SmallSetVector<SUnit *, 8> &Set1, const NodeSet &Set2,
1940 SmallSetVector<SUnit *, 8> &Result) {
1941 Result.clear();
1942 for (unsigned i = 0, e = Set1.size(); i != e; ++i) {
1943 SUnit *SU = Set1[i];
1944 if (Set2.count(SU) != 0)
1945 Result.insert(SU);
1946 }
1947 return !Result.empty();
1948}
1949
1950/// Merge the recurrence node sets that have the same initial node.
1951void SwingSchedulerDAG::fuseRecs(NodeSetType &NodeSets) {
1952 for (NodeSetType::iterator I = NodeSets.begin(), E = NodeSets.end(); I != E;
1953 ++I) {
1954 NodeSet &NI = *I;
1955 for (NodeSetType::iterator J = I + 1; J != E;) {
1956 NodeSet &NJ = *J;
1957 if (NI.getNode(0)->NodeNum == NJ.getNode(0)->NodeNum) {
1958 if (NJ.compareRecMII(NI) > 0)
1959 NI.setRecMII(NJ.getRecMII());
1960 for (NodeSet::iterator NII = J->begin(), ENI = J->end(); NII != ENI;
1961 ++NII)
1962 I->insert(*NII);
1963 NodeSets.erase(J);
1964 E = NodeSets.end();
1965 } else {
1966 ++J;
1967 }
1968 }
1969 }
1970}
1971
1972/// Remove nodes that have been scheduled in previous NodeSets.
1973void SwingSchedulerDAG::removeDuplicateNodes(NodeSetType &NodeSets) {
1974 for (NodeSetType::iterator I = NodeSets.begin(), E = NodeSets.end(); I != E;
1975 ++I)
1976 for (NodeSetType::iterator J = I + 1; J != E;) {
1977 J->remove_if([&](SUnit *SUJ) { return I->count(SUJ); });
1978
1979 if (J->size() == 0) {
1980 NodeSets.erase(J);
1981 E = NodeSets.end();
1982 } else {
1983 ++J;
1984 }
1985 }
1986}
1987
1988/// Return true if Inst1 defines a value that is used in Inst2.
1989static bool hasDataDependence(SUnit *Inst1, SUnit *Inst2) {
1990 for (auto &SI : Inst1->Succs)
1991 if (SI.getSUnit() == Inst2 && SI.getKind() == SDep::Data)
1992 return true;
1993 return false;
1994}
1995
1996/// Compute an ordered list of the dependence graph nodes, which
1997/// indicates the order that the nodes will be scheduled. This is a
1998/// two-level algorithm. First, a partial order is created, which
1999/// consists of a list of sets ordered from highest to lowest priority.
2000void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
2001 SmallSetVector<SUnit *, 8> R;
2002 NodeOrder.clear();
2003
2004 for (auto &Nodes : NodeSets) {
2005    DEBUG(dbgs() << "NodeSet size " << Nodes.size() << "\n");
2006 OrderKind Order;
2007 SmallSetVector<SUnit *, 8> N;
2008 if (pred_L(NodeOrder, N) && isSubset(N, Nodes)) {
2009 R.insert(N.begin(), N.end());
2010 Order = BottomUp;
2011      DEBUG(dbgs() << " Bottom up (preds) ");
2012 } else if (succ_L(NodeOrder, N) && isSubset(N, Nodes)) {
2013 R.insert(N.begin(), N.end());
2014 Order = TopDown;
2015      DEBUG(dbgs() << " Top down (succs) ");
2016 } else if (isIntersect(N, Nodes, R)) {
2017 // If some of the successors are in the existing node-set, then use the
2018 // top-down ordering.
2019 Order = TopDown;
2020      DEBUG(dbgs() << " Top down (intersect) ");
2021 } else if (NodeSets.size() == 1) {
2022 for (auto &N : Nodes)
2023 if (N->Succs.size() == 0)
2024 R.insert(N);
2025 Order = BottomUp;
2026      DEBUG(dbgs() << " Bottom up (all) ");
2027 } else {
2028 // Find the node with the highest ASAP.
2029 SUnit *maxASAP = nullptr;
2030 for (SUnit *SU : Nodes) {
2031 if (maxASAP == nullptr || getASAP(SU) >= getASAP(maxASAP))
2032 maxASAP = SU;
2033 }
2034 R.insert(maxASAP);
2035 Order = BottomUp;
2036      DEBUG(dbgs() << " Bottom up (default) ");
2037 }
2038
2039 while (!R.empty()) {
2040 if (Order == TopDown) {
2041 // Choose the node with the maximum height. If more than one, choose
2042 // the node with the lowest MOV. If still more than one, check if there
2043 // is a dependence between the instructions.
2044 while (!R.empty()) {
2045 SUnit *maxHeight = nullptr;
2046 for (SUnit *I : R) {
2047 if (maxHeight == nullptr || getHeight(I) > getHeight(maxHeight))
2048 maxHeight = I;
2049 else if (getHeight(I) == getHeight(maxHeight) &&
2050 getMOV(I) < getMOV(maxHeight) &&
2051 !hasDataDependence(maxHeight, I))
2052 maxHeight = I;
2053 else if (hasDataDependence(I, maxHeight))
2054 maxHeight = I;
2055 }
2056 NodeOrder.insert(maxHeight);
2057          DEBUG(dbgs() << maxHeight->NodeNum << " ");
2058 R.remove(maxHeight);
2059 for (const auto &I : maxHeight->Succs) {
2060 if (Nodes.count(I.getSUnit()) == 0)
2061 continue;
2062 if (NodeOrder.count(I.getSUnit()) != 0)
2063 continue;
2064 if (ignoreDependence(I, false))
2065 continue;
2066 R.insert(I.getSUnit());
2067 }
2068 // Back-edges are predecessors with an anti-dependence.
2069 for (const auto &I : maxHeight->Preds) {
2070 if (I.getKind() != SDep::Anti)
2071 continue;
2072 if (Nodes.count(I.getSUnit()) == 0)
2073 continue;
2074 if (NodeOrder.count(I.getSUnit()) != 0)
2075 continue;
2076 R.insert(I.getSUnit());
2077 }
2078 }
2079 Order = BottomUp;
2080        DEBUG(dbgs() << "\n Switching order to bottom up ");
2081 SmallSetVector<SUnit *, 8> N;
2082 if (pred_L(NodeOrder, N, &Nodes))
2083 R.insert(N.begin(), N.end());
2084 } else {
2085 // Choose the node with the maximum depth. If more than one, choose
2086 // the node with the lowest MOV. If there is still more than one, check
2087 // for a dependence between the instructions.
2088 while (!R.empty()) {
2089 SUnit *maxDepth = nullptr;
2090 for (SUnit *I : R) {
2091 if (maxDepth == nullptr || getDepth(I) > getDepth(maxDepth))
2092 maxDepth = I;
2093 else if (getDepth(I) == getDepth(maxDepth) &&
2094 getMOV(I) < getMOV(maxDepth) &&
2095 !hasDataDependence(I, maxDepth))
2096 maxDepth = I;
2097 else if (hasDataDependence(maxDepth, I))
2098 maxDepth = I;
2099 }
2100 NodeOrder.insert(maxDepth);
2101          DEBUG(dbgs() << maxDepth->NodeNum << " ");
2102 R.remove(maxDepth);
2103 if (Nodes.isExceedSU(maxDepth)) {
2104 Order = TopDown;
2105 R.clear();
2106 R.insert(Nodes.getNode(0));
2107 break;
2108 }
2109 for (const auto &I : maxDepth->Preds) {
2110 if (Nodes.count(I.getSUnit()) == 0)
2111 continue;
2112 if (NodeOrder.count(I.getSUnit()) != 0)
2113 continue;
2114 if (I.getKind() == SDep::Anti)
2115 continue;
2116 R.insert(I.getSUnit());
2117 }
2118 // Back-edges are predecessors with an anti-dependence.
2119 for (const auto &I : maxDepth->Succs) {
2120 if (I.getKind() != SDep::Anti)
2121 continue;
2122 if (Nodes.count(I.getSUnit()) == 0)
2123 continue;
2124 if (NodeOrder.count(I.getSUnit()) != 0)
2125 continue;
2126 R.insert(I.getSUnit());
2127 }
2128 }
2129 Order = TopDown;
2130        DEBUG(dbgs() << "\n Switching order to top down ");
2131 SmallSetVector<SUnit *, 8> N;
2132 if (succ_L(NodeOrder, N, &Nodes))
2133 R.insert(N.begin(), N.end());
2134 }
2135 }
2136    DEBUG(dbgs() << "\nDone with Nodeset\n");
2137 }
2138
2139  DEBUG({
2140    dbgs() << "Node order: ";
2141    for (SUnit *I : NodeOrder)
2142      dbgs() << " " << I->NodeNum << " ";
2143    dbgs() << "\n";
2144  });
2145}
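// A sketch of only the per-step selection rule used above, without the
// alternation across node-sets or the data-dependence tie-breaks: a top-down
// sweep takes the ready node with the greatest height and breaks ties with
// the smaller mobility, and a bottom-up sweep does the same with depth. The
// numbers below are made up.

#include <vector>

struct NodeInfo { int Height, Depth, MOV; };

// Top-down: maximize height, then minimize mobility.  (Bottom-up would use
// Depth in place of Height.)  Returns the index of the chosen ready node.
int pickTopDown(const std::vector<NodeInfo> &Info, const std::vector<int> &Ready) {
  int Best = Ready.front();
  for (int V : Ready)
    if (Info[V].Height > Info[Best].Height ||
        (Info[V].Height == Info[Best].Height && Info[V].MOV < Info[Best].MOV))
      Best = V;
  return Best;
}

int main() {
  std::vector<NodeInfo> Info = {{4, 0, 0}, {4, 1, 2}, {3, 2, 1}};
  std::vector<int> Ready = {1, 2, 0};
  return pickTopDown(Info, Ready) == 0 ? 0 : 1; // node 0: height 4, MOV 0
}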
2146
2147/// Process the nodes in the computed order and create the pipelined schedule
2148/// of the instructions, if possible. Return true if a schedule is found.
2149bool SwingSchedulerDAG::schedulePipeline(SMSchedule &Schedule) {
2150
2151 if (NodeOrder.size() == 0)
2152 return false;
2153
2154 bool scheduleFound = false;
2155 // Keep increasing II until a valid schedule is found.
2156 for (unsigned II = MII; II < MII + 10 && !scheduleFound; ++II) {
2157 Schedule.reset();
2158 Schedule.setInitiationInterval(II);
2159    DEBUG(dbgs() << "Try to schedule with " << II << "\n");
2160
2161 SetVector<SUnit *>::iterator NI = NodeOrder.begin();
2162 SetVector<SUnit *>::iterator NE = NodeOrder.end();
2163 do {
2164 SUnit *SU = *NI;
2165
2166 // Compute the schedule time for the instruction, which is based
2167 // upon the scheduled time for any predecessors/successors.
2168      int EarlyStart = INT_MIN;
2169      int LateStart = INT_MAX;
2170 // These values are set when the size of the schedule window is limited
2171 // due to chain dependences.
2172      int SchedEnd = INT_MAX;
2173      int SchedStart = INT_MIN;
2174 Schedule.computeStart(SU, &EarlyStart, &LateStart, &SchedEnd, &SchedStart,
2175 II, this);
2176      DEBUG({
2177        dbgs() << "Inst (" << SU->NodeNum << ") ";
2178        SU->getInstr()->dump();
2179        dbgs() << "\n";
2180      });
2181      DEBUG({
2182        dbgs() << "\tes: " << EarlyStart << " ls: " << LateStart
2183               << " me: " << SchedEnd << " ms: " << SchedStart << "\n";
2184      });
2185
2186 if (EarlyStart > LateStart || SchedEnd < EarlyStart ||
2187 SchedStart > LateStart)
2188 scheduleFound = false;
2189      else if (EarlyStart != INT_MIN && LateStart == INT_MAX) {
2190 SchedEnd = std::min(SchedEnd, EarlyStart + (int)II - 1);
2191 scheduleFound = Schedule.insert(SU, EarlyStart, SchedEnd, II);
2192      } else if (EarlyStart == INT_MIN && LateStart != INT_MAX) {
2193 SchedStart = std::max(SchedStart, LateStart - (int)II + 1);
2194 scheduleFound = Schedule.insert(SU, LateStart, SchedStart, II);
2195      } else if (EarlyStart != INT_MIN && LateStart != INT_MAX) {
2196 SchedEnd =
2197 std::min(SchedEnd, std::min(LateStart, EarlyStart + (int)II - 1));
2198 // When scheduling a Phi it is better to start at the late cycle and go
2199 // backwards. The default order may insert the Phi too far away from
2200 // its first dependence.
2201 if (SU->getInstr()->isPHI())
2202 scheduleFound = Schedule.insert(SU, SchedEnd, EarlyStart, II);
2203 else
2204 scheduleFound = Schedule.insert(SU, EarlyStart, SchedEnd, II);
2205 } else {
2206 int FirstCycle = Schedule.getFirstCycle();
2207 scheduleFound = Schedule.insert(SU, FirstCycle + getASAP(SU),
2208 FirstCycle + getASAP(SU) + II - 1, II);
2209 }
2210 // Even if we find a schedule, make sure the schedule doesn't exceed the
2211 // allowable number of stages. We keep trying if this happens.
2212 if (scheduleFound)
2213 if (SwpMaxStages > -1 &&
2214 Schedule.getMaxStageCount() > (unsigned)SwpMaxStages)
2215 scheduleFound = false;
2216
2217      DEBUG({
2218        if (!scheduleFound)
2219          dbgs() << "\tCan't schedule\n";
2220      });
2221 } while (++NI != NE && scheduleFound);
2222
2223 // If a schedule is found, check if it is a valid schedule too.
2224 if (scheduleFound)
2225 scheduleFound = Schedule.isValidSchedule(this);
2226 }
2227
2228  DEBUG(dbgs() << "Schedule Found? " << scheduleFound << "\n");
2229
2230 if (scheduleFound)
2231 Schedule.finalizeSchedule(this);
2232 else
2233 Schedule.reset();
2234
2235 return scheduleFound && Schedule.getMaxStageCount() > 0;
2236}
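// A sketch of just the scheduling-window logic from the loop above, with the
// INT_MIN/INT_MAX sentinels standing for "no constraint from that side": it
// returns the first and last cycle to try and the search direction, leaving
// out the SchedStart/SchedEnd clamping, the Phi special case, and the actual
// resource check done by SMSchedule::insert.

#include <algorithm>
#include <climits>

struct Window { int First, Last; bool Forward; };

// Clamp the window to at most II cycles: search forward from EarlyStart when
// only predecessors constrain the node, backward from LateStart when only
// successors do, and forward across the intersection when both do.
Window computeWindow(int EarlyStart, int LateStart, int II) {
  if (EarlyStart != INT_MIN && LateStart == INT_MAX)
    return {EarlyStart, EarlyStart + II - 1, true};
  if (EarlyStart == INT_MIN && LateStart != INT_MAX)
    return {LateStart, LateStart - II + 1, false};
  if (EarlyStart != INT_MIN && LateStart != INT_MAX)
    return {EarlyStart, std::min(LateStart, EarlyStart + II - 1), true};
  return {0, II - 1, true};                // unconstrained: any cycle in one II
}

int main() {
  Window W = computeWindow(3, INT_MAX, 4); // predecessors only: cycles 3..6
  return (W.First == 3 && W.Last == 6 && W.Forward) ? 0 : 1;
}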
2237
2238/// Given a schedule for the loop, generate a new version of the loop,
2239/// and replace the old version. This function generates a prolog
2240/// that contains the initial iterations in the pipeline, the kernel
2241/// loop, and the epilog that contains the code for the final
2242/// iterations.
2243void SwingSchedulerDAG::generatePipelinedLoop(SMSchedule &Schedule) {
2244 // Create a new basic block for the kernel and add it to the CFG.
2245 MachineBasicBlock *KernelBB = MF.CreateMachineBasicBlock(BB->getBasicBlock());
2246
2247 unsigned MaxStageCount = Schedule.getMaxStageCount();
2248
2249 // Remember the registers that are used in different stages. The index is
2250 // the iteration, or stage, that the instruction is scheduled in. This is
2251  // a map between register names in the original block and the names created
2252 // in each stage of the pipelined loop.
2253 ValueMapTy *VRMap = new ValueMapTy[(MaxStageCount + 1) * 2];
2254 InstrMapTy InstrMap;
2255
2256 SmallVector<MachineBasicBlock *, 4> PrologBBs;
2257 // Generate the prolog instructions that set up the pipeline.
2258 generateProlog(Schedule, MaxStageCount, KernelBB, VRMap, PrologBBs);
2259 MF.insert(BB->getIterator(), KernelBB);
2260
2261 // Rearrange the instructions to generate the new, pipelined loop,
2262 // and update register names as needed.
2263 for (int Cycle = Schedule.getFirstCycle(),
2264 LastCycle = Schedule.getFinalCycle();
2265 Cycle <= LastCycle; ++Cycle) {
2266 std::deque<SUnit *> &CycleInstrs = Schedule.getInstructions(Cycle);
2267 // This inner loop schedules each instruction in the cycle.
2268 for (SUnit *CI : CycleInstrs) {
2269 if (CI->getInstr()->isPHI())
2270 continue;
2271 unsigned StageNum = Schedule.stageScheduled(getSUnit(CI->getInstr()));
2272 MachineInstr *NewMI = cloneInstr(CI->getInstr(), MaxStageCount, StageNum);
2273 updateInstruction(NewMI, false, MaxStageCount, StageNum, Schedule, VRMap);
2274 KernelBB->push_back(NewMI);
2275 InstrMap[NewMI] = CI->getInstr();
2276 }
2277 }
2278
2279 // Copy any terminator instructions to the new kernel, and update
2280 // names as needed.
2281 for (MachineBasicBlock::iterator I = BB->getFirstTerminator(),
2282 E = BB->instr_end();
2283 I != E; ++I) {
2284 MachineInstr *NewMI = MF.CloneMachineInstr(&*I);
2285 updateInstruction(NewMI, false, MaxStageCount, 0, Schedule, VRMap);
2286 KernelBB->push_back(NewMI);
2287 InstrMap[NewMI] = &*I;
2288 }
2289
2290 KernelBB->transferSuccessors(BB);
2291 KernelBB->replaceSuccessor(BB, KernelBB);
2292
2293 generateExistingPhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, Schedule,
2294 VRMap, InstrMap, MaxStageCount, MaxStageCount, false);
2295 generatePhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, Schedule, VRMap,
2296 InstrMap, MaxStageCount, MaxStageCount, false);
2297
2298  DEBUG(dbgs() << "New block\n"; KernelBB->dump(););
2299
2300 SmallVector<MachineBasicBlock *, 4> EpilogBBs;
2301 // Generate the epilog instructions to complete the pipeline.
2302 generateEpilog(Schedule, MaxStageCount, KernelBB, VRMap, EpilogBBs,
2303 PrologBBs);
2304
2305 // We need this step because the register allocation doesn't handle some
2306 // situations well, so we insert copies to help out.
2307 splitLifetimes(KernelBB, EpilogBBs, Schedule);
2308
2309 // Remove dead instructions due to loop induction variables.
2310 removeDeadInstructions(KernelBB, EpilogBBs);
2311
2312 // Add branches between prolog and epilog blocks.
2313 addBranches(PrologBBs, KernelBB, EpilogBBs, Schedule, VRMap);
2314
2315 // Remove the original loop since it's no longer referenced.
2316 BB->clear();
2317 BB->eraseFromParent();
2318
2319 delete[] VRMap;
2320}
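// A purely illustrative picture of what the prolog, kernel, and epilog blocks
// built here end up covering, for a schedule with three stages and no regard
// for the actual instructions or Phi rewriting: each prolog block fills the
// pipeline by one more iteration, the kernel runs one stage of each in-flight
// iteration, and the epilog blocks drain the iterations still in flight.

#include <cstdio>

int main() {
  const int Stages = 3;                        // illustrative stage count
  for (int P = 0; P < Stages - 1; ++P) {       // prolog blocks fill the pipe
    std::printf("prolog %d:", P);
    for (int S = P; S >= 0; --S)
      std::printf(" it%d/stage%d", P - S, S);
    std::printf("\n");
  }
  std::printf("kernel  :");                    // steady state, iteration k
  for (int S = 0; S < Stages; ++S)
    std::printf(" it(k-%d)/stage%d", S, S);
  std::printf("\n");
  for (int E = 1; E < Stages; ++E) {           // epilog blocks drain the pipe
    std::printf("epilog %d:", E);
    for (int S = E; S < Stages; ++S)
      std::printf(" it(n-%d)/stage%d", S - E, S);
    std::printf("\n");
  }
  return 0;
}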
2321
2322/// Generate the pipeline prolog code.
2323void SwingSchedulerDAG::generateProlog(SMSchedule &Schedule, unsigned LastStage,
2324 MachineBasicBlock *KernelBB,
2325 ValueMapTy *VRMap,
2326 MBBVectorTy &PrologBBs) {
2327 MachineBasicBlock *PreheaderBB = MLI->getLoopFor(BB)->getLoopPreheader();
2328  assert(PreheaderBB != NULL &&
2329         "Need to add code to handle loops w/o preheader");
2330 MachineBasicBlock *PredBB = PreheaderBB;
2331 InstrMapTy InstrMap;
2332
2333 // Generate a basic block for each stage, not including the last stage,
2334 // which will be generated in the kernel. Each basic block may contain
2335 // instructions from multiple stages/iterations.
2336 for (unsigned i = 0; i < LastStage; ++i) {
2337 // Create and insert the prolog basic block prior to the original loop
2338 // basic block. The original loop is removed later.
2339 MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock(BB->getBasicBlock());
2340 PrologBBs.push_back(NewBB);
2341 MF.insert(BB->getIterator(), NewBB);
2342 NewBB->transferSuccessors(PredBB);
2343 PredBB->addSuccessor(NewBB);
2344 PredBB = NewBB;
2345
2346 // Generate instructions for each appropriate stage. Process instructions
2347 // in original program order.
2348 for (int StageNum = i; StageNum >= 0; --StageNum) {
2349 for (MachineBasicBlock::iterator BBI = BB->instr_begin(),
2350 BBE = BB->getFirstTerminator();
2351 BBI != BBE; ++BBI) {
2352 if (Schedule.isScheduledAtStage(getSUnit(&*BBI), (unsigned)StageNum)) {
2353 if (BBI->isPHI())
2354 continue;
2355 MachineInstr *NewMI =
2356 cloneAndChangeInstr(&*BBI, i, (unsigned)StageNum, Schedule);
2357 updateInstruction(NewMI, false, i, (unsigned)StageNum, Schedule,
2358 VRMap);
2359 NewBB->push_back(NewMI);
2360 InstrMap[NewMI] = &*BBI;
2361 }
2362 }
2363 }
2364 rewritePhiValues(NewBB, i, Schedule, VRMap, InstrMap);
2365    DEBUG({
2366      dbgs() << "prolog:\n";
2367      NewBB->dump();
2368    });
2369 }
2370
2371 PredBB->replaceSuccessor(BB, KernelBB);
2372
2373 // Check if we need to remove the branch from the preheader to the original
2374 // loop, and replace it with a branch to the new loop.
2375 unsigned numBranches = TII->removeBranch(*PreheaderBB);
2376 if (numBranches) {
2377 SmallVector<MachineOperand, 0> Cond;
2378 TII->insertBranch(*PreheaderBB, PrologBBs[0], nullptr, Cond, DebugLoc());
2379 }
2380}
2381
2382/// Generate the pipeline epilog code. The epilog code finishes the iterations
2383/// that were started in either the prolog or the kernel. We create a basic
2384/// block for each stage that needs to complete.
2385void SwingSchedulerDAG::generateEpilog(SMSchedule &Schedule, unsigned LastStage,
2386 MachineBasicBlock *KernelBB,
2387 ValueMapTy *VRMap,
2388 MBBVectorTy &EpilogBBs,
2389 MBBVectorTy &PrologBBs) {
2390 // We need to change the branch from the kernel to the first epilog block, so
2391 // this call to analyze branch uses the kernel rather than the original BB.
2392 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
2393 SmallVector<MachineOperand, 4> Cond;
2394 bool checkBranch = TII->analyzeBranch(*KernelBB, TBB, FBB, Cond);
2395  assert(!checkBranch && "generateEpilog must be able to analyze the branch");
2396 if (checkBranch)
2397 return;
2398
2399 MachineBasicBlock::succ_iterator LoopExitI = KernelBB->succ_begin();
2400 if (*LoopExitI == KernelBB)
2401 ++LoopExitI;
2402  assert(LoopExitI != KernelBB->succ_end() && "Expecting a successor");
2403 MachineBasicBlock *LoopExitBB = *LoopExitI;
2404
2405 MachineBasicBlock *PredBB = KernelBB;
2406 MachineBasicBlock *EpilogStart = LoopExitBB;
2407 InstrMapTy InstrMap;
2408
2409 // Generate a basic block for each stage, not including the last stage,
2410 // which was generated for the kernel. Each basic block may contain
2411 // instructions from multiple stages/iterations.
2412 int EpilogStage = LastStage + 1;
2413 for (unsigned i = LastStage; i >= 1; --i, ++EpilogStage) {
2414 MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock();
2415 EpilogBBs.push_back(NewBB);
2416 MF.insert(BB->getIterator(), NewBB);
2417
2418 PredBB->replaceSuccessor(LoopExitBB, NewBB);
2419 NewBB->addSuccessor(LoopExitBB);
2420
2421 if (EpilogStart == LoopExitBB)
2422 EpilogStart = NewBB;
2423
2424 // Add instructions to the epilog depending on the current block.
2425 // Process instructions in original program order.
2426 for (unsigned StageNum = i; StageNum <= LastStage; ++StageNum) {
2427 for (auto &BBI : *BB) {
2428 if (BBI.isPHI())
2429 continue;
2430 MachineInstr *In = &BBI;
2431 if (Schedule.isScheduledAtStage(getSUnit(In), StageNum)) {
2432 MachineInstr *NewMI = cloneInstr(In, EpilogStage - LastStage, 0);
2433 updateInstruction(NewMI, i == 1, EpilogStage, 0, Schedule, VRMap);
2434 NewBB->push_back(NewMI);
2435 InstrMap[NewMI] = In;
2436 }
2437 }
2438 }
2439 generateExistingPhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, Schedule,
2440 VRMap, InstrMap, LastStage, EpilogStage, i == 1);
2441 generatePhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, Schedule, VRMap,
2442 InstrMap, LastStage, EpilogStage, i == 1);
2443 PredBB = NewBB;
2444
2445    DEBUG({
2446      dbgs() << "epilog:\n";
2447      NewBB->dump();
2448    });
2449 }
2450
2451 // Fix any Phi nodes in the loop exit block.
2452 for (MachineInstr &MI : *LoopExitBB) {
2453 if (!MI.isPHI())
2454 break;
2455 for (unsigned i = 2, e = MI.getNumOperands() + 1; i != e; i += 2) {
2456 MachineOperand &MO = MI.getOperand(i);
2457 if (MO.getMBB() == BB)
2458 MO.setMBB(PredBB);
2459 }
2460 }
2461
2462 // Create a branch to the new epilog from the kernel.
2463 // Remove the original branch and add a new branch to the epilog.
2464 TII->removeBranch(*KernelBB);
2465 TII->insertBranch(*KernelBB, KernelBB, EpilogStart, Cond, DebugLoc());
2466 // Add a branch to the loop exit.
2467 if (EpilogBBs.size() > 0) {
2468 MachineBasicBlock *LastEpilogBB = EpilogBBs.back();
2469 SmallVector<MachineOperand, 4> Cond1;
2470 TII->insertBranch(*LastEpilogBB, LoopExitBB, nullptr, Cond1, DebugLoc());
2471 }
2472}
2473
2474/// Replace all uses of FromReg that appear outside the specified
2475/// basic block with ToReg.
2476static void replaceRegUsesAfterLoop(unsigned FromReg, unsigned ToReg,
2477 MachineBasicBlock *MBB,
2478 MachineRegisterInfo &MRI,
2479 LiveIntervals &LIS) {
2480 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(FromReg),
2481 E = MRI.use_end();
2482 I != E;) {
2483 MachineOperand &O = *I;
2484 ++I;
2485 if (O.getParent()->getParent() != MBB)
2486 O.setReg(ToReg);
2487 }
2488 if (!LIS.hasInterval(ToReg))
2489 LIS.createEmptyInterval(ToReg);
2490}
2491
2492/// Return true if the register has a use that occurs outside the
2493/// specified loop.
2494static bool hasUseAfterLoop(unsigned Reg, MachineBasicBlock *BB,
2495 MachineRegisterInfo &MRI) {
2496 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
2497 E = MRI.use_end();
2498 I != E; ++I)
2499 if (I->getParent()->getParent() != BB)
2500 return true;
2501 return false;
2502}
2503
2504/// Generate Phis for the specific block in the generated pipelined code.
2505/// This function looks at the Phis from the original code to guide the
2506/// creation of new Phis.
2507void SwingSchedulerDAG::generateExistingPhis(
2508 MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2,
2509 MachineBasicBlock *KernelBB, SMSchedule &Schedule, ValueMapTy *VRMap,
2510 InstrMapTy &InstrMap, unsigned LastStageNum, unsigned CurStageNum,
2511 bool IsLast) {
2512 // Compute the stage number for the initial value of the Phi, which
2513  // comes from the prolog. The prolog to use depends on which kernel/
2514  // epilog block we're adding the Phi to.
2515 unsigned PrologStage = 0;
2516 unsigned PrevStage = 0;
2517 bool InKernel = (LastStageNum == CurStageNum);
2518 if (InKernel) {
2519 PrologStage = LastStageNum - 1;
2520 PrevStage = CurStageNum;
2521 } else {
2522 PrologStage = LastStageNum - (CurStageNum - LastStageNum);
2523 PrevStage = LastStageNum + (CurStageNum - LastStageNum) - 1;
2524 }
2525
2526 for (MachineBasicBlock::iterator BBI = BB->instr_begin(),
2527 BBE = BB->getFirstNonPHI();
2528 BBI != BBE; ++BBI) {
2529 unsigned Def = BBI->getOperand(0).getReg();
2530
2531 unsigned InitVal = 0;
2532 unsigned LoopVal = 0;
2533 getPhiRegs(*BBI, BB, InitVal, LoopVal);
2534
2535 unsigned PhiOp1 = 0;
2536 // The Phi value from the loop body typically is defined in the loop, but
2537 // not always. So, we need to check if the value is defined in the loop.
2538 unsigned PhiOp2 = LoopVal;
2539 if (VRMap[LastStageNum].count(LoopVal))
2540 PhiOp2 = VRMap[LastStageNum][LoopVal];
2541
2542 int StageScheduled = Schedule.stageScheduled(getSUnit(&*BBI));
2543 int LoopValStage =
2544 Schedule.stageScheduled(getSUnit(MRI.getVRegDef(LoopVal)));
2545 unsigned NumStages = Schedule.getStagesForReg(Def, CurStageNum);
2546 if (NumStages == 0) {
2547 // We don't need to generate a Phi anymore, but we need to rename any uses
2548 // of the Phi value.
2549 unsigned NewReg = VRMap[PrevStage][LoopVal];
2550 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, 0, &*BBI,
2551 Def, NewReg);
2552 if (VRMap[CurStageNum].count(LoopVal))
2553 VRMap[CurStageNum][Def] = VRMap[CurStageNum][LoopVal];
2554 }
2555 // Adjust the number of Phis needed depending on the number of prologs left,
2556 // and the distance from where the Phi is first scheduled.
2557 unsigned NumPhis = NumStages;
2558 if (!InKernel && (int)PrologStage < LoopValStage)
2559 // NumPhis is the maximum number of new Phis needed during the steady
2560 // state. If the Phi has not been scheduled in the current prolog, then we
2561 // need to generate fewer Phis.
2562 NumPhis = std::max((int)NumPhis - (int)(LoopValStage - PrologStage), 1);
2563 // The number of Phis cannot exceed the number of prolog stages. Each
2564 // stage can potentially define two values.
2565 NumPhis = std::min(NumPhis, PrologStage + 2);
2566
2567 unsigned NewReg = 0;
2568
2569 unsigned AccessStage = (LoopValStage != -1) ? LoopValStage : StageScheduled;
2570 // In the epilog, we may need to look back one stage to get the correct
2571 // Phi name because the epilog and prolog blocks execute the same stage.
2572 // The correct name is from the previous block only when the Phi has
2573 // been completely scheduled prior to the epilog, and the Phi value is not
2574 // needed in multiple stages.
2575 int StageDiff = 0;
2576 if (!InKernel && StageScheduled >= LoopValStage && AccessStage == 0 &&
2577 NumPhis == 1)
2578 StageDiff = 1;
2579 // Adjust the computations below when the phi and the loop definition
2580 // are scheduled in different stages.
2581 if (InKernel && LoopValStage != -1 && StageScheduled > LoopValStage)
2582 StageDiff = StageScheduled - LoopValStage;
2583 for (unsigned np = 0; np < NumPhis; ++np) {
2584 // If the Phi hasn't been scheduled, then use the initial Phi operand
2585 // value. Otherwise, use the scheduled version of the instruction. This
2586 // is a little complicated when a Phi references another Phi.
2587 if (np > PrologStage || StageScheduled >= (int)LastStageNum)
2588 PhiOp1 = InitVal;
2589 // Check if the Phi has already been scheduled in a prolog stage.
2590 else if (PrologStage >= AccessStage + StageDiff + np &&
2591 VRMap[PrologStage - StageDiff - np].count(LoopVal) != 0)
2592 PhiOp1 = VRMap[PrologStage - StageDiff - np][LoopVal];
2593 // Check if the Phi has already been scheduled, but the loop instruction
2594 // is either another Phi, or doesn't occur in the loop.
2595 else if (PrologStage >= AccessStage + StageDiff + np) {
2596 // If the Phi references another Phi, we need to examine the other
2597 // Phi to get the correct value.
2598 PhiOp1 = LoopVal;
2599 MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1);
2600 int Indirects = 1;
2601 while (InstOp1 && InstOp1->isPHI() && InstOp1->getParent() == BB) {
2602 int PhiStage = Schedule.stageScheduled(getSUnit(InstOp1));
2603 if ((int)(PrologStage - StageDiff - np) < PhiStage + Indirects)
2604 PhiOp1 = getInitPhiReg(*InstOp1, BB);
2605 else
2606 PhiOp1 = getLoopPhiReg(*InstOp1, BB);
2607 InstOp1 = MRI.getVRegDef(PhiOp1);
2608 int PhiOpStage = Schedule.stageScheduled(getSUnit(InstOp1));
2609 int StageAdj = (PhiOpStage != -1 ? PhiStage - PhiOpStage : 0);
2610 if (PhiOpStage != -1 && PrologStage - StageAdj >= Indirects + np &&
2611 VRMap[PrologStage - StageAdj - Indirects - np].count(PhiOp1)) {
2612 PhiOp1 = VRMap[PrologStage - StageAdj - Indirects - np][PhiOp1];
2613 break;
2614 }
2615 ++Indirects;
2616 }
2617 } else
2618 PhiOp1 = InitVal;
2619 // If this references a generated Phi in the kernel, get the Phi operand
2620 // from the incoming block.
2621 if (MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1))
2622 if (InstOp1->isPHI() && InstOp1->getParent() == KernelBB)
2623 PhiOp1 = getInitPhiReg(*InstOp1, KernelBB);
2624
2625 MachineInstr *PhiInst = MRI.getVRegDef(LoopVal);
2626 bool LoopDefIsPhi = PhiInst && PhiInst->isPHI();
2627 // In the epilog, a map lookup is needed to get the value from the kernel,
2628 // or the previous epilog block. Which value to use depends on whether the
2629 // instruction is scheduled in the previous block.
2630 if (!InKernel) {
2631 int StageDiffAdj = 0;
2632 if (LoopValStage != -1 && StageScheduled > LoopValStage)
2633 StageDiffAdj = StageScheduled - LoopValStage;
2634 // Use the loop value defined in the kernel, unless the kernel
2635 // contains the last definition of the Phi.
2636 if (np == 0 && PrevStage == LastStageNum &&
2637 (StageScheduled != 0 || LoopValStage != 0) &&
2638 VRMap[PrevStage - StageDiffAdj].count(LoopVal))
2639 PhiOp2 = VRMap[PrevStage - StageDiffAdj][LoopVal];
2640 // Use the value defined by the Phi. We add one because we switch
2641 // from looking at the loop value to the Phi definition.
2642 else if (np > 0 && PrevStage == LastStageNum &&
2643 VRMap[PrevStage - np + 1].count(Def))
2644 PhiOp2 = VRMap[PrevStage - np + 1][Def];
2645 // Use the loop value defined in the kernel.
2646 else if ((unsigned)LoopValStage + StageDiffAdj > PrologStage + 1 &&
2647 VRMap[PrevStage - StageDiffAdj - np].count(LoopVal))
2648 PhiOp2 = VRMap[PrevStage - StageDiffAdj - np][LoopVal];
2649 // Use the value defined by the Phi, unless we're generating the first
2650 // epilog and the Phi refers to a Phi in a different stage.
2651 else if (VRMap[PrevStage - np].count(Def) &&
2652 (!LoopDefIsPhi || PrevStage != LastStageNum))
2653 PhiOp2 = VRMap[PrevStage - np][Def];
2654 }
2655
2656 // Check if we can reuse an existing Phi. This occurs when a Phi
2657 // references another Phi, and the other Phi is scheduled in an
2658 // earlier stage. We can try to reuse an existing Phi up until the last
2659 // stage of the current Phi.
2660 if (LoopDefIsPhi && (int)PrologStage >= StageScheduled) {
2661 int LVNumStages = Schedule.getStagesForPhi(LoopVal);
2662 int StageDiff = (StageScheduled - LoopValStage);
2663 LVNumStages -= StageDiff;
2664 if (LVNumStages > (int)np) {
2665 NewReg = PhiOp2;
Value stored to 'NewReg' is never read
2666 unsigned ReuseStage = CurStageNum;
2667 if (Schedule.isLoopCarried(this, *PhiInst))
2668 ReuseStage -= LVNumStages;
2669 // Check if the Phi to reuse has been generated yet. If not, then
2670 // there is nothing to reuse.
2671 if (VRMap[ReuseStage].count(LoopVal)) {
2672 NewReg = VRMap[ReuseStage][LoopVal];
2673
2674 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np,
2675 &*BBI, Def, NewReg);
2676 // Update the map with the new Phi name.
2677 VRMap[CurStageNum - np][Def] = NewReg;
2678 PhiOp2 = NewReg;
2679 if (VRMap[LastStageNum - np - 1].count(LoopVal))
2680 PhiOp2 = VRMap[LastStageNum - np - 1][LoopVal];
2681
2682 if (IsLast && np == NumPhis - 1)
2683 replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS);
2684 continue;
2685 }
2686 } else if (InKernel && StageDiff > 0 &&
2687 VRMap[CurStageNum - StageDiff - np].count(LoopVal))
2688 PhiOp2 = VRMap[CurStageNum - StageDiff - np][LoopVal];
2689 }
2690
2691 const TargetRegisterClass *RC = MRI.getRegClass(Def);
2692 NewReg = MRI.createVirtualRegister(RC);
2693
2694 MachineInstrBuilder NewPhi =
2695 BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(),
2696 TII->get(TargetOpcode::PHI), NewReg);
2697 NewPhi.addReg(PhiOp1).addMBB(BB1);
2698 NewPhi.addReg(PhiOp2).addMBB(BB2);
2699 if (np == 0)
2700 InstrMap[NewPhi] = &*BBI;
2701
2702 // We define the Phis after creating the new pipelined code, so
2703 // we need to rename the Phi values in scheduled instructions.
2704
2705 unsigned PrevReg = 0;
2706 if (InKernel && VRMap[PrevStage - np].count(LoopVal))
2707 PrevReg = VRMap[PrevStage - np][LoopVal];
2708 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, &*BBI,
2709 Def, NewReg, PrevReg);
2710 // If the Phi has been scheduled, use the new name for rewriting.
2711 if (VRMap[CurStageNum - np].count(Def)) {
2712 unsigned R = VRMap[CurStageNum - np][Def];
2713 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, &*BBI,
2714 R, NewReg);
2715 }
2716
2717 // Check if we need to rename any uses that occur after the loop. The
2718 // register to replace depends on whether the Phi is scheduled in the
2719 // epilog.
2720 if (IsLast && np == NumPhis - 1)
2721 replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS);
2722
2723 // In the kernel, a dependent Phi uses the value from this Phi.
2724 if (InKernel)
2725 PhiOp2 = NewReg;
2726
2727 // Update the map with the new Phi name.
2728 VRMap[CurStageNum - np][Def] = NewReg;
2729 }
2730
2731 while (NumPhis++ < NumStages) {
2732 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, NumPhis,
2733 &*BBI, Def, NewReg, 0);
2734 }
2735
2736 // Check if we need to rename a Phi that has been eliminated due to
2737 // scheduling.
2738 if (NumStages == 0 && IsLast && VRMap[CurStageNum].count(LoopVal))
2739 replaceRegUsesAfterLoop(Def, VRMap[CurStageNum][LoopVal], BB, MRI, LIS);
2740 }
2741}
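
The warning reported at line 2665 falls inside this function: the assignment NewReg = PhiOp2 is overwritten on every path before NewReg is read, either by the VRMap lookup at line 2672 or by createVirtualRegister at line 2692, so the store is dead. The following is a reduced, standalone sketch of that pattern, not code from MachinePipeliner.cpp; the names and register values are illustrative only.

#include <iostream>
#include <map>

// Mirrors the control flow around line 2665: the first assignment to NewReg
// is dead because both the reuse path and the fall-through path assign it
// again before any read.
unsigned pickReg(const std::map<unsigned, unsigned> &VRMap, unsigned LoopVal,
                 unsigned PhiOp2, bool MayReuse) {
  unsigned NewReg = 0;
  if (MayReuse) {
    NewReg = PhiOp2;            // dead store: never read before the writes below
    auto It = VRMap.find(LoopVal);
    if (It != VRMap.end()) {
      NewReg = It->second;      // reuse path overwrites NewReg
      return NewReg;
    }
  }
  NewReg = 1001;                // stands in for createVirtualRegister()
  return NewReg;
}

int main() {
  std::map<unsigned, unsigned> VRMap{{5, 42}};
  std::cout << pickReg(VRMap, 5, 7, true) << " "    // 42
            << pickReg(VRMap, 6, 7, true) << "\n";  // 1001
}
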
2742
2743/// Generate Phis for the specified block in the generated pipelined code.
2744/// These are new Phis needed because the definition is scheduled after the
2745 /// use in the pipelined sequence.
2746void SwingSchedulerDAG::generatePhis(
2747 MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2,
2748 MachineBasicBlock *KernelBB, SMSchedule &Schedule, ValueMapTy *VRMap,
2749 InstrMapTy &InstrMap, unsigned LastStageNum, unsigned CurStageNum,
2750 bool IsLast) {
2751 // Compute the stage number that contains the initial Phi value, and
2752 // the Phi from the previous stage.
2753 unsigned PrologStage = 0;
2754 unsigned PrevStage = 0;
2755 unsigned StageDiff = CurStageNum - LastStageNum;
2756 bool InKernel = (StageDiff == 0);
2757 if (InKernel) {
2758 PrologStage = LastStageNum - 1;
2759 PrevStage = CurStageNum;
2760 } else {
2761 PrologStage = LastStageNum - StageDiff;
2762 PrevStage = LastStageNum + StageDiff - 1;
2763 }
2764
2765 for (MachineBasicBlock::iterator BBI = BB->getFirstNonPHI(),
2766 BBE = BB->instr_end();
2767 BBI != BBE; ++BBI) {
2768 for (unsigned i = 0, e = BBI->getNumOperands(); i != e; ++i) {
2769 MachineOperand &MO = BBI->getOperand(i);
2770 if (!MO.isReg() || !MO.isDef() ||
2771 !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
2772 continue;
2773
2774 int StageScheduled = Schedule.stageScheduled(getSUnit(&*BBI));
2775 assert(StageScheduled != -1 && "Expecting scheduled instruction.");
2776 unsigned Def = MO.getReg();
2777 unsigned NumPhis = Schedule.getStagesForReg(Def, CurStageNum);
2778 // An instruction that is scheduled in stage 0 and used after the loop
2779 // requires a phi in the epilog for the last definition from either
2780 // the kernel or prolog.
2781 if (!InKernel && NumPhis == 0 && StageScheduled == 0 &&
2782 hasUseAfterLoop(Def, BB, MRI))
2783 NumPhis = 1;
2784 if (!InKernel && (unsigned)StageScheduled > PrologStage)
2785 continue;
2786
2787 unsigned PhiOp2 = VRMap[PrevStage][Def];
2788 if (MachineInstr *InstOp2 = MRI.getVRegDef(PhiOp2))
2789 if (InstOp2->isPHI() && InstOp2->getParent() == NewBB)
2790 PhiOp2 = getLoopPhiReg(*InstOp2, BB2);
2791 // The number of Phis can't exceed the number of prolog stages. The
2792 // prolog stage number is zero based.
2793 if (NumPhis > PrologStage + 1 - StageScheduled)
2794 NumPhis = PrologStage + 1 - StageScheduled;
2795 for (unsigned np = 0; np < NumPhis; ++np) {
2796 unsigned PhiOp1 = VRMap[PrologStage][Def];
2797 if (np <= PrologStage)
2798 PhiOp1 = VRMap[PrologStage - np][Def];
2799 if (MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1)) {
2800 if (InstOp1->isPHI() && InstOp1->getParent() == KernelBB)
2801 PhiOp1 = getInitPhiReg(*InstOp1, KernelBB);
2802 if (InstOp1->isPHI() && InstOp1->getParent() == NewBB)
2803 PhiOp1 = getInitPhiReg(*InstOp1, NewBB);
2804 }
2805 if (!InKernel)
2806 PhiOp2 = VRMap[PrevStage - np][Def];
2807
2808 const TargetRegisterClass *RC = MRI.getRegClass(Def);
2809 unsigned NewReg = MRI.createVirtualRegister(RC);
2810
2811 MachineInstrBuilder NewPhi =
2812 BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(),
2813 TII->get(TargetOpcode::PHI), NewReg);
2814 NewPhi.addReg(PhiOp1).addMBB(BB1);
2815 NewPhi.addReg(PhiOp2).addMBB(BB2);
2816 if (np == 0)
2817 InstrMap[NewPhi] = &*BBI;
2818
2819 // Rewrite uses and update the map. The actions depend upon whether
2820 // we are generating code for the kernel or epilog blocks.
2821 if (InKernel) {
2822 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np,
2823 &*BBI, PhiOp1, NewReg);
2824 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np,
2825 &*BBI, PhiOp2, NewReg);
2826
2827 PhiOp2 = NewReg;
2828 VRMap[PrevStage - np - 1][Def] = NewReg;
2829 } else {
2830 VRMap[CurStageNum - np][Def] = NewReg;
2831 if (np == NumPhis - 1)
2832 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np,
2833 &*BBI, Def, NewReg);
2834 }
2835 if (IsLast && np == NumPhis - 1)
2836 replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS);
2837 }
2838 }
2839 }
2840}
2841
2842/// Remove instructions that generate values with no uses.
2843/// Typically, these are induction variable operations that generate values
2844/// used in the loop itself. A dead instruction has a definition with
2845/// no uses, or uses that occur in the original loop only.
2846void SwingSchedulerDAG::removeDeadInstructions(MachineBasicBlock *KernelBB,
2847 MBBVectorTy &EpilogBBs) {
2848 // For each epilog block, check that the value defined by each instruction
2849 // is used. If not, delete it.
2850 for (MBBVectorTy::reverse_iterator MBB = EpilogBBs.rbegin(),
2851 MBE = EpilogBBs.rend();
2852 MBB != MBE; ++MBB)
2853 for (MachineBasicBlock::reverse_instr_iterator MI = (*MBB)->instr_rbegin(),
2854 ME = (*MBB)->instr_rend();
2855 MI != ME;) {
2856 // From DeadMachineInstructionElim. Don't delete inline assembly.
2857 if (MI->isInlineAsm()) {
2858 ++MI;
2859 continue;
2860 }
2861 bool SawStore = false;
2862 // Check if it's safe to remove the instruction due to side effects.
2863 // We can, and want to, remove Phis here.
2864 if (!MI->isSafeToMove(nullptr, SawStore) && !MI->isPHI()) {
2865 ++MI;
2866 continue;
2867 }
2868 bool used = true;
2869 for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
2870 MOE = MI->operands_end();
2871 MOI != MOE; ++MOI) {
2872 if (!MOI->isReg() || !MOI->isDef())
2873 continue;
2874 unsigned reg = MOI->getReg();
2875 unsigned realUses = 0;
2876 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(reg),
2877 EI = MRI.use_end();
2878 UI != EI; ++UI) {
2879 // Check if there are any uses that occur only in the original
2880 // loop. If so, that's not a real use.
2881 if (UI->getParent()->getParent() != BB) {
2882 realUses++;
2883 used = true;
2884 break;
2885 }
2886 }
2887 if (realUses > 0)
2888 break;
2889 used = false;
2890 }
2891 if (!used) {
2892 MI++->eraseFromParent();
2893 continue;
2894 }
2895 ++MI;
2896 }
2897 // In the kernel block, check if we can remove a Phi that generates a value
2898 // used in an instruction removed in the epilog block.
2899 for (MachineBasicBlock::iterator BBI = KernelBB->instr_begin(),
2900 BBE = KernelBB->getFirstNonPHI();
2901 BBI != BBE;) {
2902 MachineInstr *MI = &*BBI;
2903 ++BBI;
2904 unsigned reg = MI->getOperand(0).getReg();
2905 if (MRI.use_begin(reg) == MRI.use_end()) {
2906 MI->eraseFromParent();
2907 }
2908 }
2909}
2910
2911/// For loop carried definitions, we split the lifetime of a virtual register
2912/// that has uses past the definition in the next iteration. A copy with a new
2913/// virtual register is inserted before the definition, which helps with
2914/// generating a better register assignment.
2915///
2916/// v1 = phi(a, v2) v1 = phi(a, v2)
2917/// v2 = phi(b, v3) v2 = phi(b, v3)
2918/// v3 = .. v4 = copy v1
2919 /// .. = v1 v3 = ..
2920/// .. = v4
2921void SwingSchedulerDAG::splitLifetimes(MachineBasicBlock *KernelBB,
2922 MBBVectorTy &EpilogBBs,
2923 SMSchedule &Schedule) {
2924 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
2925 for (MachineBasicBlock::iterator BBI = KernelBB->instr_begin(),
2926 BBF = KernelBB->getFirstNonPHI();
2927 BBI != BBF; ++BBI) {
2928 unsigned Def = BBI->getOperand(0).getReg();
2929 // Check for any Phi definition that is used as an operand of another Phi
2930 // in the same block.
2931 for (MachineRegisterInfo::use_instr_iterator I = MRI.use_instr_begin(Def),
2932 E = MRI.use_instr_end();
2933 I != E; ++I) {
2934 if (I->isPHI() && I->getParent() == KernelBB) {
2935 // Get the loop carried definition.
2936 unsigned LCDef = getLoopPhiReg(*BBI, KernelBB);
2937 if (!LCDef)
2938 continue;
2939 MachineInstr *MI = MRI.getVRegDef(LCDef);
2940 if (!MI || MI->getParent() != KernelBB || MI->isPHI())
2941 continue;
2942 // Search through the rest of the block looking for uses of the Phi
2943 // definition. If one occurs, then split the lifetime.
2944 unsigned SplitReg = 0;
2945 for (auto &BBJ : make_range(MachineBasicBlock::instr_iterator(MI),
2946 KernelBB->instr_end()))
2947 if (BBJ.readsRegister(Def)) {
2948 // We split the lifetime when we find the first use.
2949 if (SplitReg == 0) {
2950 SplitReg = MRI.createVirtualRegister(MRI.getRegClass(Def));
2951 BuildMI(*KernelBB, MI, MI->getDebugLoc(),
2952 TII->get(TargetOpcode::COPY), SplitReg)
2953 .addReg(Def);
2954 }
2955 BBJ.substituteRegister(Def, SplitReg, 0, *TRI);
2956 }
2957 if (!SplitReg)
2958 continue;
2959 // Search through each of the epilog blocks for any uses to be renamed.
2960 for (auto &Epilog : EpilogBBs)
2961 for (auto &I : *Epilog)
2962 if (I.readsRegister(Def))
2963 I.substituteRegister(Def, SplitReg, 0, *TRI);
2964 break;
2965 }
2966 }
2967 }
2968}
2969
2970/// Remove the incoming block from the Phis in a basic block.
2971static void removePhis(MachineBasicBlock *BB, MachineBasicBlock *Incoming) {
2972 for (MachineInstr &MI : *BB) {
2973 if (!MI.isPHI())
2974 break;
2975 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2)
2976 if (MI.getOperand(i + 1).getMBB() == Incoming) {
2977 MI.RemoveOperand(i + 1);
2978 MI.RemoveOperand(i);
2979 break;
2980 }
2981 }
2982}
2983
2984/// Create branches from each prolog basic block to the appropriate epilog
2985/// block. These edges are needed if the loop ends before reaching the
2986/// kernel.
2987void SwingSchedulerDAG::addBranches(MBBVectorTy &PrologBBs,
2988 MachineBasicBlock *KernelBB,
2989 MBBVectorTy &EpilogBBs,
2990 SMSchedule &Schedule, ValueMapTy *VRMap) {
2991 assert(PrologBBs.size() == EpilogBBs.size() && "Prolog/Epilog mismatch");
2992 MachineInstr *IndVar = Pass.LI.LoopInductionVar;
2993 MachineInstr *Cmp = Pass.LI.LoopCompare;
2994 MachineBasicBlock *LastPro = KernelBB;
2995 MachineBasicBlock *LastEpi = KernelBB;
2996
2997 // Start from the blocks connected to the kernel and work "out"
2998 // to the first prolog and the last epilog blocks.
2999 SmallVector<MachineInstr *, 4> PrevInsts;
3000 unsigned MaxIter = PrologBBs.size() - 1;
3001 unsigned LC = UINT_MAX;
3002 unsigned LCMin = UINT_MAX;
3003 for (unsigned i = 0, j = MaxIter; i <= MaxIter; ++i, --j) {
3004 // Add branches to the prolog that go to the corresponding
3005 // epilog, and the fall-thru prolog/kernel block.
3006 MachineBasicBlock *Prolog = PrologBBs[j];
3007 MachineBasicBlock *Epilog = EpilogBBs[i];
3008 // We've executed one iteration, so decrement the loop count and check for
3009 // the loop end.
3010 SmallVector<MachineOperand, 4> Cond;
3011 // Check if the LOOP0 has already been removed. If so, then there is no need
3012 // to reduce the trip count.
3013 if (LC != 0)
3014 LC = TII->reduceLoopCount(*Prolog, IndVar, *Cmp, Cond, PrevInsts, j,
3015 MaxIter);
3016
3017 // Record the value of the first trip count, which is used to determine if
3018 // branches and blocks can be removed for constant trip counts.
3019 if (LCMin == UINT_MAX)
3020 LCMin = LC;
3021
3022 unsigned numAdded = 0;
3023 if (TargetRegisterInfo::isVirtualRegister(LC)) {
3024 Prolog->addSuccessor(Epilog);
3025 numAdded = TII->insertBranch(*Prolog, Epilog, LastPro, Cond, DebugLoc());
3026 } else if (j >= LCMin) {
3027 Prolog->addSuccessor(Epilog);
3028 Prolog->removeSuccessor(LastPro);
3029 LastEpi->removeSuccessor(Epilog);
3030 numAdded = TII->insertBranch(*Prolog, Epilog, nullptr, Cond, DebugLoc());
3031 removePhis(Epilog, LastEpi);
3032 // Remove the blocks that are no longer referenced.
3033 if (LastPro != LastEpi) {
3034 LastEpi->clear();
3035 LastEpi->eraseFromParent();
3036 }
3037 LastPro->clear();
3038 LastPro->eraseFromParent();
3039 } else {
3040 numAdded = TII->insertBranch(*Prolog, LastPro, nullptr, Cond, DebugLoc());
3041 removePhis(Epilog, Prolog);
3042 }
3043 LastPro = Prolog;
3044 LastEpi = Epilog;
3045 for (MachineBasicBlock::reverse_instr_iterator I = Prolog->instr_rbegin(),
3046 E = Prolog->instr_rend();
3047 I != E && numAdded > 0; ++I, --numAdded)
3048 updateInstruction(&*I, false, j, 0, Schedule, VRMap);
3049 }
3050}
3051
3052/// Return true if we can compute the amount the instruction changes
3053/// during each iteration. Set Delta to the amount of the change.
3054bool SwingSchedulerDAG::computeDelta(MachineInstr &MI, unsigned &Delta) {
3055 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
3056 unsigned BaseReg;
3057 int64_t Offset;
3058 if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
3059 return false;
3060
3061 MachineRegisterInfo &MRI = MF.getRegInfo();
3062 // Check if there is a Phi. If so, get the definition in the loop.
3063 MachineInstr *BaseDef = MRI.getVRegDef(BaseReg);
3064 if (BaseDef && BaseDef->isPHI()) {
3065 BaseReg = getLoopPhiReg(*BaseDef, MI.getParent());
3066 BaseDef = MRI.getVRegDef(BaseReg);
3067 }
3068 if (!BaseDef)
3069 return false;
3070
3071 int D = 0;
3072 if (!TII->getIncrementValue(*BaseDef, D) && D >= 0)
3073 return false;
3074
3075 Delta = D;
3076 return true;
3077}
3078
3079/// Update the memory operand with a new offset when the pipeliner
3080/// generates a new copy of the instruction that refers to a
3081/// different memory location.
3082void SwingSchedulerDAG::updateMemOperands(MachineInstr &NewMI,
3083 MachineInstr &OldMI, unsigned Num) {
3084 if (Num == 0)
3085 return;
3086 // If the instruction has memory operands, then adjust the offset
3087 // when the instruction appears in different stages.
3088 unsigned NumRefs = NewMI.memoperands_end() - NewMI.memoperands_begin();
3089 if (NumRefs == 0)
3090 return;
3091 MachineInstr::mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NumRefs);
3092 unsigned Refs = 0;
3093 for (MachineMemOperand *MMO : NewMI.memoperands()) {
3094 if (MMO->isVolatile() || (MMO->isInvariant() && MMO->isDereferenceable()) ||
3095 (!MMO->getValue())) {
3096 NewMemRefs[Refs++] = MMO;
3097 continue;
3098 }
3099 unsigned Delta;
3100 if (computeDelta(OldMI, Delta)) {
3101 int64_t AdjOffset = Delta * Num;
3102 NewMemRefs[Refs++] =
3103 MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize());
3104 } else
3105 NewMemRefs[Refs++] = MF.getMachineMemOperand(MMO, 0, UINT64_MAX);
3106 }
3107 NewMI.setMemRefs(NewMemRefs, NewMemRefs + NumRefs);
3108}
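
As a worked example of the adjustment above (invented values, not taken from a real loop): if the base register of a load advances by Delta = 8 bytes per iteration and a cloned copy of the instruction runs Num = 2 stages after the original, its memory operand describes a location 16 bytes further along. A minimal sketch:

#include <cstdint>
#include <iostream>

// AdjOffset = Delta * Num, as computed above for each MachineMemOperand.
int64_t adjustedOffset(unsigned Delta, unsigned Num) {
  return static_cast<int64_t>(Delta) * Num;
}

int main() {
  // Base pointer advances 8 bytes per iteration, clone is 2 stages later.
  std::cout << adjustedOffset(8, 2) << "\n";  // prints 16
}
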
3109
3110/// Clone the instruction for the new pipelined loop and update the
3111/// memory operands, if needed.
3112MachineInstr *SwingSchedulerDAG::cloneInstr(MachineInstr *OldMI,
3113 unsigned CurStageNum,
3114 unsigned InstStageNum) {
3115 MachineInstr *NewMI = MF.CloneMachineInstr(OldMI);
3116 // Check for tied operands in inline asm instructions. This should be handled
3117 // elsewhere, but I'm not sure of the best solution.
3118 if (OldMI->isInlineAsm())
3119 for (unsigned i = 0, e = OldMI->getNumOperands(); i != e; ++i) {
3120 const auto &MO = OldMI->getOperand(i);
3121 if (MO.isReg() && MO.isUse())
3122 break;
3123 unsigned UseIdx;
3124 if (OldMI->isRegTiedToUseOperand(i, &UseIdx))
3125 NewMI->tieOperands(i, UseIdx);
3126 }
3127 updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum);
3128 return NewMI;
3129}
3130
3131/// Clone the instruction for the new pipelined loop. If needed, this
3132/// function updates the instruction using the values saved in the
3133/// InstrChanges structure.
3134MachineInstr *SwingSchedulerDAG::cloneAndChangeInstr(MachineInstr *OldMI,
3135 unsigned CurStageNum,
3136 unsigned InstStageNum,
3137 SMSchedule &Schedule) {
3138 MachineInstr *NewMI = MF.CloneMachineInstr(OldMI);
3139 DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
3140 InstrChanges.find(getSUnit(OldMI));
3141 if (It != InstrChanges.end()) {
3142 std::pair<unsigned, int64_t> RegAndOffset = It->second;
3143 unsigned BasePos, OffsetPos;
3144 if (!TII->getBaseAndOffsetPosition(*OldMI, BasePos, OffsetPos))
3145 return nullptr;
3146 int64_t NewOffset = OldMI->getOperand(OffsetPos).getImm();
3147 MachineInstr *LoopDef = findDefInLoop(RegAndOffset.first);
3148 if (Schedule.stageScheduled(getSUnit(LoopDef)) > (signed)InstStageNum)
3149 NewOffset += RegAndOffset.second * (CurStageNum - InstStageNum);
3150 NewMI->getOperand(OffsetPos).setImm(NewOffset);
3151 }
3152 updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum);
3153 return NewMI;
3154}
3155
3156/// Update the machine instruction with new virtual registers. This
3157 /// function may change the definitions and/or uses.
3158void SwingSchedulerDAG::updateInstruction(MachineInstr *NewMI, bool LastDef,
3159 unsigned CurStageNum,
3160 unsigned InstrStageNum,
3161 SMSchedule &Schedule,
3162 ValueMapTy *VRMap) {
3163 for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
3164 MachineOperand &MO = NewMI->getOperand(i);
3165 if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3166 continue;
3167 unsigned reg = MO.getReg();
3168 if (MO.isDef()) {
3169 // Create a new virtual register for the definition.
3170 const TargetRegisterClass *RC = MRI.getRegClass(reg);
3171 unsigned NewReg = MRI.createVirtualRegister(RC);
3172 MO.setReg(NewReg);
3173 VRMap[CurStageNum][reg] = NewReg;
3174 if (LastDef)
3175 replaceRegUsesAfterLoop(reg, NewReg, BB, MRI, LIS);
3176 } else if (MO.isUse()) {
3177 MachineInstr *Def = MRI.getVRegDef(reg);
3178 // Compute the stage that contains the last definition for the instruction.
3179 int DefStageNum = Schedule.stageScheduled(getSUnit(Def));
3180 unsigned StageNum = CurStageNum;
3181 if (DefStageNum != -1 && (int)InstrStageNum > DefStageNum) {
3182 // Compute the difference in stages between the definition and the use.
3183 unsigned StageDiff = (InstrStageNum - DefStageNum);
3184 // Make an adjustment to get the last definition.
3185 StageNum -= StageDiff;
3186 }
3187 if (VRMap[StageNum].count(reg))
3188 MO.setReg(VRMap[StageNum][reg]);
3189 }
3190 }
3191}
3192
3193/// Return the instruction in the loop that defines the register.
3194/// If the definition is a Phi, then follow the Phi operand to
3195/// the instruction in the loop.
3196MachineInstr *SwingSchedulerDAG::findDefInLoop(unsigned Reg) {
3197 SmallPtrSet<MachineInstr *, 8> Visited;
3198 MachineInstr *Def = MRI.getVRegDef(Reg);
3199 while (Def->isPHI()) {
3200 if (!Visited.insert(Def).second)
3201 break;
3202 for (unsigned i = 1, e = Def->getNumOperands(); i < e; i += 2)
3203 if (Def->getOperand(i + 1).getMBB() == BB) {
3204 Def = MRI.getVRegDef(Def->getOperand(i).getReg());
3205 break;
3206 }
3207 }
3208 return Def;
3209}
3210
3211/// Return the new name for the value from the previous stage.
3212unsigned SwingSchedulerDAG::getPrevMapVal(unsigned StageNum, unsigned PhiStage,
3213 unsigned LoopVal, unsigned LoopStage,
3214 ValueMapTy *VRMap,
3215 MachineBasicBlock *BB) {
3216 unsigned PrevVal = 0;
3217 if (StageNum > PhiStage) {
3218 MachineInstr *LoopInst = MRI.getVRegDef(LoopVal);
3219 if (PhiStage == LoopStage && VRMap[StageNum - 1].count(LoopVal))
3220 // The name is defined in the previous stage.
3221 PrevVal = VRMap[StageNum - 1][LoopVal];
3222 else if (VRMap[StageNum].count(LoopVal))
3223 // The previous name is defined in the current stage when the instruction
3224 // order is swapped.
3225 PrevVal = VRMap[StageNum][LoopVal];
3226 else if (!LoopInst->isPHI() || LoopInst->getParent() != BB)
3227 // The loop value hasn't yet been scheduled.
3228 PrevVal = LoopVal;
3229 else if (StageNum == PhiStage + 1)
3230 // The loop value is another phi, which has not been scheduled.
3231 PrevVal = getInitPhiReg(*LoopInst, BB);
3232 else if (StageNum > PhiStage + 1 && LoopInst->getParent() == BB)
3233 // The loop value is another phi, which has been scheduled.
3234 PrevVal =
3235 getPrevMapVal(StageNum - 1, PhiStage, getLoopPhiReg(*LoopInst, BB),
3236 LoopStage, VRMap, BB);
3237 }
3238 return PrevVal;
3239}
3240
3241/// Rewrite the Phi values in the specified block to use the mappings
3242/// from the initial operand. Once the Phi is scheduled, we switch
3243/// to using the loop value instead of the Phi value, so those names
3244/// do not need to be rewritten.
3245void SwingSchedulerDAG::rewritePhiValues(MachineBasicBlock *NewBB,
3246 unsigned StageNum,
3247 SMSchedule &Schedule,
3248 ValueMapTy *VRMap,
3249 InstrMapTy &InstrMap) {
3250 for (MachineBasicBlock::iterator BBI = BB->instr_begin(),
3251 BBE = BB->getFirstNonPHI();
3252 BBI != BBE; ++BBI) {
3253 unsigned InitVal = 0;
3254 unsigned LoopVal = 0;
3255 getPhiRegs(*BBI, BB, InitVal, LoopVal);
3256 unsigned PhiDef = BBI->getOperand(0).getReg();
3257
3258 unsigned PhiStage =
3259 (unsigned)Schedule.stageScheduled(getSUnit(MRI.getVRegDef(PhiDef)));
3260 unsigned LoopStage =
3261 (unsigned)Schedule.stageScheduled(getSUnit(MRI.getVRegDef(LoopVal)));
3262 unsigned NumPhis = Schedule.getStagesForPhi(PhiDef);
3263 if (NumPhis > StageNum)
3264 NumPhis = StageNum;
3265 for (unsigned np = 0; np <= NumPhis; ++np) {
3266 unsigned NewVal =
3267 getPrevMapVal(StageNum - np, PhiStage, LoopVal, LoopStage, VRMap, BB);
3268 if (!NewVal)
3269 NewVal = InitVal;
3270 rewriteScheduledInstr(NewBB, Schedule, InstrMap, StageNum - np, np, &*BBI,
3271 PhiDef, NewVal);
3272 }
3273 }
3274}
3275
3276/// Rewrite a previously scheduled instruction to use the register value
3277/// from the new instruction. Make sure the instruction occurs in the
3278/// basic block, and we don't change the uses in the new instruction.
3279void SwingSchedulerDAG::rewriteScheduledInstr(
3280 MachineBasicBlock *BB, SMSchedule &Schedule, InstrMapTy &InstrMap,
3281 unsigned CurStageNum, unsigned PhiNum, MachineInstr *Phi, unsigned OldReg,
3282 unsigned NewReg, unsigned PrevReg) {
3283 bool InProlog = (CurStageNum < Schedule.getMaxStageCount());
3284 int StagePhi = Schedule.stageScheduled(getSUnit(Phi)) + PhiNum;
3285 // Rewrite uses that have been scheduled already to use the new
3286 // Phi register.
3287 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(OldReg),
3288 EI = MRI.use_end();
3289 UI != EI;) {
3290 MachineOperand &UseOp = *UI;
3291 MachineInstr *UseMI = UseOp.getParent();
3292 ++UI;
3293 if (UseMI->getParent() != BB)
3294 continue;
3295 if (UseMI->isPHI()) {
3296 if (!Phi->isPHI() && UseMI->getOperand(0).getReg() == NewReg)
3297 continue;
3298 if (getLoopPhiReg(*UseMI, BB) != OldReg)
3299 continue;
3300 }
3301 InstrMapTy::iterator OrigInstr = InstrMap.find(UseMI);
3302 assert(OrigInstr != InstrMap.end() && "Instruction not scheduled.");
3303 SUnit *OrigMISU = getSUnit(OrigInstr->second);
3304 int StageSched = Schedule.stageScheduled(OrigMISU);
3305 int CycleSched = Schedule.cycleScheduled(OrigMISU);
3306 unsigned ReplaceReg = 0;
3307 // This is the stage for the scheduled instruction.
3308 if (StagePhi == StageSched && Phi->isPHI()) {
3309 int CyclePhi = Schedule.cycleScheduled(getSUnit(Phi));
3310 if (PrevReg && InProlog)
3311 ReplaceReg = PrevReg;
3312 else if (PrevReg && !Schedule.isLoopCarried(this, *Phi) &&
3313 (CyclePhi <= CycleSched || OrigMISU->getInstr()->isPHI()))
3314 ReplaceReg = PrevReg;
3315 else
3316 ReplaceReg = NewReg;
3317 }
3318 // The scheduled instruction occurs before the scheduled Phi, and the
3319 // Phi is not loop carried.
3320 if (!InProlog && StagePhi + 1 == StageSched &&
3321 !Schedule.isLoopCarried(this, *Phi))
3322 ReplaceReg = NewReg;
3323 if (StagePhi > StageSched && Phi->isPHI())
3324 ReplaceReg = NewReg;
3325 if (!InProlog && !Phi->isPHI() && StagePhi < StageSched)
3326 ReplaceReg = NewReg;
3327 if (ReplaceReg) {
3328 MRI.constrainRegClass(ReplaceReg, MRI.getRegClass(OldReg));
3329 UseOp.setReg(ReplaceReg);
3330 }
3331 }
3332}
3333
3334/// Check if we can change the instruction to use an offset value from the
3335/// previous iteration. If so, return true and set the base and offset values
3336/// so that we can rewrite the load, if necessary.
3337/// v1 = Phi(v0, v3)
3338/// v2 = load v1, 0
3339/// v3 = post_store v1, 4, x
3340/// This function enables the load to be rewritten as v2 = load v3, 4.
3341bool SwingSchedulerDAG::canUseLastOffsetValue(MachineInstr *MI,
3342 unsigned &BasePos,
3343 unsigned &OffsetPos,
3344 unsigned &NewBase,
3345 int64_t &Offset) {
3346 // Get the load instruction.
3347 if (TII->isPostIncrement(*MI))
3348 return false;
3349 unsigned BasePosLd, OffsetPosLd;
3350 if (!TII->getBaseAndOffsetPosition(*MI, BasePosLd, OffsetPosLd))
3351 return false;
3352 unsigned BaseReg = MI->getOperand(BasePosLd).getReg();
3353
3354 // Look for the Phi instruction.
3355 MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
3356 MachineInstr *Phi = MRI.getVRegDef(BaseReg);
3357 if (!Phi || !Phi->isPHI())
3358 return false;
3359 // Get the register defined in the loop block.
3360 unsigned PrevReg = getLoopPhiReg(*Phi, MI->getParent());
3361 if (!PrevReg)
3362 return false;
3363
3364 // Check for the post-increment load/store instruction.
3365 MachineInstr *PrevDef = MRI.getVRegDef(PrevReg);
3366 if (!PrevDef || PrevDef == MI)
3367 return false;
3368
3369 if (!TII->isPostIncrement(*PrevDef))
3370 return false;
3371
3372 unsigned BasePos1 = 0, OffsetPos1 = 0;
3373 if (!TII->getBaseAndOffsetPosition(*PrevDef, BasePos1, OffsetPos1))
3374 return false;
3375
3376 // Make sure offset values are both positive or both negative.
3377 int64_t LoadOffset = MI->getOperand(OffsetPosLd).getImm();
3378 int64_t StoreOffset = PrevDef->getOperand(OffsetPos1).getImm();
3379 if ((LoadOffset >= 0) != (StoreOffset >= 0))
3380 return false;
3381
3382 // Set the return value once we determine that we return true.
3383 BasePos = BasePosLd;
3384 OffsetPos = OffsetPosLd;
3385 NewBase = PrevReg;
3386 Offset = StoreOffset;
3387 return true;
3388}
3389
3390 /// Apply changes to the instruction if needed. The changes are needed
3391 /// to improve the scheduling and depend upon the final schedule.
3392MachineInstr *SwingSchedulerDAG::applyInstrChange(MachineInstr *MI,
3393 SMSchedule &Schedule,
3394 bool UpdateDAG) {
3395 SUnit *SU = getSUnit(MI);
3396 DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
3397 InstrChanges.find(SU);
3398 if (It != InstrChanges.end()) {
3399 std::pair<unsigned, int64_t> RegAndOffset = It->second;
3400 unsigned BasePos, OffsetPos;
3401 if (!TII->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos))
3402 return nullptr;
3403 unsigned BaseReg = MI->getOperand(BasePos).getReg();
3404 MachineInstr *LoopDef = findDefInLoop(BaseReg);
3405 int DefStageNum = Schedule.stageScheduled(getSUnit(LoopDef));
3406 int DefCycleNum = Schedule.cycleScheduled(getSUnit(LoopDef));
3407 int BaseStageNum = Schedule.stageScheduled(SU);
3408 int BaseCycleNum = Schedule.cycleScheduled(SU);
3409 if (BaseStageNum < DefStageNum) {
3410 MachineInstr *NewMI = MF.CloneMachineInstr(MI);
3411 int OffsetDiff = DefStageNum - BaseStageNum;
3412 if (DefCycleNum < BaseCycleNum) {
3413 NewMI->getOperand(BasePos).setReg(RegAndOffset.first);
3414 if (OffsetDiff > 0)
3415 --OffsetDiff;
3416 }
3417 int64_t NewOffset =
3418 MI->getOperand(OffsetPos).getImm() + RegAndOffset.second * OffsetDiff;
3419 NewMI->getOperand(OffsetPos).setImm(NewOffset);
3420 if (UpdateDAG) {
3421 SU->setInstr(NewMI);
3422 MISUnitMap[NewMI] = SU;
3423 }
3424 NewMIs.insert(NewMI);
3425 return NewMI;
3426 }
3427 }
3428 return nullptr;
3429}
3430
3431 /// Return true for an order dependence that is potentially loop carried.
3432/// An order dependence is loop carried if the destination defines a value
3433/// that may be used by the source in a subsequent iteration.
3434bool SwingSchedulerDAG::isLoopCarriedOrder(SUnit *Source, const SDep &Dep,
3435 bool isSucc) {
3436 if (!isOrder(Source, Dep) || Dep.isArtificial())
3437 return false;
3438
3439 if (!SwpPruneLoopCarried)
3440 return true;
3441
3442 MachineInstr *SI = Source->getInstr();
3443 MachineInstr *DI = Dep.getSUnit()->getInstr();
3444 if (!isSucc)
3445 std::swap(SI, DI);
3446 assert(SI != nullptr && DI != nullptr && "Expecting SUnit with an MI.");
3448 // Assume ordered loads and stores may have a loop carried dependence.
3449 if (SI->hasUnmodeledSideEffects() || DI->hasUnmodeledSideEffects() ||
3450 SI->hasOrderedMemoryRef() || DI->hasOrderedMemoryRef())
3451 return true;
3452
3453 // Only chain dependences between a load and store can be loop carried.
3454 if (!DI->mayStore() || !SI->mayLoad())
3455 return false;
3456
3457 unsigned DeltaS, DeltaD;
3458 if (!computeDelta(*SI, DeltaS) || !computeDelta(*DI, DeltaD))
3459 return true;
3460
3461 unsigned BaseRegS, BaseRegD;
3462 int64_t OffsetS, OffsetD;
3463 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
3464 if (!TII->getMemOpBaseRegImmOfs(*SI, BaseRegS, OffsetS, TRI) ||
3465 !TII->getMemOpBaseRegImmOfs(*DI, BaseRegD, OffsetD, TRI))
3466 return true;
3467
3468 if (BaseRegS != BaseRegD)
3469 return true;
3470
3471 uint64_t AccessSizeS = (*SI->memoperands_begin())->getSize();
3472 uint64_t AccessSizeD = (*DI->memoperands_begin())->getSize();
3473
3474 // This is the main test, which checks the offset values and the loop
3475 // increment value to determine if the accesses may be loop carried.
3476 if (OffsetS >= OffsetD)
3477 return OffsetS + AccessSizeS > DeltaS;
3478 else
3479 return OffsetD + AccessSizeD > DeltaD;
3480
3481 return true;
3482}
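
A worked example of the final offset test above, using hypothetical offsets, access sizes, and per-iteration increments rather than values from a real loop:

#include <cstdint>
#include <iostream>

// Same shape as the test above: compare the larger offset plus its access
// size against that access's per-iteration increment.
bool mayBeLoopCarried(int64_t OffsetS, uint64_t SizeS, unsigned DeltaS,
                      int64_t OffsetD, uint64_t SizeD, unsigned DeltaD) {
  if (OffsetS >= OffsetD)
    return OffsetS + SizeS > DeltaS;
  return OffsetD + SizeD > DeltaD;
}

int main() {
  // Load at offset 0 and store at offset 4, both 4 bytes, base advancing by 8
  // per iteration: the next iteration's load starts at +8, past the store.
  std::cout << mayBeLoopCarried(0, 4, 8, 4, 4, 8) << "\n";  // 0 (independent)
  // Same accesses but the base advances by only 4: the next iteration's load
  // overlaps this iteration's store, so assume a loop-carried dependence.
  std::cout << mayBeLoopCarried(0, 4, 4, 4, 4, 4) << "\n";  // 1 (carried)
}
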
3483
3484void SwingSchedulerDAG::postprocessDAG() {
3485 for (auto &M : Mutations)
3486 M->apply(this);
3487}
3488
3489/// Try to schedule the node at the specified StartCycle and continue
3490 /// until the node is scheduled or the EndCycle is reached. This function
3491/// returns true if the node is scheduled. This routine may search either
3492/// forward or backward for a place to insert the instruction based upon
3493/// the relative values of StartCycle and EndCycle.
3494bool SMSchedule::insert(SUnit *SU, int StartCycle, int EndCycle, int II) {
3495 bool forward = true;
3496 if (StartCycle > EndCycle)
3497 forward = false;
3498
3499 // The terminating condition depends on the direction.
3500 int termCycle = forward ? EndCycle + 1 : EndCycle - 1;
3501 for (int curCycle = StartCycle; curCycle != termCycle;
3502 forward ? ++curCycle : --curCycle) {
3503
3504 // Add the already scheduled instructions at the specified cycle to the DFA.
3505 Resources->clearResources();
3506 for (int checkCycle = FirstCycle + ((curCycle - FirstCycle) % II);
3507 checkCycle <= LastCycle; checkCycle += II) {
3508 std::deque<SUnit *> &cycleInstrs = ScheduledInstrs[checkCycle];
3509
3510 for (std::deque<SUnit *>::iterator I = cycleInstrs.begin(),
3511 E = cycleInstrs.end();
3512 I != E; ++I) {
3513 if (ST.getInstrInfo()->isZeroCost((*I)->getInstr()->getOpcode()))
3514 continue;
3515 assert(Resources->canReserveResources(*(*I)->getInstr()) &&
3516 "These instructions have already been scheduled.");
3517 Resources->reserveResources(*(*I)->getInstr());
3518 }
3519 }
3520 if (ST.getInstrInfo()->isZeroCost(SU->getInstr()->getOpcode()) ||
3521 Resources->canReserveResources(*SU->getInstr())) {
3522 DEBUG({
3523 dbgs() << "\tinsert at cycle " << curCycle << " ";
3524 SU->getInstr()->dump();
3525 });
3526
3527 ScheduledInstrs[curCycle].push_back(SU);
3528 InstrToCycle.insert(std::make_pair(SU, curCycle));
3529 if (curCycle > LastCycle)
3530 LastCycle = curCycle;
3531 if (curCycle < FirstCycle)
3532 FirstCycle = curCycle;
3533 return true;
3534 }
3535 DEBUG({
3536 dbgs() << "\tfailed to insert at cycle " << curCycle << " ";
3537 SU->getInstr()->dump();
3538 });
3539 }
3540 return false;
3541}
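
The resource check in the loop above walks every already-filled cycle that maps to the same kernel slot as the candidate cycle, i.e. all cycles congruent to it modulo II. Below is a small standalone sketch with made-up cycle numbers; it uses a non-negative modulo for simplicity, whereas the code above relies on curCycle staying at or after FirstCycle.

#include <iostream>
#include <vector>

// All scheduled cycles that share a resource slot with CandidateCycle when
// the initiation interval is II.
std::vector<int> conflictingCycles(int CandidateCycle, int FirstCycle,
                                   int LastCycle, int II) {
  int Slot = ((CandidateCycle - FirstCycle) % II + II) % II;
  std::vector<int> Cycles;
  for (int C = FirstCycle + Slot; C <= LastCycle; C += II)
    Cycles.push_back(C);
  return Cycles;
}

int main() {
  // With II = 3 and cycles 0..8 already populated, a candidate at cycle 7
  // competes for resources with whatever sits at cycles 1, 4, and 7.
  for (int C : conflictingCycles(7, 0, 8, 3))
    std::cout << C << " ";   // prints: 1 4 7
  std::cout << "\n";
}
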
3542
3543// Return the cycle of the earliest scheduled instruction in the chain.
3544int SMSchedule::earliestCycleInChain(const SDep &Dep) {
3545 SmallPtrSet<SUnit *, 8> Visited;
3546 SmallVector<SDep, 8> Worklist;
3547 Worklist.push_back(Dep);
3548 int EarlyCycle = INT_MAX;
3549 while (!Worklist.empty()) {
3550 const SDep &Cur = Worklist.pop_back_val();
3551 SUnit *PrevSU = Cur.getSUnit();
3552 if (Visited.count(PrevSU))
3553 continue;
3554 std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(PrevSU);
3555 if (it == InstrToCycle.end())
3556 continue;
3557 EarlyCycle = std::min(EarlyCycle, it->second);
3558 for (const auto &PI : PrevSU->Preds)
3559 if (SwingSchedulerDAG::isOrder(PrevSU, PI))
3560 Worklist.push_back(PI);
3561 Visited.insert(PrevSU);
3562 }
3563 return EarlyCycle;
3564}
3565
3566// Return the cycle of the latest scheduled instruction in the chain.
3567int SMSchedule::latestCycleInChain(const SDep &Dep) {
3568 SmallPtrSet<SUnit *, 8> Visited;
3569 SmallVector<SDep, 8> Worklist;
3570 Worklist.push_back(Dep);
3571 int LateCycle = INT_MIN;
3572 while (!Worklist.empty()) {
3573 const SDep &Cur = Worklist.pop_back_val();
3574 SUnit *SuccSU = Cur.getSUnit();
3575 if (Visited.count(SuccSU))
3576 continue;
3577 std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SuccSU);
3578 if (it == InstrToCycle.end())
3579 continue;
3580 LateCycle = std::max(LateCycle, it->second);
3581 for (const auto &SI : SuccSU->Succs)
3582 if (SwingSchedulerDAG::isOrder(SuccSU, SI))
3583 Worklist.push_back(SI);
3584 Visited.insert(SuccSU);
3585 }
3586 return LateCycle;
3587}
3588
3589 /// If an instruction has a use that spans multiple iterations, then
3590 /// return the Phi it depends on. These instructions are characterized by
3591 /// having a back-edge to a Phi, which contains a reference to another Phi.
3592static SUnit *multipleIterations(SUnit *SU, SwingSchedulerDAG *DAG) {
3593 for (auto &P : SU->Preds)
3594 if (DAG->isBackedge(SU, P) && P.getSUnit()->getInstr()->isPHI())
3595 for (auto &S : P.getSUnit()->Succs)
3596 if (S.getKind() == SDep::Order && S.getSUnit()->getInstr()->isPHI())
3597 return P.getSUnit();
3598 return nullptr;
3599}
3600
3601/// Compute the scheduling start slot for the instruction. The start slot
3602/// depends on any predecessor or successor nodes scheduled already.
3603void SMSchedule::computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
3604 int *MinEnd, int *MaxStart, int II,
3605 SwingSchedulerDAG *DAG) {
3606 // Iterate over each instruction that has been scheduled already. The start
3607 // slot computation depends on whether the previously scheduled instruction
3608 // is a predecessor or successor of the specified instruction.
3609 for (int cycle = getFirstCycle(); cycle <= LastCycle; ++cycle) {
3610
3611 // Iterate over each instruction in the current cycle.
3612 for (SUnit *I : getInstructions(cycle)) {
3613 // Because we're processing a DAG for the dependences, we recognize
3614 // the back-edge in recurrences by anti dependences.
3615 for (unsigned i = 0, e = (unsigned)SU->Preds.size(); i != e; ++i) {
3616 const SDep &Dep = SU->Preds[i];
3617 if (Dep.getSUnit() == I) {
3618 if (!DAG->isBackedge(SU, Dep)) {
3619 int EarlyStart = cycle + DAG->getLatency(SU, Dep) -
3620 DAG->getDistance(Dep.getSUnit(), SU, Dep) * II;
3621 *MaxEarlyStart = std::max(*MaxEarlyStart, EarlyStart);
3622 if (DAG->isLoopCarriedOrder(SU, Dep, false)) {
3623 int End = earliestCycleInChain(Dep) + (II - 1);
3624 *MinEnd = std::min(*MinEnd, End);
3625 }
3626 } else {
3627 int LateStart = cycle - DAG->getLatency(SU, Dep) +
3628 DAG->getDistance(SU, Dep.getSUnit(), Dep) * II;
3629 *MinLateStart = std::min(*MinLateStart, LateStart);
3630 }
3631 }
3632 // For an instruction that requires multiple iterations, make sure that
3633 // the dependent instruction is not scheduled past the definition.
3634 SUnit *BE = multipleIterations(I, DAG);
3635 if (BE && Dep.getSUnit() == BE && !SU->getInstr()->isPHI() &&
3636 !SU->isPred(I))
3637 *MinLateStart = std::min(*MinLateStart, cycle);
3638 }
3639 for (unsigned i = 0, e = (unsigned)SU->Succs.size(); i != e; ++i)
3640 if (SU->Succs[i].getSUnit() == I) {
3641 const SDep &Dep = SU->Succs[i];
3642 if (!DAG->isBackedge(SU, Dep)) {
3643 int LateStart = cycle - DAG->getLatency(SU, Dep) +
3644 DAG->getDistance(SU, Dep.getSUnit(), Dep) * II;
3645 *MinLateStart = std::min(*MinLateStart, LateStart);
3646 if (DAG->isLoopCarriedOrder(SU, Dep)) {
3647 int Start = latestCycleInChain(Dep) + 1 - II;
3648 *MaxStart = std::max(*MaxStart, Start);
3649 }
3650 } else {
3651 int EarlyStart = cycle + DAG->getLatency(SU, Dep) -
3652 DAG->getDistance(Dep.getSUnit(), SU, Dep) * II;
3653 *MaxEarlyStart = std::max(*MaxEarlyStart, EarlyStart);
3654 }
3655 }
3656 }
3657 }
3658}
3659
3660/// Order the instructions within a cycle so that the definitions occur
3661/// before the uses. Returns true if the instruction is added to the start
3662/// of the list, or false if added to the end.
3663bool SMSchedule::orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
3664 std::deque<SUnit *> &Insts) {
3665 MachineInstr *MI = SU->getInstr();
3666 bool OrderBeforeUse = false;
3667 bool OrderAfterDef = false;
3668 bool OrderBeforeDef = false;
3669 unsigned MoveDef = 0;
3670 unsigned MoveUse = 0;
3671 int StageInst1 = stageScheduled(SU);
3672
3673 unsigned Pos = 0;
3674 for (std::deque<SUnit *>::iterator I = Insts.begin(), E = Insts.end(); I != E;
3675 ++I, ++Pos) {
3676 // Relative order of Phis does not matter.
3677 if (MI->isPHI() && (*I)->getInstr()->isPHI())
3678 continue;
3679 for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
3680 MachineOperand &MO = MI->getOperand(i);
3681 if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3682 continue;
3683 unsigned Reg = MO.getReg();
3684 unsigned BasePos, OffsetPos;
3685 if (ST.getInstrInfo()->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos))
3686 if (MI->getOperand(BasePos).getReg() == Reg)
3687 if (unsigned NewReg = SSD->getInstrBaseReg(SU))
3688 Reg = NewReg;
3689 bool Reads, Writes;
3690 std::tie(Reads, Writes) =
3691 (*I)->getInstr()->readsWritesVirtualRegister(Reg);
3692 if (MO.isDef() && Reads && stageScheduled(*I) <= StageInst1) {
3693 OrderBeforeUse = true;
3694 MoveUse = Pos;
3695 } else if (MO.isDef() && Reads && stageScheduled(*I) > StageInst1) {
3696 // Add the instruction after the scheduled instruction.
3697 OrderAfterDef = true;
3698 MoveDef = Pos;
3699 } else if (MO.isUse() && Writes && stageScheduled(*I) == StageInst1) {
3700 if (cycleScheduled(*I) == cycleScheduled(SU) && !(*I)->isSucc(SU)) {
3701 OrderBeforeUse = true;
3702 MoveUse = Pos;
3703 } else {
3704 OrderAfterDef = true;
3705 MoveDef = Pos;
3706 }
3707 } else if (MO.isUse() && Writes && stageScheduled(*I) > StageInst1) {
3708 OrderBeforeUse = true;
3709 MoveUse = Pos;
3710 if (MoveUse != 0) {
3711 OrderAfterDef = true;
3712 MoveDef = Pos - 1;
3713 }
3714 } else if (MO.isUse() && Writes && stageScheduled(*I) < StageInst1) {
3715 // Add the instruction before the scheduled instruction.
3716 OrderBeforeUse = true;
3717 MoveUse = Pos;
3718 } else if (MO.isUse() && stageScheduled(*I) == StageInst1 &&
3719 isLoopCarriedDefOfUse(SSD, (*I)->getInstr(), MO)) {
3720 OrderBeforeDef = true;
3721 MoveUse = Pos;
3722 }
3723 }
3724 // Check for order dependences between instructions. Make sure the source
3725 // is ordered before the destination.
3726 for (auto &S : SU->Succs)
3727 if (S.getKind() == SDep::Order) {
3728 if (S.getSUnit() == *I && stageScheduled(*I) == StageInst1) {
3729 OrderBeforeUse = true;
3730 MoveUse = Pos;
3731 }
3732 } else if (TargetRegisterInfo::isPhysicalRegister(S.getReg())) {
3733 if (cycleScheduled(SU) != cycleScheduled(S.getSUnit())) {
3734 if (S.isAssignedRegDep()) {
3735 OrderAfterDef = true;
3736 MoveDef = Pos;
3737 }
3738 } else {
3739 OrderBeforeUse = true;
3740 MoveUse = Pos;
3741 }
3742 }
3743 for (auto &P : SU->Preds)
3744 if (P.getKind() == SDep::Order) {
3745 if (P.getSUnit() == *I && stageScheduled(*I) == StageInst1) {
3746 OrderAfterDef = true;
3747 MoveDef = Pos;
3748 }
3749 } else if (TargetRegisterInfo::isPhysicalRegister(P.getReg())) {
3750 if (cycleScheduled(SU) != cycleScheduled(P.getSUnit())) {
3751 if (P.isAssignedRegDep()) {
3752 OrderBeforeUse = true;
3753 MoveUse = Pos;
3754 }
3755 } else {
3756 OrderAfterDef = true;
3757 MoveDef = Pos;
3758 }
3759 }
3760 }
3761
3762 // A circular dependence.
3763 if (OrderAfterDef && OrderBeforeUse && MoveUse == MoveDef)
3764 OrderBeforeUse = false;
3765
3766 // OrderAfterDef takes precedence over OrderBeforeDef. The latter is due
3767 // to a loop-carried dependence.
3768 if (OrderBeforeDef)
3769 OrderBeforeUse = !OrderAfterDef || (MoveUse > MoveDef);
3770
3771 // The uncommon case when the instruction order needs to be updated because
3772 // there is both a use and def.
3773 if (OrderBeforeUse && OrderAfterDef) {
3774 SUnit *UseSU = Insts.at(MoveUse);
3775 SUnit *DefSU = Insts.at(MoveDef);
3776 if (MoveUse > MoveDef) {
3777 Insts.erase(Insts.begin() + MoveUse);
3778 Insts.erase(Insts.begin() + MoveDef);
3779 } else {
3780 Insts.erase(Insts.begin() + MoveDef);
3781 Insts.erase(Insts.begin() + MoveUse);
3782 }
3783 if (orderDependence(SSD, UseSU, Insts)) {
3784 Insts.push_front(SU);
3785 orderDependence(SSD, DefSU, Insts);
3786 return true;
3787 }
3788 Insts.pop_back();
3789 Insts.push_back(SU);
3790 Insts.push_back(UseSU);
3791 orderDependence(SSD, DefSU, Insts);
3792 return false;
3793 }
3794 // Put the new instruction first if there is a use in the list. Otherwise,
3795 // put it at the end of the list.
3796 if (OrderBeforeUse)
3797 Insts.push_front(SU);
3798 else
3799 Insts.push_back(SU);
3800 return OrderBeforeUse;
3801}
3802
3803/// Return true if the scheduled Phi has a loop carried operand.
3804bool SMSchedule::isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi) {
3805 if (!Phi.isPHI())
3806 return false;
3807 assert(Phi.isPHI() && "Expecting a Phi.");
3808 SUnit *DefSU = SSD->getSUnit(&Phi);
3809 unsigned DefCycle = cycleScheduled(DefSU);
3810 int DefStage = stageScheduled(DefSU);
3811
3812 unsigned InitVal = 0;
3813 unsigned LoopVal = 0;
3814 getPhiRegs(Phi, Phi.getParent(), InitVal, LoopVal);
3815 SUnit *UseSU = SSD->getSUnit(MRI.getVRegDef(LoopVal));
3816 if (!UseSU)
3817 return true;
3818 if (UseSU->getInstr()->isPHI())
3819 return true;
3820 unsigned LoopCycle = cycleScheduled(UseSU);
3821 int LoopStage = stageScheduled(UseSU);
3822 return (LoopCycle > DefCycle) || (LoopStage <= DefStage);
3823}
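
As a concrete reading of the final condition above, here is a minimal sketch with invented cycle and stage numbers: the Phi's loop operand is treated as loop carried when its definition is scheduled after the Phi within the same iteration (a later cycle), or when it is not placed in a strictly later stage.

#include <iostream>

// Same predicate as the return statement above.
bool phiIsLoopCarried(unsigned DefCycle, int DefStage, unsigned LoopCycle,
                      int LoopStage) {
  return (LoopCycle > DefCycle) || (LoopStage <= DefStage);
}

int main() {
  // Phi at cycle 0 / stage 0, loop value defined at cycle 3 / stage 0: carried.
  std::cout << phiIsLoopCarried(0, 0, 3, 0) << "\n";   // 1
  // Loop value defined at cycle 0 but in stage 1 (a later stage): not carried.
  std::cout << phiIsLoopCarried(0, 0, 0, 1) << "\n";   // 0
}
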
3824
3825/// Return true if the instruction is a definition that is loop carried
3826/// and defines the use on the next iteration.
3827/// v1 = phi(v2, v3)
3828/// (Def) v3 = op v1
3829/// (MO) = v1
3830 /// If MO appears before Def, then v1 and v3 may get assigned to the same
3831/// register.
3832bool SMSchedule::isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD,
3833 MachineInstr *Def, MachineOperand &MO) {
3834 if (!MO.isReg())
3835 return false;
3836 if (Def->isPHI())
3837 return false;
3838 MachineInstr *Phi = MRI.getVRegDef(MO.getReg());
3839 if (!Phi || !Phi->isPHI() || Phi->getParent() != Def->getParent())
3840 return false;
3841 if (!isLoopCarried(SSD, *Phi))
3842 return false;
3843 unsigned LoopReg = getLoopPhiReg(*Phi, Phi->getParent());
3844 for (unsigned i = 0, e = Def->getNumOperands(); i != e; ++i) {
3845 MachineOperand &DMO = Def->getOperand(i);
3846 if (!DMO.isReg() || !DMO.isDef())
3847 continue;
3848 if (DMO.getReg() == LoopReg)
3849 return true;
3850 }
3851 return false;
3852}
3853
3854// Check if the generated schedule is valid. This function checks if
3855// an instruction that uses a physical register is scheduled in a
3856// different stage than the definition. The pipeliner does not handle
3857// physical register values that may cross a basic block boundary.
3858bool SMSchedule::isValidSchedule(SwingSchedulerDAG *SSD) {
3859 for (int i = 0, e = SSD->SUnits.size(); i < e; ++i) {
3860 SUnit &SU = SSD->SUnits[i];
3861 if (!SU.hasPhysRegDefs)
3862 continue;
3863 int StageDef = stageScheduled(&SU);
3864 assert(StageDef != -1 && "Instruction should have been scheduled.");
3865 for (auto &SI : SU.Succs)
3866 if (SI.isAssignedRegDep())
3867 if (ST.getRegisterInfo()->isPhysicalRegister(SI.getReg()))
3868 if (stageScheduled(SI.getSUnit()) != StageDef)
3869 return false;
3870 }
3871 return true;
3872}
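// Editor's sketch (hypothetical stages, not part of the original source): if an
// instruction that defines a physical register is placed in stage 0 while a
// successor that reads that register through an assigned register dependence
// lands in stage 1, the physical value would have to cross a basic block
// boundary in the pipelined loop, so the schedule is rejected.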
3873
3874/// After the schedule has been formed, call this function to combine
3875/// the instructions from the different stages/cycles. That is, this
3876/// function creates a schedule that represents a single iteration.
3877void SMSchedule::finalizeSchedule(SwingSchedulerDAG *SSD) {
3878 // Move all instructions to the first stage from later stages.
3879 for (int cycle = getFirstCycle(); cycle <= getFinalCycle(); ++cycle) {
3880 for (int stage = 1, lastStage = getMaxStageCount(); stage <= lastStage;
3881 ++stage) {
3882 std::deque<SUnit *> &cycleInstrs =
3883 ScheduledInstrs[cycle + (stage * InitiationInterval)];
3884 for (std::deque<SUnit *>::reverse_iterator I = cycleInstrs.rbegin(),
3885 E = cycleInstrs.rend();
3886 I != E; ++I)
3887 ScheduledInstrs[cycle].push_front(*I);
3888 }
3889 }
3890 // Iterate over the definitions in each instruction, and compute the
3891 // stage difference for each use. Keep the maximum value.
3892 for (auto &I : InstrToCycle) {
3893 int DefStage = stageScheduled(I.first);
3894 MachineInstr *MI = I.first->getInstr();
3895 for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
3896 MachineOperand &Op = MI->getOperand(i);
3897 if (!Op.isReg() || !Op.isDef())
3898 continue;
3899
3900 unsigned Reg = Op.getReg();
3901 unsigned MaxDiff = 0;
3902 bool PhiIsSwapped = false;
3903 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(Reg),
3904 EI = MRI.use_end();
3905 UI != EI; ++UI) {
3906 MachineOperand &UseOp = *UI;
3907 MachineInstr *UseMI = UseOp.getParent();
3908 SUnit *SUnitUse = SSD->getSUnit(UseMI);
3909 int UseStage = stageScheduled(SUnitUse);
3910 unsigned Diff = 0;
3911 if (UseStage != -1 && UseStage >= DefStage)
3912 Diff = UseStage - DefStage;
3913 if (MI->isPHI()) {
3914 if (isLoopCarried(SSD, *MI))
3915 ++Diff;
3916 else
3917 PhiIsSwapped = true;
3918 }
3919 MaxDiff = std::max(Diff, MaxDiff);
3920 }
3921 RegToStageDiff[Reg] = std::make_pair(MaxDiff, PhiIsSwapped);
3922 }
3923 }
3924
3925 // Erase all the elements in the later stages. Only one iteration should
3926 // remain in the scheduled list, and it contains all the instructions.
3927 for (int cycle = getFinalCycle() + 1; cycle <= LastCycle; ++cycle)
3928 ScheduledInstrs.erase(cycle);
3929
3930  // Change the registers in the instructions as specified in the InstrChanges
3931 // map. We need to use the new registers to create the correct order.
3932 for (int i = 0, e = SSD->SUnits.size(); i != e; ++i) {
3933 SUnit *SU = &SSD->SUnits[i];
3934 SSD->applyInstrChange(SU->getInstr(), *this, true);
3935 }
3936
3937 // Reorder the instructions in each cycle to fix and improve the
3938 // generated code.
3939 for (int Cycle = getFirstCycle(), E = getFinalCycle(); Cycle <= E; ++Cycle) {
3940 std::deque<SUnit *> &cycleInstrs = ScheduledInstrs[Cycle];
3941 std::deque<SUnit *> newOrderZC;
3942 // Put the zero-cost, pseudo instructions at the start of the cycle.
3943 for (unsigned i = 0, e = cycleInstrs.size(); i < e; ++i) {
3944 SUnit *SU = cycleInstrs[i];
3945 if (ST.getInstrInfo()->isZeroCost(SU->getInstr()->getOpcode()))
3946 orderDependence(SSD, SU, newOrderZC);
3947 }
3948 std::deque<SUnit *> newOrderI;
3949 // Then, add the regular instructions back.
3950 for (unsigned i = 0, e = cycleInstrs.size(); i < e; ++i) {
3951 SUnit *SU = cycleInstrs[i];
3952 if (!ST.getInstrInfo()->isZeroCost(SU->getInstr()->getOpcode()))
3953 orderDependence(SSD, SU, newOrderI);
3954 }
3955 // Replace the old order with the new order.
3956 cycleInstrs.swap(newOrderZC);
3957 cycleInstrs.insert(cycleInstrs.end(), newOrderI.begin(), newOrderI.end());
3958 }
3959
3960  DEBUG(dump(););
3961}
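// Editor's sketch (hypothetical numbers, not part of the original source): with
// InitiationInterval = 2, an instruction scheduled at cycle 1 of stage 2 sits in
// ScheduledInstrs[1 + 2*2] = ScheduledInstrs[5]; the flattening loop at the top
// of this function moves it into ScheduledInstrs[1], so the cycles between
// FirstCycle and FinalCycle end up holding every instruction of one iteration,
// and the entries for the later cycles are erased afterwards.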
3962
3963/// Print the schedule information to the given output.
3964void SMSchedule::print(raw_ostream &os) const {
3965 // Iterate over each cycle.
3966 for (int cycle = getFirstCycle(); cycle <= getFinalCycle(); ++cycle) {
3967 // Iterate over each instruction in the cycle.
3968 const_sched_iterator cycleInstrs = ScheduledInstrs.find(cycle);
3969 for (SUnit *CI : cycleInstrs->second) {
3970 os << "cycle " << cycle << " (" << stageScheduled(CI) << ") ";
3971 os << "(" << CI->NodeNum << ") ";
3972 CI->getInstr()->print(os);
3973 os << "\n";
3974 }
3975 }
3976}
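// Editor's note (illustrative output only, not captured from an actual run):
// each line emitted above has the form
//   cycle <cycle> (<stage>) (<NodeNum>) <machine instruction>
// e.g. "cycle 2 (1) (14) %vreg7 = ...", matching the stream inserts in print().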
3977
3978#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3979/// Utility function used for debugging to print the schedule.
3980LLVM_DUMP_METHOD void SMSchedule::dump() const { print(dbgs()); }
3981#endif