LLVM  14.0.0git
RDFLiveness.cpp
1 //===- RDFLiveness.cpp ----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Computation of the liveness information from the data-flow graph.
10 //
11 // The main functionality of this code is to compute block live-in
12 // information. With the live-in information in place, the placement
13 // of kill flags can also be recalculated.
14 //
15 // The block live-in calculation is based on the ideas from the following
16 // publication:
17 //
18 // Dibyendu Das, Ramakrishna Upadrasta, Benoit Dupont de Dinechin.
19 // "Efficient Liveness Computation Using Merge Sets and DJ-Graphs."
20 // ACM Transactions on Architecture and Code Optimization, Association for
21 // Computing Machinery, 2012, ACM TACO Special Issue on "High-Performance
22 // and Embedded Architectures and Compilers", 8 (4),
23 // <10.1145/2086696.2086706>. <hal-00647369>
24 //
25 #include "llvm/ADT/BitVector.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/STLExtras.h"
28 #include "llvm/ADT/SetVector.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineDominanceFrontier.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineInstr.h"
35 #include "llvm/CodeGen/RDFLiveness.h"
36 #include "llvm/CodeGen/RDFGraph.h"
37 #include "llvm/CodeGen/RDFRegisters.h"
38 #include "llvm/CodeGen/TargetRegisterInfo.h"
39 #include "llvm/MC/LaneBitmask.h"
40 #include "llvm/MC/MCRegisterInfo.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstdint>
48 #include <iterator>
49 #include <map>
50 #include <unordered_map>
51 #include <utility>
52 #include <vector>
53 
54 using namespace llvm;
55 using namespace rdf;
56 
57 static cl::opt<unsigned> MaxRecNest("rdf-liveness-max-rec", cl::init(25),
58  cl::Hidden, cl::desc("Maximum recursion level"));
59 
60 namespace llvm {
61 namespace rdf {
62 
63 raw_ostream &operator<<(raw_ostream &OS, const Print<Liveness::RefMap> &P) {
64   OS << '{';
65  for (auto &I : P.Obj) {
66  OS << ' ' << printReg(I.first, &P.G.getTRI()) << '{';
67  for (auto J = I.second.begin(), E = I.second.end(); J != E; ) {
68  OS << Print<NodeId>(J->first, P.G) << PrintLaneMaskOpt(J->second);
69  if (++J != E)
70  OS << ',';
71  }
72  OS << '}';
73  }
74  OS << " }";
75  return OS;
76  }
77 
78 } // end namespace rdf
79 } // end namespace llvm
80 
81 // The order in the returned sequence is the order of reaching defs in the
82 // upward traversal: the first def is the closest to the given reference RefA,
83 // the next one is further up, and so on.
84 // The list ends at a reaching phi def, or when the reference from RefA is
85 // covered by the defs in the list (see FullChain).
86 // This function provides two modes of operation:
87 // (1) Returning the sequence of reaching defs for a particular reference
88 // node. This sequence will terminate at the first phi node [1].
89 // (2) Returning a partial sequence of reaching defs, where the final goal
90 // is to traverse past phi nodes to the actual defs arising from the code
91 // itself.
92 // In mode (2), the register reference for which the search was started
93 // may be different from the reference node RefA, for which this call was
94 // made, hence the argument RefRR, which holds the original register.
95 // Also, some definitions may have already been encountered in a previous
96 // call that will influence register covering. The register references
97 // already defined are passed in through DefRRs.
98 // In mode (1), the "continuation" considerations do not apply, and the
99 // RefRR is the same as the register in RefA, and the set DefRRs is empty.
100 //
101 // [1] It is possible for multiple phi nodes to be included in the returned
102 // sequence:
103 // SubA = phi ...
104 // SubB = phi ...
105 // ... = SuperAB(rdef:SubA), SuperAB"(rdef:SubB)
106 // However, these phi nodes are independent from one another in terms of
107 // the data-flow.
108 
109 NodeList Liveness::getAllReachingDefs(RegisterRef RefRR,
110       NodeAddr<RefNode*> RefA, bool TopShadows, bool FullChain,
111  const RegisterAggr &DefRRs) {
112  NodeList RDefs; // Return value.
113   SetVector<NodeId> DefQ;
114   DenseMap<MachineInstr*, uint32_t> OrdMap;
115 
116  // Dead defs will be treated as if they were live, since they are actually
117  // on the data-flow path. They cannot be ignored because even though they
118  // do not generate meaningful values, they still modify registers.
119 
120  // If the reference is undefined, there is nothing to do.
121  if (RefA.Addr->getFlags() & NodeAttrs::Undef)
122  return RDefs;
123 
124  // The initial queue should not have reaching defs for shadows. The
125  // whole point of a shadow is that it will have a reaching def that
126  // is not aliased to the reaching defs of the related shadows.
127  NodeId Start = RefA.Id;
128  auto SNA = DFG.addr<RefNode*>(Start);
129  if (NodeId RD = SNA.Addr->getReachingDef())
130  DefQ.insert(RD);
131  if (TopShadows) {
132  for (auto S : DFG.getRelatedRefs(RefA.Addr->getOwner(DFG), RefA))
133  if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
134  DefQ.insert(RD);
135  }
136 
137  // Collect all the reaching defs, going up until a phi node is encountered,
138  // or there are no more reaching defs. From this set, the actual set of
139  // reaching defs will be selected.
140  // The traversal upwards must go on until a covering def is encountered.
141  // It is possible that a collection of non-covering (individually) defs
142  // will be sufficient, but keep going until a covering one is found.
143  for (unsigned i = 0; i < DefQ.size(); ++i) {
144  auto TA = DFG.addr<DefNode*>(DefQ[i]);
145  if (TA.Addr->getFlags() & NodeAttrs::PhiRef)
146  continue;
147  // Stop at the covering/overwriting def of the initial register reference.
148  RegisterRef RR = TA.Addr->getRegRef(DFG);
149  if (!DFG.IsPreservingDef(TA))
150  if (RegisterAggr::isCoverOf(RR, RefRR, PRI))
151  continue;
152  // Get the next level of reaching defs. This will include multiple
153  // reaching defs for shadows.
154  for (auto S : DFG.getRelatedRefs(TA.Addr->getOwner(DFG), TA))
155  if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
156  DefQ.insert(RD);
157  // Don't visit sibling defs. They share the same reaching def (which
158  // will be visited anyway), but they define something not aliased to
159  // this ref.
160  }
161 
162  // Return the MachineBasicBlock containing a given instruction.
163  auto Block = [this] (NodeAddr<InstrNode*> IA) -> MachineBasicBlock* {
164  if (IA.Addr->getKind() == NodeAttrs::Stmt)
165  return NodeAddr<StmtNode*>(IA).Addr->getCode()->getParent();
166  assert(IA.Addr->getKind() == NodeAttrs::Phi);
167  NodeAddr<PhiNode*> PA = IA;
168  NodeAddr<BlockNode*> BA = PA.Addr->getOwner(DFG);
169  return BA.Addr->getCode();
170  };
171 
172  SmallSet<NodeId,32> Defs;
173 
174  // Remove all non-phi defs that are not aliased to RefRR, and segregate
175   // the remaining defs into buckets keyed by their containing blocks.
176  std::map<NodeId, NodeAddr<InstrNode*>> Owners;
177  std::map<MachineBasicBlock*, SmallVector<NodeId,32>> Blocks;
178  for (NodeId N : DefQ) {
179  auto TA = DFG.addr<DefNode*>(N);
180  bool IsPhi = TA.Addr->getFlags() & NodeAttrs::PhiRef;
181  if (!IsPhi && !PRI.alias(RefRR, TA.Addr->getRegRef(DFG)))
182  continue;
183  Defs.insert(TA.Id);
184  NodeAddr<InstrNode*> IA = TA.Addr->getOwner(DFG);
185  Owners[TA.Id] = IA;
186  Blocks[Block(IA)].push_back(IA.Id);
187  }
188 
189  auto Precedes = [this,&OrdMap] (NodeId A, NodeId B) {
190  if (A == B)
191  return false;
192     NodeAddr<InstrNode*> OA = DFG.addr<InstrNode*>(A);
193     NodeAddr<InstrNode*> OB = DFG.addr<InstrNode*>(B);
194  bool StmtA = OA.Addr->getKind() == NodeAttrs::Stmt;
195  bool StmtB = OB.Addr->getKind() == NodeAttrs::Stmt;
196  if (StmtA && StmtB) {
197  const MachineInstr *InA = NodeAddr<StmtNode*>(OA).Addr->getCode();
198  const MachineInstr *InB = NodeAddr<StmtNode*>(OB).Addr->getCode();
199  assert(InA->getParent() == InB->getParent());
200  auto FA = OrdMap.find(InA);
201  if (FA != OrdMap.end())
202  return FA->second < OrdMap.find(InB)->second;
203  const MachineBasicBlock *BB = InA->getParent();
204  for (auto It = BB->begin(), E = BB->end(); It != E; ++It) {
205  if (It == InA->getIterator())
206  return true;
207  if (It == InB->getIterator())
208  return false;
209  }
210  llvm_unreachable("InA and InB should be in the same block");
211  }
212  // One of them is a phi node.
213  if (!StmtA && !StmtB) {
214  // Both are phis, which are unordered. Break the tie by id numbers.
215  return A < B;
216  }
217  // Only one of them is a phi. Phis always precede statements.
218  return !StmtA;
219  };
220 
221  auto GetOrder = [&OrdMap] (MachineBasicBlock &B) {
222  uint32_t Pos = 0;
223  for (MachineInstr &In : B)
224  OrdMap.insert({&In, ++Pos});
225  };
226 
227  // For each block, sort the nodes in it.
228  std::vector<MachineBasicBlock*> TmpBB;
229  for (auto &Bucket : Blocks) {
230  TmpBB.push_back(Bucket.first);
231  if (Bucket.second.size() > 2)
232  GetOrder(*Bucket.first);
233  llvm::sort(Bucket.second, Precedes);
234  }
235 
236  // Sort the blocks with respect to dominance.
237  llvm::sort(TmpBB,
238  [this](auto A, auto B) { return MDT.properlyDominates(A, B); });
239 
240  std::vector<NodeId> TmpInst;
241  for (MachineBasicBlock *MBB : llvm::reverse(TmpBB)) {
242  auto &Bucket = Blocks[MBB];
243  TmpInst.insert(TmpInst.end(), Bucket.rbegin(), Bucket.rend());
244  }
245 
246  // The vector is a list of instructions, so that defs coming from
247  // the same instruction don't need to be artificially ordered.
248  // Then, when computing the initial segment, and iterating over an
249   // instruction, pick the defs that contribute to the covering (i.e. are
250  // not covered by previously added defs). Check the defs individually,
251  // i.e. first check each def if is covered or not (without adding them
252  // to the tracking set), and then add all the selected ones.
253 
254  // The reason for this is this example:
255  // *d1<A>, *d2<B>, ... Assume A and B are aliased (can happen in phi nodes).
256  // *d3<C> If A \incl BuC, and B \incl AuC, then *d2 would be
257  // covered if we added A first, and A would be covered
258  // if we added B first.
259  // In this example we want both A and B, because we don't want to give
260  // either one priority over the other, since they belong to the same
261  // statement.
262 
263  RegisterAggr RRs(DefRRs);
264 
265  auto DefInSet = [&Defs] (NodeAddr<RefNode*> TA) -> bool {
266  return TA.Addr->getKind() == NodeAttrs::Def &&
267  Defs.count(TA.Id);
268  };
269 
270  for (NodeId T : TmpInst) {
271  if (!FullChain && RRs.hasCoverOf(RefRR))
272  break;
273  auto TA = DFG.addr<InstrNode*>(T);
274  bool IsPhi = DFG.IsCode<NodeAttrs::Phi>(TA);
275  NodeList Ds;
276  for (NodeAddr<DefNode*> DA : TA.Addr->members_if(DefInSet, DFG)) {
277  RegisterRef QR = DA.Addr->getRegRef(DFG);
278  // Add phi defs even if they are covered by subsequent defs. This is
279  // for cases where the reached use is not covered by any of the defs
280  // encountered so far: the phi def is needed to expose the liveness
281  // of that use to the entry of the block.
282  // Example:
283  // phi d1<R3>(,d2,), ... Phi def d1 is covered by d2.
284  // d2<R3>(d1,,u3), ...
285  // ..., u3<D1>(d2) This use needs to be live on entry.
286  if (FullChain || IsPhi || !RRs.hasCoverOf(QR))
287  Ds.push_back(DA);
288  }
289  llvm::append_range(RDefs, Ds);
290  for (NodeAddr<DefNode*> DA : Ds) {
291  // When collecting a full chain of definitions, do not consider phi
292  // defs to actually define a register.
293  uint16_t Flags = DA.Addr->getFlags();
294  if (!FullChain || !(Flags & NodeAttrs::PhiRef))
295  if (!(Flags & NodeAttrs::Preserving)) // Don't care about Undef here.
296  RRs.insert(DA.Addr->getRegRef(DFG));
297  }
298  }
299 
300  auto DeadP = [](const NodeAddr<DefNode*> DA) -> bool {
301  return DA.Addr->getFlags() & NodeAttrs::Dead;
302  };
303  llvm::erase_if(RDefs, DeadP);
304 
305  return RDefs;
306 }
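// Example (added for illustration; not part of the original source). The two
// modes described above correspond to how other members of this class invoke
// getAllReachingDefs; the names UA, RefRR and DefRRs below are assumed
// stand-ins for a use node, the original register reference, and previously
// seen defs:
//
//   // Mode (1): defs reaching UA's own register, stopping at the first phi.
//   NodeList Chain = getAllReachingDefs(UA.Addr->getRegRef(DFG), UA,
//                                       /*TopShadows=*/false,
//                                       /*FullChain=*/false, NoRegs);
//
//   // Mode (2): keep walking past phi defs until RefRR is covered, taking
//   // the already-encountered defs (DefRRs) into account.
//   NodeList Full = getAllReachingDefs(RefRR, UA, /*TopShadows=*/false,
//                                      /*FullChain=*/true, DefRRs);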
307 
308 std::pair<NodeSet,bool>
309 Liveness::getAllReachingDefsRec(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
310       NodeSet &Visited, const NodeSet &Defs) {
311  return getAllReachingDefsRecImpl(RefRR, RefA, Visited, Defs, 0, MaxRecNest);
312 }
313 
314 std::pair<NodeSet,bool>
315 Liveness::getAllReachingDefsRecImpl(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
316  NodeSet &Visited, const NodeSet &Defs, unsigned Nest, unsigned MaxNest) {
317  if (Nest > MaxNest)
318  return { NodeSet(), false };
319  // Collect all defined registers. Do not consider phis to be defining
320  // anything, only collect "real" definitions.
321  RegisterAggr DefRRs(PRI);
322  for (NodeId D : Defs) {
323  const auto DA = DFG.addr<const DefNode*>(D);
324  if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
325  DefRRs.insert(DA.Addr->getRegRef(DFG));
326  }
327 
328  NodeList RDs = getAllReachingDefs(RefRR, RefA, false, true, DefRRs);
329  if (RDs.empty())
330  return { Defs, true };
331 
332  // Make a copy of the preexisting definitions and add the newly found ones.
333  NodeSet TmpDefs = Defs;
334  for (NodeAddr<NodeBase*> R : RDs)
335  TmpDefs.insert(R.Id);
336 
337  NodeSet Result = Defs;
338 
339  for (NodeAddr<DefNode*> DA : RDs) {
340  Result.insert(DA.Id);
341  if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
342  continue;
343  NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG);
344  if (Visited.count(PA.Id))
345  continue;
346  Visited.insert(PA.Id);
347  // Go over all phi uses and get the reaching defs for each use.
348  for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
349  const auto &T = getAllReachingDefsRecImpl(RefRR, U, Visited, TmpDefs,
350  Nest+1, MaxNest);
351  if (!T.second)
352  return { T.first, false };
353  Result.insert(T.first.begin(), T.first.end());
354  }
355  }
356 
357  return { Result, true };
358 }
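// Note (added for illustration; not part of the original source). The bool in
// the returned pair reports whether the recursion completed within the limit
// set by -rdf-liveness-max-rec. A hypothetical caller (names assumed):
//
//   NodeSet Visited, Defs;
//   std::pair<NodeSet, bool> RD =
//       getAllReachingDefsRec(RefRR, UA, Visited, Defs);
//   if (!RD.second) {
//     // Recursion was cut off at MaxRecNest: RD.first is incomplete, so the
//     // caller must fall back to a conservative answer.
//   }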
359 
360 /// Find the nearest ref node aliased to RefRR, going upwards in the data
361 /// flow, starting from the instruction immediately preceding Inst.
362 NodeAddr<RefNode*> Liveness::getNearestAliasedRef(RegisterRef RefRR,
363       NodeAddr<InstrNode*> IA) {
364   NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
365  NodeList Ins = BA.Addr->members(DFG);
366  NodeId FindId = IA.Id;
367  auto E = Ins.rend();
368  auto B = std::find_if(Ins.rbegin(), E,
369  [FindId] (const NodeAddr<InstrNode*> T) {
370  return T.Id == FindId;
371  });
372  // Do not scan IA (which is what B would point to).
373  if (B != E)
374  ++B;
375 
376  do {
377  // Process the range of instructions from B to E.
378  for (NodeAddr<InstrNode*> I : make_range(B, E)) {
379  NodeList Refs = I.Addr->members(DFG);
380  NodeAddr<RefNode*> Clob, Use;
381  // Scan all the refs in I aliased to RefRR, and return the one that
382  // is the closest to the output of I, i.e. def > clobber > use.
383  for (NodeAddr<RefNode*> R : Refs) {
384  if (!PRI.alias(R.Addr->getRegRef(DFG), RefRR))
385  continue;
386  if (DFG.IsDef(R)) {
387  // If it's a non-clobbering def, just return it.
388  if (!(R.Addr->getFlags() & NodeAttrs::Clobbering))
389  return R;
390  Clob = R;
391  } else {
392  Use = R;
393  }
394  }
395  if (Clob.Id != 0)
396  return Clob;
397  if (Use.Id != 0)
398  return Use;
399  }
400 
401  // Go up to the immediate dominator, if any.
402  MachineBasicBlock *BB = BA.Addr->getCode();
403  BA = NodeAddr<BlockNode*>();
404  if (MachineDomTreeNode *N = MDT.getNode(BB)) {
405  if ((N = N->getIDom()))
406  BA = DFG.findBlock(N->getBlock());
407  }
408  if (!BA.Id)
409  break;
410 
411  Ins = BA.Addr->members(DFG);
412  B = Ins.rbegin();
413  E = Ins.rend();
414  } while (true);
415 
416  return NodeAddr<RefNode*>();
417 }
418 
419 NodeSet Liveness::getAllReachedUses(RegisterRef RefRR,
420       NodeAddr<DefNode*> DefA, const RegisterAggr &DefRRs) {
421  NodeSet Uses;
422 
423  // If the original register is already covered by all the intervening
424  // defs, no more uses can be reached.
425  if (DefRRs.hasCoverOf(RefRR))
426  return Uses;
427 
428  // Add all directly reached uses.
429  // If the def is dead, it does not provide a value for any use.
430  bool IsDead = DefA.Addr->getFlags() & NodeAttrs::Dead;
431  NodeId U = !IsDead ? DefA.Addr->getReachedUse() : 0;
432  while (U != 0) {
433  auto UA = DFG.addr<UseNode*>(U);
434  if (!(UA.Addr->getFlags() & NodeAttrs::Undef)) {
435  RegisterRef UR = UA.Addr->getRegRef(DFG);
436  if (PRI.alias(RefRR, UR) && !DefRRs.hasCoverOf(UR))
437  Uses.insert(U);
438  }
439  U = UA.Addr->getSibling();
440  }
441 
442  // Traverse all reached defs. This time dead defs cannot be ignored.
443  for (NodeId D = DefA.Addr->getReachedDef(), NextD; D != 0; D = NextD) {
444  auto DA = DFG.addr<DefNode*>(D);
445  NextD = DA.Addr->getSibling();
446  RegisterRef DR = DA.Addr->getRegRef(DFG);
447  // If this def is already covered, it cannot reach anything new.
448  // Similarly, skip it if it is not aliased to the interesting register.
449  if (DefRRs.hasCoverOf(DR) || !PRI.alias(RefRR, DR))
450  continue;
451  NodeSet T;
452  if (DFG.IsPreservingDef(DA)) {
453  // If it is a preserving def, do not update the set of intervening defs.
454  T = getAllReachedUses(RefRR, DA, DefRRs);
455  } else {
456  RegisterAggr NewDefRRs = DefRRs;
457  NewDefRRs.insert(DR);
458  T = getAllReachedUses(RefRR, DA, NewDefRRs);
459  }
460  Uses.insert(T.begin(), T.end());
461  }
462  return Uses;
463 }
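// Example (added for illustration; not part of the original source): collect
// every non-undef use that may observe the value written by a def node DA of
// register RR, starting with an empty set of intervening defs. DA and RR are
// assumed names for this sketch:
//
//   NodeSet UseIds = getAllReachedUses(RR, DA, RegisterAggr(PRI));
//   for (NodeId U : UseIds) {
//     auto UA = DFG.addr<UseNode*>(U);
//     // ... inspect UA ...
//   }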
464 
465 void Liveness::computePhiInfo() {
466   RealUseMap.clear();
467 
468  NodeList Phis;
469  NodeAddr<FuncNode*> FA = DFG.getFunc();
470  NodeList Blocks = FA.Addr->members(DFG);
471  for (NodeAddr<BlockNode*> BA : Blocks) {
472  auto Ps = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
473  llvm::append_range(Phis, Ps);
474  }
475 
476  // phi use -> (map: reaching phi -> set of registers defined in between)
477  std::map<NodeId,std::map<NodeId,RegisterAggr>> PhiUp;
478  std::vector<NodeId> PhiUQ; // Work list of phis for upward propagation.
479  std::unordered_map<NodeId,RegisterAggr> PhiDRs; // Phi -> registers defined by it.
480 
481  // Go over all phis.
482  for (NodeAddr<PhiNode*> PhiA : Phis) {
483  // Go over all defs and collect the reached uses that are non-phi uses
484  // (i.e. the "real uses").
485  RefMap &RealUses = RealUseMap[PhiA.Id];
486  NodeList PhiRefs = PhiA.Addr->members(DFG);
487 
488  // Have a work queue of defs whose reached uses need to be found.
489  // For each def, add to the queue all reached (non-phi) defs.
490  SetVector<NodeId> DefQ;
491  NodeSet PhiDefs;
492  RegisterAggr DRs(PRI);
493  for (NodeAddr<RefNode*> R : PhiRefs) {
494  if (!DFG.IsRef<NodeAttrs::Def>(R))
495  continue;
496  DRs.insert(R.Addr->getRegRef(DFG));
497  DefQ.insert(R.Id);
498  PhiDefs.insert(R.Id);
499  }
500  PhiDRs.insert(std::make_pair(PhiA.Id, DRs));
501 
502  // Collect the super-set of all possible reached uses. This set will
503  // contain all uses reached from this phi, either directly from the
504  // phi defs, or (recursively) via non-phi defs reached by the phi defs.
505   // This set of uses will later be trimmed to contain only those uses that
506  // are actually reached by the phi defs.
507  for (unsigned i = 0; i < DefQ.size(); ++i) {
508  NodeAddr<DefNode*> DA = DFG.addr<DefNode*>(DefQ[i]);
509  // Visit all reached uses. Phi defs should not really have the "dead"
510  // flag set, but check it anyway for consistency.
511  bool IsDead = DA.Addr->getFlags() & NodeAttrs::Dead;
512  NodeId UN = !IsDead ? DA.Addr->getReachedUse() : 0;
513  while (UN != 0) {
514  NodeAddr<UseNode*> A = DFG.addr<UseNode*>(UN);
515  uint16_t F = A.Addr->getFlags();
516  if ((F & (NodeAttrs::Undef | NodeAttrs::PhiRef)) == 0) {
517  RegisterRef R = A.Addr->getRegRef(DFG);
518  RealUses[R.Reg].insert({A.Id,R.Mask});
519  }
520  UN = A.Addr->getSibling();
521  }
522  // Visit all reached defs, and add them to the queue. These defs may
523  // override some of the uses collected here, but that will be handled
524  // later.
525  NodeId DN = DA.Addr->getReachedDef();
526  while (DN != 0) {
527  NodeAddr<DefNode*> A = DFG.addr<DefNode*>(DN);
528  for (auto T : DFG.getRelatedRefs(A.Addr->getOwner(DFG), A)) {
529  uint16_t Flags = NodeAddr<DefNode*>(T).Addr->getFlags();
530  // Must traverse the reached-def chain. Consider:
531  // def(D0) -> def(R0) -> def(R0) -> use(D0)
532  // The reachable use of D0 passes through a def of R0.
533  if (!(Flags & NodeAttrs::PhiRef))
534  DefQ.insert(T.Id);
535  }
536  DN = A.Addr->getSibling();
537  }
538  }
539   // Filter out those uses that appear to be reachable, but really
540  // are not. For example:
541  //
542  // R1:0 = d1
543  // = R1:0 u2 Reached by d1.
544  // R0 = d3
545  // = R1:0 u4 Still reached by d1: indirectly through
546  // the def d3.
547  // R1 = d5
548  // = R1:0 u6 Not reached by d1 (covered collectively
549  // by d3 and d5), but following reached
550  // defs and uses from d1 will lead here.
551  for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
552  // For each reached register UI->first, there is a set UI->second, of
553  // uses of it. For each such use, check if it is reached by this phi,
554     // i.e. check if the set of its reaching defs intersects the set of
555  // this phi's defs.
556  NodeRefSet Uses = UI->second;
557  UI->second.clear();
558  for (std::pair<NodeId,LaneBitmask> I : Uses) {
559  auto UA = DFG.addr<UseNode*>(I.first);
560  // Undef flag is checked above.
561  assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
562  RegisterRef R(UI->first, I.second);
563  // Calculate the exposed part of the reached use.
564  RegisterAggr Covered(PRI);
565  for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
566  if (PhiDefs.count(DA.Id))
567  break;
568  Covered.insert(DA.Addr->getRegRef(DFG));
569  }
570  if (RegisterRef RC = Covered.clearIn(R)) {
571  // We are updating the map for register UI->first, so we need
572  // to map RC to be expressed in terms of that register.
573  RegisterRef S = PRI.mapTo(RC, UI->first);
574  UI->second.insert({I.first, S.Mask});
575  }
576  }
577  UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
578  }
579 
580  // If this phi reaches some "real" uses, add it to the queue for upward
581  // propagation.
582  if (!RealUses.empty())
583  PhiUQ.push_back(PhiA.Id);
584 
585  // Go over all phi uses and check if the reaching def is another phi.
586  // Collect the phis that are among the reaching defs of these uses.
587  // While traversing the list of reaching defs for each phi use, accumulate
588  // the set of registers defined between this phi (PhiA) and the owner phi
589  // of the reaching def.
590  NodeSet SeenUses;
591 
592  for (auto I : PhiRefs) {
593  if (!DFG.IsRef<NodeAttrs::Use>(I) || SeenUses.count(I.Id))
594  continue;
595  NodeAddr<PhiUseNode*> PUA = I;
596  if (PUA.Addr->getReachingDef() == 0)
597  continue;
598 
599  RegisterRef UR = PUA.Addr->getRegRef(DFG);
600  NodeList Ds = getAllReachingDefs(UR, PUA, true, false, NoRegs);
601  RegisterAggr DefRRs(PRI);
602 
603  for (NodeAddr<DefNode*> D : Ds) {
604  if (D.Addr->getFlags() & NodeAttrs::PhiRef) {
605  NodeId RP = D.Addr->getOwner(DFG).Id;
606  std::map<NodeId,RegisterAggr> &M = PhiUp[PUA.Id];
607  auto F = M.find(RP);
608  if (F == M.end())
609  M.insert(std::make_pair(RP, DefRRs));
610  else
611  F->second.insert(DefRRs);
612  }
613  DefRRs.insert(D.Addr->getRegRef(DFG));
614  }
615 
616  for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PhiA, PUA))
617  SeenUses.insert(T.Id);
618  }
619  }
620 
621  if (Trace) {
622  dbgs() << "Phi-up-to-phi map with intervening defs:\n";
623  for (auto I : PhiUp) {
624  dbgs() << "phi " << Print<NodeId>(I.first, DFG) << " -> {";
625  for (auto R : I.second)
626  dbgs() << ' ' << Print<NodeId>(R.first, DFG)
627  << Print<RegisterAggr>(R.second, DFG);
628  dbgs() << " }\n";
629  }
630  }
631 
632  // Propagate the reached registers up in the phi chain.
633  //
634  // The following type of situation needs careful handling:
635  //
636  // phi d1<R1:0> (1)
637  // |
638  // ... d2<R1>
639  // |
640  // phi u3<R1:0> (2)
641  // |
642  // ... u4<R1>
643  //
644  // The phi node (2) defines a register pair R1:0, and reaches a "real"
645  // use u4 of just R1. The same phi node is also known to reach (upwards)
646  // the phi node (1). However, the use u4 is not reached by phi (1),
647  // because of the intervening definition d2 of R1. The data flow between
648  // phis (1) and (2) is restricted to R1:0 minus R1, i.e. R0.
649  //
650  // When propagating uses up the phi chains, get the all reaching defs
651  // for a given phi use, and traverse the list until the propagated ref
652  // is covered, or until reaching the final phi. Only assume that the
653  // reference reaches the phi in the latter case.
654 
655  // The operation "clearIn" can be expensive. For a given set of intervening
656  // defs, cache the result of subtracting these defs from a given register
657  // ref.
658  using SubMap = std::unordered_map<RegisterRef, RegisterRef>;
659  std::unordered_map<RegisterAggr, SubMap> Subs;
660  auto ClearIn = [] (RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) {
661  if (Mid.empty())
662  return RR;
663  auto F = SM.find(RR);
664  if (F != SM.end())
665  return F->second;
666  RegisterRef S = Mid.clearIn(RR);
667  SM.insert({RR, S});
668  return S;
669  };
670 
671  // Go over all phis.
672  for (unsigned i = 0; i < PhiUQ.size(); ++i) {
673  auto PA = DFG.addr<PhiNode*>(PhiUQ[i]);
674  NodeList PUs = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG);
675  RefMap &RUM = RealUseMap[PA.Id];
676 
677  for (NodeAddr<UseNode*> UA : PUs) {
678  std::map<NodeId,RegisterAggr> &PUM = PhiUp[UA.Id];
679  RegisterRef UR = UA.Addr->getRegRef(DFG);
680  for (const std::pair<const NodeId, RegisterAggr> &P : PUM) {
681  bool Changed = false;
682  const RegisterAggr &MidDefs = P.second;
683  // Collect the set PropUp of uses that are reached by the current
684  // phi PA, and are not covered by any intervening def between the
685  // currently visited use UA and the upward phi P.
686 
687  if (MidDefs.hasCoverOf(UR))
688  continue;
689  SubMap &SM = Subs[MidDefs];
690 
691  // General algorithm:
692  // for each (R,U) : U is use node of R, U is reached by PA
693  // if MidDefs does not cover (R,U)
694  // then add (R-MidDefs,U) to RealUseMap[P]
695  //
696  for (const std::pair<const RegisterId, NodeRefSet> &T : RUM) {
697  RegisterRef R(T.first);
698  // The current phi (PA) could be a phi for a regmask. It could
699  // reach a whole variety of uses that are not related to the
700  // specific upward phi (P.first).
701  const RegisterAggr &DRs = PhiDRs.at(P.first);
702  if (!DRs.hasAliasOf(R))
703  continue;
704  R = PRI.mapTo(DRs.intersectWith(R), T.first);
705  for (std::pair<NodeId,LaneBitmask> V : T.second) {
706  LaneBitmask M = R.Mask & V.second;
707  if (M.none())
708  continue;
709  if (RegisterRef SS = ClearIn(RegisterRef(R.Reg, M), MidDefs, SM)) {
710  NodeRefSet &RS = RealUseMap[P.first][SS.Reg];
711  Changed |= RS.insert({V.first,SS.Mask}).second;
712  }
713  }
714  }
715 
716  if (Changed)
717  PhiUQ.push_back(P.first);
718  }
719  }
720  }
721 
722  if (Trace) {
723  dbgs() << "Real use map:\n";
724  for (auto I : RealUseMap) {
725  dbgs() << "phi " << Print<NodeId>(I.first, DFG);
726  NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first);
727  NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG);
728  if (!Ds.empty()) {
729  RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(DFG);
730  dbgs() << '<' << Print<RegisterRef>(RR, DFG) << '>';
731  } else {
732  dbgs() << "<noreg>";
733  }
734  dbgs() << " -> " << Print<RefMap>(I.second, DFG) << '\n';
735  }
736  }
737 }
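// Note (added commentary; not part of the original source): after
// computePhiInfo, RealUseMap has the shape
//   phi node id -> { register id -> { (use node id, lane mask), ... } },
// i.e. for each phi, the non-phi ("real") uses reached by its defs, keyed by
// register and refined by lane mask. computeLiveIns below consumes this map
// when it builds the PhiLON and PhiLOX maps.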
738 
739 void Liveness::computeLiveIns() {
740   // Populate the node-to-block map. This speeds up the calculations
741  // significantly.
742  NBMap.clear();
743  for (NodeAddr<BlockNode*> BA : DFG.getFunc().Addr->members(DFG)) {
744  MachineBasicBlock *BB = BA.Addr->getCode();
745  for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
746  for (NodeAddr<RefNode*> RA : IA.Addr->members(DFG))
747  NBMap.insert(std::make_pair(RA.Id, BB));
748  NBMap.insert(std::make_pair(IA.Id, BB));
749  }
750  }
751 
752  MachineFunction &MF = DFG.getMF();
753 
754  // Compute IDF first, then the inverse.
755  decltype(IIDF) IDF;
756  for (MachineBasicBlock &B : MF) {
757  auto F1 = MDF.find(&B);
758  if (F1 == MDF.end())
759  continue;
760  SetVector<MachineBasicBlock*> IDFB(F1->second.begin(), F1->second.end());
761  for (unsigned i = 0; i < IDFB.size(); ++i) {
762  auto F2 = MDF.find(IDFB[i]);
763  if (F2 != MDF.end())
764  IDFB.insert(F2->second.begin(), F2->second.end());
765  }
766  // Add B to the IDF(B). This will put B in the IIDF(B).
767  IDFB.insert(&B);
768  IDF[&B].insert(IDFB.begin(), IDFB.end());
769  }
770 
771  for (auto I : IDF)
772  for (auto S : I.second)
773  IIDF[S].insert(I.first);
774 
775  computePhiInfo();
776 
777  NodeAddr<FuncNode*> FA = DFG.getFunc();
778  NodeList Blocks = FA.Addr->members(DFG);
779 
780  // Build the phi live-on-entry map.
781  for (NodeAddr<BlockNode*> BA : Blocks) {
782  MachineBasicBlock *MB = BA.Addr->getCode();
783  RefMap &LON = PhiLON[MB];
784  for (auto P : BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG))
785  for (const RefMap::value_type &S : RealUseMap[P.Id])
786  LON[S.first].insert(S.second.begin(), S.second.end());
787  }
788 
789  if (Trace) {
790  dbgs() << "Phi live-on-entry map:\n";
791  for (auto &I : PhiLON)
792  dbgs() << "block #" << I.first->getNumber() << " -> "
793  << Print<RefMap>(I.second, DFG) << '\n';
794  }
795 
796  // Build the phi live-on-exit map. Each phi node has some set of reached
797  // "real" uses. Propagate this set backwards into the block predecessors
798  // through the reaching defs of the corresponding phi uses.
799  for (NodeAddr<BlockNode*> BA : Blocks) {
800  NodeList Phis = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
801  for (NodeAddr<PhiNode*> PA : Phis) {
802  RefMap &RUs = RealUseMap[PA.Id];
803  if (RUs.empty())
804  continue;
805 
806  NodeSet SeenUses;
807  for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
808  if (!SeenUses.insert(U.Id).second)
809  continue;
810  NodeAddr<PhiUseNode*> PUA = U;
811  if (PUA.Addr->getReachingDef() == 0)
812  continue;
813 
814  // Each phi has some set (possibly empty) of reached "real" uses,
815  // that is, uses that are part of the compiled program. Such a use
816  // may be located in some farther block, but following a chain of
817  // reaching defs will eventually lead to this phi.
818  // Any chain of reaching defs may fork at a phi node, but there
819  // will be a path upwards that will lead to this phi. Now, this
820  // chain will need to fork at this phi, since some of the reached
821  // uses may have definitions joining in from multiple predecessors.
822  // For each reached "real" use, identify the set of reaching defs
823  // coming from each predecessor P, and add them to PhiLOX[P].
824  //
825  auto PrA = DFG.addr<BlockNode*>(PUA.Addr->getPredecessor());
826  RefMap &LOX = PhiLOX[PrA.Addr->getCode()];
827 
828  for (const std::pair<const RegisterId, NodeRefSet> &RS : RUs) {
829  // We need to visit each individual use.
830  for (std::pair<NodeId,LaneBitmask> P : RS.second) {
831  // Create a register ref corresponding to the use, and find
832  // all reaching defs starting from the phi use, and treating
833  // all related shadows as a single use cluster.
834  RegisterRef S(RS.first, P.second);
835  NodeList Ds = getAllReachingDefs(S, PUA, true, false, NoRegs);
836  for (NodeAddr<DefNode*> D : Ds) {
837  // Calculate the mask corresponding to the visited def.
838  RegisterAggr TA(PRI);
839  TA.insert(D.Addr->getRegRef(DFG)).intersect(S);
840  LaneBitmask TM = TA.makeRegRef().Mask;
841  LOX[S.Reg].insert({D.Id, TM});
842  }
843  }
844  }
845 
846  for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PA, PUA))
847  SeenUses.insert(T.Id);
848  } // for U : phi uses
849  } // for P : Phis
850  } // for B : Blocks
851 
852  if (Trace) {
853  dbgs() << "Phi live-on-exit map:\n";
854  for (auto &I : PhiLOX)
855  dbgs() << "block #" << I.first->getNumber() << " -> "
856  << Print<RefMap>(I.second, DFG) << '\n';
857  }
858 
859  RefMap LiveIn;
860  traverse(&MF.front(), LiveIn);
861 
862  // Add function live-ins to the live-in set of the function entry block.
863  LiveMap[&MF.front()].insert(DFG.getLiveIns());
864 
865  if (Trace) {
866  // Dump the liveness map
867  for (MachineBasicBlock &B : MF) {
868  std::vector<RegisterRef> LV;
869  for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
870  LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask));
871  llvm::sort(LV);
872  dbgs() << printMBBReference(B) << "\t rec = {";
873  for (auto I : LV)
874  dbgs() << ' ' << Print<RegisterRef>(I, DFG);
875  dbgs() << " }\n";
876  //dbgs() << "\tcomp = " << Print<RegisterAggr>(LiveMap[&B], DFG) << '\n';
877 
878  LV.clear();
879  const RegisterAggr &LG = LiveMap[&B];
880  for (auto I = LG.rr_begin(), E = LG.rr_end(); I != E; ++I)
881  LV.push_back(*I);
882  llvm::sort(LV);
883  dbgs() << "\tcomp = {";
884  for (auto I : LV)
885  dbgs() << ' ' << Print<RegisterRef>(I, DFG);
886  dbgs() << " }\n";
887 
888  }
889  }
890 }
891 
892 void Liveness::resetLiveIns() {
893   for (auto &B : DFG.getMF()) {
894  // Remove all live-ins.
895  std::vector<unsigned> T;
896  for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
897  T.push_back(LI.PhysReg);
898  for (auto I : T)
899  B.removeLiveIn(I);
900  // Add the newly computed live-ins.
901  const RegisterAggr &LiveIns = LiveMap[&B];
902  for (const RegisterRef R : make_range(LiveIns.rr_begin(), LiveIns.rr_end()))
903  B.addLiveIn({MCPhysReg(R.Reg), R.Mask});
904  }
905 }
906 
907 void Liveness::resetKills() {
908   for (auto &B : DFG.getMF())
909  resetKills(&B);
910 }
911 
912 void Liveness::resetKills(MachineBasicBlock *B) {
913   auto CopyLiveIns = [this] (MachineBasicBlock *B, BitVector &LV) -> void {
914  for (auto I : B->liveins()) {
915  MCSubRegIndexIterator S(I.PhysReg, &TRI);
916  if (!S.isValid()) {
917  LV.set(I.PhysReg);
918  continue;
919  }
920  do {
921  LaneBitmask M = TRI.getSubRegIndexLaneMask(S.getSubRegIndex());
922  if ((M & I.LaneMask).any())
923  LV.set(S.getSubReg());
924  ++S;
925  } while (S.isValid());
926  }
927  };
928 
929  BitVector LiveIn(TRI.getNumRegs()), Live(TRI.getNumRegs());
930  CopyLiveIns(B, LiveIn);
931  for (auto SI : B->successors())
932  CopyLiveIns(SI, Live);
933 
934  for (MachineInstr &MI : llvm::reverse(*B)) {
935  if (MI.isDebugInstr())
936  continue;
937 
938  MI.clearKillInfo();
939  for (auto &Op : MI.operands()) {
940  // An implicit def of a super-register may not necessarily start a
941  // live range of it, since an implicit use could be used to keep parts
942  // of it live. Instead of analyzing the implicit operands, ignore
943  // implicit defs.
944  if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
945  continue;
946       Register R = Op.getReg();
947       if (!Register::isPhysicalRegister(R))
948         continue;
949  for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
950  Live.reset(*SR);
951  }
952  for (auto &Op : MI.operands()) {
953  if (!Op.isReg() || !Op.isUse() || Op.isUndef())
954  continue;
955       Register R = Op.getReg();
956       if (!Register::isPhysicalRegister(R))
957         continue;
958  bool IsLive = false;
959  for (MCRegAliasIterator AR(R, &TRI, true); AR.isValid(); ++AR) {
960  if (!Live[*AR])
961  continue;
962  IsLive = true;
963  break;
964  }
965  if (!IsLive)
966  Op.setIsKill(true);
967  for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
968  Live.set(*SR);
969  }
970  }
971 }
972 
973 // Helper function to obtain the basic block containing the reaching def
974 // of the given use.
975 MachineBasicBlock *Liveness::getBlockWithRef(NodeId RN) const {
976  auto F = NBMap.find(RN);
977  if (F != NBMap.end())
978  return F->second;
979  llvm_unreachable("Node id not in map");
980 }
981 
982 void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
983  // The LiveIn map, for each (physical) register, contains the set of live
984  // reaching defs of that register that are live on entry to the associated
985  // block.
986 
987  // The summary of the traversal algorithm:
988  //
989  // R is live-in in B, if there exists a U(R), such that rdef(R) dom B
990  // and (U \in IDF(B) or B dom U).
991  //
992  // for (C : children) {
993  // LU = {}
994  // traverse(C, LU)
995  // LiveUses += LU
996  // }
997  //
998  // LiveUses -= Defs(B);
999  // LiveUses += UpwardExposedUses(B);
1000  // for (C : IIDF[B])
1001  // for (U : LiveUses)
1002  // if (Rdef(U) dom C)
1003  // C.addLiveIn(U)
1004  //
1005 
1006  // Go up the dominator tree (depth-first).
1007  MachineDomTreeNode *N = MDT.getNode(B);
1008  for (auto I : *N) {
1009  RefMap L;
1010  MachineBasicBlock *SB = I->getBlock();
1011  traverse(SB, L);
1012 
1013  for (auto S : L)
1014  LiveIn[S.first].insert(S.second.begin(), S.second.end());
1015  }
1016 
1017  if (Trace) {
1018  dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__
1019  << " after recursion into: {";
1020  for (auto I : *N)
1021  dbgs() << ' ' << I->getBlock()->getNumber();
1022  dbgs() << " }\n";
1023  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1024  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1025  }
1026 
1027  // Add reaching defs of phi uses that are live on exit from this block.
1028  RefMap &PUs = PhiLOX[B];
1029  for (auto &S : PUs)
1030  LiveIn[S.first].insert(S.second.begin(), S.second.end());
1031 
1032  if (Trace) {
1033  dbgs() << "after LOX\n";
1034  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1035  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1036  }
1037 
1038  // The LiveIn map at this point has all defs that are live-on-exit from B,
1039  // as if they were live-on-entry to B. First, we need to filter out all
1040  // defs that are present in this block. Then we will add reaching defs of
1041  // all upward-exposed uses.
1042 
1043  // To filter out the defs, first make a copy of LiveIn, and then re-populate
1044  // LiveIn with the defs that should remain.
1045  RefMap LiveInCopy = LiveIn;
1046  LiveIn.clear();
1047 
1048  for (const std::pair<const RegisterId, NodeRefSet> &LE : LiveInCopy) {
1049  RegisterRef LRef(LE.first);
1050  NodeRefSet &NewDefs = LiveIn[LRef.Reg]; // To be filled.
1051  const NodeRefSet &OldDefs = LE.second;
1052  for (NodeRef OR : OldDefs) {
1053  // R is a def node that was live-on-exit
1054  auto DA = DFG.addr<DefNode*>(OR.first);
1055  NodeAddr<InstrNode*> IA = DA.Addr->getOwner(DFG);
1056  NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
1057  if (B != BA.Addr->getCode()) {
1058  // Defs from a different block need to be preserved. Defs from this
1059  // block will need to be processed further, except for phi defs, the
1060  // liveness of which is handled through the PhiLON/PhiLOX maps.
1061  NewDefs.insert(OR);
1062  continue;
1063  }
1064 
1065  // Defs from this block need to stop the liveness from being
1066  // propagated upwards. This only applies to non-preserving defs,
1067  // and to the parts of the register actually covered by those defs.
1068  // (Note that phi defs should always be preserving.)
1069  RegisterAggr RRs(PRI);
1070  LRef.Mask = OR.second;
1071 
1072  if (!DFG.IsPreservingDef(DA)) {
1073  assert(!(IA.Addr->getFlags() & NodeAttrs::Phi));
1074  // DA is a non-phi def that is live-on-exit from this block, and
1075  // that is also located in this block. LRef is a register ref
1076  // whose use this def reaches. If DA covers LRef, then no part
1077         // of LRef is exposed upwards.
1078  if (RRs.insert(DA.Addr->getRegRef(DFG)).hasCoverOf(LRef))
1079  continue;
1080  }
1081 
1082  // DA itself was not sufficient to cover LRef. In general, it is
1083  // the last in a chain of aliased defs before the exit from this block.
1084  // There could be other defs in this block that are a part of that
1085  // chain. Check that now: accumulate the registers from these defs,
1086  // and if they all together cover LRef, it is not live-on-entry.
1087       for (NodeAddr<DefNode*> TA : getAllReachingDefs(DA)) {
1088         // DefNode -> InstrNode -> BlockNode.
1089  NodeAddr<InstrNode*> ITA = TA.Addr->getOwner(DFG);
1090  NodeAddr<BlockNode*> BTA = ITA.Addr->getOwner(DFG);
1091  // Reaching defs are ordered in the upward direction.
1092  if (BTA.Addr->getCode() != B) {
1093  // We have reached past the beginning of B, and the accumulated
1094  // registers are not covering LRef. The first def from the
1095  // upward chain will be live.
1096  // Subtract all accumulated defs (RRs) from LRef.
1097  RegisterRef T = RRs.clearIn(LRef);
1098  assert(T);
1099  NewDefs.insert({TA.Id,T.Mask});
1100  break;
1101  }
1102 
1103  // TA is in B. Only add this def to the accumulated cover if it is
1104  // not preserving.
1105  if (!(TA.Addr->getFlags() & NodeAttrs::Preserving))
1106  RRs.insert(TA.Addr->getRegRef(DFG));
1107  // If this is enough to cover LRef, then stop.
1108  if (RRs.hasCoverOf(LRef))
1109  break;
1110  }
1111  }
1112  }
1113 
1114  emptify(LiveIn);
1115 
1116  if (Trace) {
1117  dbgs() << "after defs in block\n";
1118  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1119  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1120  }
1121 
1122  // Scan the block for upward-exposed uses and add them to the tracking set.
1123  for (auto I : DFG.getFunc().Addr->findBlock(B, DFG).Addr->members(DFG)) {
1124  NodeAddr<InstrNode*> IA = I;
1125  if (IA.Addr->getKind() != NodeAttrs::Stmt)
1126  continue;
1127  for (NodeAddr<UseNode*> UA : IA.Addr->members_if(DFG.IsUse, DFG)) {
1128  if (UA.Addr->getFlags() & NodeAttrs::Undef)
1129  continue;
1130  RegisterRef RR = UA.Addr->getRegRef(DFG);
1131       for (NodeAddr<DefNode*> D : getAllReachingDefs(UA))
1132         if (getBlockWithRef(D.Id) != B)
1133  LiveIn[RR.Reg].insert({D.Id,RR.Mask});
1134  }
1135  }
1136 
1137  if (Trace) {
1138  dbgs() << "after uses in block\n";
1139  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1140  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1141  }
1142 
1143  // Phi uses should not be propagated up the dominator tree, since they
1144  // are not dominated by their corresponding reaching defs.
1145  RegisterAggr &Local = LiveMap[B];
1146  RefMap &LON = PhiLON[B];
1147  for (auto &R : LON) {
1148  LaneBitmask M;
1149  for (auto P : R.second)
1150  M |= P.second;
1151  Local.insert(RegisterRef(R.first,M));
1152  }
1153 
1154  if (Trace) {
1155  dbgs() << "after phi uses in block\n";
1156  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1157  dbgs() << " Local: " << Print<RegisterAggr>(Local, DFG) << '\n';
1158  }
1159 
1160  for (auto C : IIDF[B]) {
1161  RegisterAggr &LiveC = LiveMap[C];
1162  for (const std::pair<const RegisterId, NodeRefSet> &S : LiveIn)
1163  for (auto R : S.second)
1164  if (MDT.properlyDominates(getBlockWithRef(R.first), C))
1165  LiveC.insert(RegisterRef(S.first, R.second));
1166  }
1167 }
1168 
1169 void Liveness::emptify(RefMap &M) {
1170  for (auto I = M.begin(), E = M.end(); I != E; )
1171  I = I->second.empty() ? M.erase(I) : std::next(I);
1172 }
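// Example (added for illustration; not part of the original source): a typical
// end-to-end use of this class by a pass, assuming the constructor signature
// from RDFLiveness.h (MRI: MachineRegisterInfo, DFG: an already-built
// DataFlowGraph):
//
//   Liveness LV(MRI, DFG);
//   LV.computeLiveIns();   // compute LiveMap and the phi live-in/out info
//   LV.resetLiveIns();     // rewrite each block's live-in list from LiveMap
//   LV.resetKills();       // recompute kill flags to match the new live-ins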