HexagonBlockRanges.cpp
//===- HexagonBlockRanges.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "HexagonBlockRanges.h"
#include "HexagonInstrInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hbr"

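// Overlap test for two index ranges. For example, [2:5] and [4:7] overlap,
// while [2:3] and [5:7] do not; ranges that only touch at an endpoint, such
// as [2:4] and [4:6], overlap only when that endpoint is a tied end.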
bool HexagonBlockRanges::IndexRange::overlaps(const IndexRange &A) const {
  // If A contains start(), or "this" contains A.start(), then overlap.
  IndexType S = start(), E = end(), AS = A.start(), AE = A.end();
  if (AS == S)
    return true;
  bool SbAE = (S < AE) || (S == AE && A.TiedEnd);  // S-before-AE.
  bool ASbE = (AS < E) || (AS == E && TiedEnd);    // AS-before-E.
  if ((AS < S && SbAE) || (S < AS && ASbE))
    return true;
  // Otherwise no overlap.
  return false;
}

bool HexagonBlockRanges::IndexRange::contains(const IndexRange &A) const {
  if (start() <= A.start()) {
    // Treat "None" in the range end as equal to the range start.
    IndexType E = (end() != IndexType::None) ? end() : start();
    IndexType AE = (A.end() != IndexType::None) ? A.end() : A.start();
    if (AE <= E)
      return true;
  }
  return false;
}

void HexagonBlockRanges::IndexRange::merge(const IndexRange &A) {
  // Allow merging adjacent ranges.
  assert(end() == A.start() || overlaps(A));
  IndexType AS = A.start(), AE = A.end();
  if (AS < start() || start() == IndexType::None)
    setStart(AS);
  if (end() < AE || end() == IndexType::None) {
    setEnd(AE);
    TiedEnd = A.TiedEnd;
  } else {
    if (end() == AE)
      TiedEnd |= A.TiedEnd;
  }
  if (A.Fixed)
    Fixed = true;
}

void HexagonBlockRanges::RangeList::include(const RangeList &RL) {
  for (auto &R : RL)
    if (!is_contained(*this, R))
      push_back(R);
}

// Merge all overlapping ranges in the list, so that all that remains
// is a list of disjoint ranges.
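// For example, { [1:4] [3:6] [8:9] } becomes { [1:6] [8:9] }; with
// MergeAdjacent set, { [1:4] [4:6] } also collapses to { [1:6] }.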
void HexagonBlockRanges::RangeList::unionize(bool MergeAdjacent) {
  if (empty())
    return;

  llvm::sort(begin(), end());
  iterator Iter = begin();

  while (Iter != end()-1) {
    iterator Next = std::next(Iter);
    // If MergeAdjacent is true, merge ranges A and B, where A.end == B.start.
    // This allows merging dead ranges, but is not valid for live ranges.
    bool Merge = MergeAdjacent && (Iter->end() == Next->start());
    if (Merge || Iter->overlaps(*Next)) {
      Iter->merge(*Next);
      erase(Next);
      continue;
    }
    ++Iter;
  }
}

// Compute a range A-B and add it to the list.
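// For example, subtracting B = [4:6] from A = [2:8] adds [2:4] and [6:8].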
void HexagonBlockRanges::RangeList::addsub(const IndexRange &A,
      const IndexRange &B) {
  // Exclusion of non-overlapping ranges makes some checks simpler
  // later in this function.
  if (!A.overlaps(B)) {
    // A - B = A.
    add(A);
    return;
  }

  IndexType AS = A.start(), AE = A.end();
  IndexType BS = B.start(), BE = B.end();

  // If AE is None, then A is included in B, since A and B overlap.
  // The result of the subtraction is empty, so just return.
  if (AE == IndexType::None)
    return;

  if (AS < BS) {
    // A starts before B.
    // AE cannot be None since A and B overlap.
    assert(AE != IndexType::None);
    // Add the part of A that extends on the "less" side of B.
    add(AS, BS, A.Fixed, false);
  }

  if (BE < AE) {
    // BE cannot be Exit here.
    if (BE == IndexType::None)
      add(BS, AE, A.Fixed, false);
    else
      add(BE, AE, A.Fixed, false);
  }
}

// Subtract a given range from each element in the list.
void HexagonBlockRanges::RangeList::subtract(const IndexRange &Range) {
  // Cannot assume that the list is unionized (i.e. contains only non-
  // overlapping ranges).
  RangeList T;
  for (iterator Next, I = begin(); I != end(); I = Next) {
    IndexRange &Rg = *I;
    if (Rg.overlaps(Range)) {
      T.addsub(Rg, Range);
      Next = this->erase(I);
    } else {
      Next = std::next(I);
    }
  }
  include(T);
}

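// Number all non-debug instructions in the basic block, starting at
// IndexType::First, so that instructions and indices can be mapped to
// each other.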
HexagonBlockRanges::InstrIndexMap::InstrIndexMap(MachineBasicBlock &B)
    : Block(B) {
  IndexType Idx = IndexType::First;
  First = Idx;
  for (auto &In : B) {
    if (In.isDebugInstr())
      continue;
    assert(getIndex(&In) == IndexType::None && "Instruction already in map");
    Map.insert(std::make_pair(Idx, &In));
    ++Idx;
  }
  Last = B.empty() ? IndexType::None : unsigned(Idx)-1;
}

MachineInstr *HexagonBlockRanges::InstrIndexMap::getInstr(IndexType Idx) const {
  auto F = Map.find(Idx);
  return (F != Map.end()) ? F->second : nullptr;
}

HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getIndex(
      MachineInstr *MI) const {
  for (auto &I : Map)
    if (I.second == MI)
      return I.first;
  return IndexType::None;
}

HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getPrevIndex(
      IndexType Idx) const {
  assert(Idx != IndexType::None);
  if (Idx == IndexType::Entry)
    return IndexType::None;
  if (Idx == IndexType::Exit)
    return Last;
  if (Idx == First)
    return IndexType::Entry;
  return unsigned(Idx)-1;
}

HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getNextIndex(
      IndexType Idx) const {
  assert(Idx != IndexType::None);
  if (Idx == IndexType::Entry)
    return IndexType::First;
  if (Idx == IndexType::Exit || Idx == Last)
    return IndexType::None;
  return unsigned(Idx)+1;
}

void HexagonBlockRanges::InstrIndexMap::replaceInstr(MachineInstr *OldMI,
      MachineInstr *NewMI) {
  for (auto &I : Map) {
    if (I.second != OldMI)
      continue;
    if (NewMI != nullptr)
      I.second = NewMI;
    else
      Map.erase(I.first);
    break;
  }
}

HexagonBlockRanges::HexagonBlockRanges(MachineFunction &mf)
  : MF(mf), HST(mf.getSubtarget<HexagonSubtarget>()),
    TII(*HST.getInstrInfo()), TRI(*HST.getRegisterInfo()),
    Reserved(TRI.getReservedRegs(mf)) {
  // Consider all non-allocatable registers as reserved.
  for (const TargetRegisterClass *RC : TRI.regclasses()) {
    if (RC->isAllocatable())
      continue;
    for (unsigned R : *RC)
      Reserved[R] = true;
  }
}

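// Compute the set of registers live on entry to block B: live-in lane masks
// are expanded into the corresponding subregisters, and reserved registers
// are dropped.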
HexagonBlockRanges::RegisterSet HexagonBlockRanges::getLiveIns(
      const MachineBasicBlock &B, const MachineRegisterInfo &MRI,
      const TargetRegisterInfo &TRI) {
  RegisterSet LiveIns;
  RegisterSet Tmp;

  for (auto I : B.liveins()) {
    MCSubRegIndexIterator S(I.PhysReg, &TRI);
    if (I.LaneMask.all() || (I.LaneMask.any() && !S.isValid())) {
      Tmp.insert({I.PhysReg, 0});
      continue;
    }
    for (; S.isValid(); ++S) {
      unsigned SI = S.getSubRegIndex();
      if ((I.LaneMask & TRI.getSubRegIndexLaneMask(SI)).any())
        Tmp.insert({S.getSubReg(), 0});
    }
  }

  for (auto R : Tmp) {
    if (!Reserved[R.Reg])
      LiveIns.insert(R);
    for (auto S : expandToSubRegs(R, MRI, TRI))
      if (!Reserved[S.Reg])
        LiveIns.insert(S);
  }
  return LiveIns;
}

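// Expand a register reference into references to its subregisters: a physical
// register expands to its subregisters (or to itself if it has none), and a
// virtual register expands to (Reg, SubIdx) pairs derived from a representative
// physical register of its register class.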
HexagonBlockRanges::RegisterSet HexagonBlockRanges::expandToSubRegs(
      RegisterRef R, const MachineRegisterInfo &MRI,
      const TargetRegisterInfo &TRI) {
  RegisterSet SRs;

  if (R.Sub != 0) {
    SRs.insert(R);
    return SRs;
  }

  if (TargetRegisterInfo::isPhysicalRegister(R.Reg)) {
    MCSubRegIterator I(R.Reg, &TRI);
    if (!I.isValid())
      SRs.insert({R.Reg, 0});
    for (; I.isValid(); ++I)
      SRs.insert({*I, 0});
  } else {
    assert(TargetRegisterInfo::isVirtualRegister(R.Reg));
    auto &RC = *MRI.getRegClass(R.Reg);
    unsigned PReg = *RC.begin();
    MCSubRegIndexIterator I(PReg, &TRI);
    if (!I.isValid())
      SRs.insert({R.Reg, 0});
    for (; I.isValid(); ++I)
      SRs.insert({R.Reg, I.getSubRegIndex()});
  }
  return SRs;
}

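// Build the initial live ranges for the block: scan the instructions in
// order, tracking the last def and last use of each register, and close a
// range on a kill, on a redefinition, on a clobber, or at the block exit.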
void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
      RegToRangeMap &LiveMap) {
  std::map<RegisterRef,IndexType> LastDef, LastUse;
  RegisterSet LiveOnEntry;
  MachineBasicBlock &B = IndexMap.getBlock();
  MachineRegisterInfo &MRI = B.getParent()->getRegInfo();

  for (auto R : getLiveIns(B, MRI, TRI))
    LiveOnEntry.insert(R);

  for (auto R : LiveOnEntry)
    LastDef[R] = IndexType::Entry;

  auto closeRange = [&LastUse,&LastDef,&LiveMap] (RegisterRef R) -> void {
    auto LD = LastDef[R], LU = LastUse[R];
    if (LD == IndexType::None)
      LD = IndexType::Entry;
    if (LU == IndexType::None)
      LU = IndexType::Exit;
    LiveMap[R].add(LD, LU, false, false);
    LastUse[R] = LastDef[R] = IndexType::None;
  };

  RegisterSet Defs, Clobbers;

  for (auto &In : B) {
    if (In.isDebugInstr())
      continue;
    IndexType Index = IndexMap.getIndex(&In);
    // Process uses first.
    for (auto &Op : In.operands()) {
      if (!Op.isReg() || !Op.isUse() || Op.isUndef())
        continue;
      RegisterRef R = { Op.getReg(), Op.getSubReg() };
      if (TargetRegisterInfo::isPhysicalRegister(R.Reg) && Reserved[R.Reg])
        continue;
      bool IsKill = Op.isKill();
      for (auto S : expandToSubRegs(R, MRI, TRI)) {
        LastUse[S] = Index;
        if (IsKill)
          closeRange(S);
      }
    }
    // Process defs and clobbers.
    Defs.clear();
    Clobbers.clear();
    for (auto &Op : In.operands()) {
      if (!Op.isReg() || !Op.isDef() || Op.isUndef())
        continue;
      RegisterRef R = { Op.getReg(), Op.getSubReg() };
      for (auto S : expandToSubRegs(R, MRI, TRI)) {
        if (TargetRegisterInfo::isPhysicalRegister(S.Reg) && Reserved[S.Reg])
          continue;
        if (Op.isDead())
          Clobbers.insert(S);
        else
          Defs.insert(S);
      }
    }

    for (auto &Op : In.operands()) {
      if (!Op.isRegMask())
        continue;
      const uint32_t *BM = Op.getRegMask();
      for (unsigned PR = 1, N = TRI.getNumRegs(); PR != N; ++PR) {
        // Skip registers that have subregisters. A register is preserved
        // iff its bit is set in the regmask, so if R1:0 was preserved, both
        // R1 and R0 would also be present.
        if (MCSubRegIterator(PR, &TRI, false).isValid())
          continue;
        if (Reserved[PR])
          continue;
        if (BM[PR/32] & (1u << (PR%32)))
          continue;
        RegisterRef R = { PR, 0 };
        if (!Defs.count(R))
          Clobbers.insert(R);
      }
    }
    // Defs and clobbers can overlap, e.g.
    //   dead %d0 = COPY %5, implicit-def %r0, implicit-def %r1
    for (RegisterRef R : Defs)
      Clobbers.erase(R);

    // Update maps for defs.
    for (RegisterRef S : Defs) {
      // Defs should already be expanded into subregs.
      assert(!TargetRegisterInfo::isPhysicalRegister(S.Reg) ||
             !MCSubRegIterator(S.Reg, &TRI, false).isValid());
      if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
        closeRange(S);
      LastDef[S] = Index;
    }
    // Update maps for clobbers.
    for (RegisterRef S : Clobbers) {
      // Clobbers should already be expanded into subregs.
      assert(!TargetRegisterInfo::isPhysicalRegister(S.Reg) ||
             !MCSubRegIterator(S.Reg, &TRI, false).isValid());
      if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
        closeRange(S);
      // Create a single-instruction range.
      LastDef[S] = LastUse[S] = Index;
      closeRange(S);
    }
  }

  // Collect live-on-exit.
  RegisterSet LiveOnExit;
  for (auto *SB : B.successors())
    for (auto R : getLiveIns(*SB, MRI, TRI))
      LiveOnExit.insert(R);

  for (auto R : LiveOnExit)
    LastUse[R] = IndexType::Exit;

  // Process remaining registers.
  RegisterSet Left;
  for (auto &I : LastUse)
    if (I.second != IndexType::None)
      Left.insert(I.first);
  for (auto &I : LastDef)
    if (I.second != IndexType::None)
      Left.insert(I.first);
  for (auto R : Left)
    closeRange(R);

  // Finalize the live ranges.
  for (auto &P : LiveMap)
    P.second.unionize();
}

HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeLiveMap(
      InstrIndexMap &IndexMap) {
  RegToRangeMap LiveMap;
  LLVM_DEBUG(dbgs() << __func__ << ": index map\n" << IndexMap << '\n');
  computeInitialLiveRanges(IndexMap, LiveMap);
  LLVM_DEBUG(dbgs() << __func__ << ": live map\n"
                    << PrintRangeMap(LiveMap, TRI) << '\n');
  return LiveMap;
}

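// For each register, compute the ranges in which it is not live: the gaps
// between consecutive live ranges, plus the intervals from the block entry
// to the first live range and from the last live range to the block exit.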
HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeDeadMap(
      InstrIndexMap &IndexMap, RegToRangeMap &LiveMap) {
  RegToRangeMap DeadMap;

  auto addDeadRanges = [&IndexMap,&LiveMap,&DeadMap] (RegisterRef R) -> void {
    auto F = LiveMap.find(R);
    if (F == LiveMap.end() || F->second.empty()) {
      DeadMap[R].add(IndexType::Entry, IndexType::Exit, false, false);
      return;
    }

    RangeList &RL = F->second;
    RangeList::iterator A = RL.begin(), Z = RL.end()-1;

    // Try to create the initial range.
    if (A->start() != IndexType::Entry) {
      IndexType DE = IndexMap.getPrevIndex(A->start());
      if (DE != IndexType::Entry)
        DeadMap[R].add(IndexType::Entry, DE, false, false);
    }

    while (A != Z) {
      // Creating a dead range that follows A. Pay attention to empty
      // ranges (i.e. those ending with "None").
      IndexType AE = (A->end() == IndexType::None) ? A->start() : A->end();
      IndexType DS = IndexMap.getNextIndex(AE);
      ++A;
      IndexType DE = IndexMap.getPrevIndex(A->start());
      if (DS < DE)
        DeadMap[R].add(DS, DE, false, false);
    }

    // Try to create the final range.
    if (Z->end() != IndexType::Exit) {
      IndexType ZE = (Z->end() == IndexType::None) ? Z->start() : Z->end();
      IndexType DS = IndexMap.getNextIndex(ZE);
      if (DS < IndexType::Exit)
        DeadMap[R].add(DS, IndexType::Exit, false, false);
    }
  };

  MachineFunction &MF = *IndexMap.getBlock().getParent();
  auto &MRI = MF.getRegInfo();
  unsigned NumRegs = TRI.getNumRegs();
  BitVector Visited(NumRegs);
  for (unsigned R = 1; R < NumRegs; ++R) {
    for (auto S : expandToSubRegs({R,0}, MRI, TRI)) {
      if (Reserved[S.Reg] || Visited[S.Reg])
        continue;
      addDeadRanges(S);
      Visited[S.Reg] = true;
    }
  }
  for (auto &P : LiveMap)
    if (!P.second.empty())
      addDeadRanges(P.first);

  LLVM_DEBUG(dbgs() << __func__ << ": dead map\n"
                    << PrintRangeMap(DeadMap, TRI) << '\n');
  return DeadMap;
}

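// Debug printing support: an index prints as its number, with 'n' for Entry,
// 'x' for Exit and '-' for None; a range prints as [start:end], with '}'
// marking a tied end and '!' a fixed range.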
raw_ostream &llvm::operator<<(raw_ostream &OS,
      HexagonBlockRanges::IndexType Idx) {
  if (Idx == HexagonBlockRanges::IndexType::None)
    return OS << '-';
  if (Idx == HexagonBlockRanges::IndexType::Entry)
    return OS << 'n';
  if (Idx == HexagonBlockRanges::IndexType::Exit)
    return OS << 'x';
  return OS << unsigned(Idx)-HexagonBlockRanges::IndexType::First+1;
}

// A mapping to translate between instructions and their indices.
raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::IndexRange &IR) {
  OS << '[' << IR.start() << ':' << IR.end() << (IR.TiedEnd ? '}' : ']');
  if (IR.Fixed)
    OS << '!';
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::RangeList &RL) {
  for (auto &R : RL)
    OS << R << " ";
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::InstrIndexMap &M) {
  for (auto &In : M.Block) {
    HexagonBlockRanges::IndexType Idx = M.getIndex(&In);
    OS << Idx << (Idx == M.Last ? ". " : "  ") << In;
  }
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::PrintRangeMap &P) {
  for (auto &I : P.Map) {
    const HexagonBlockRanges::RangeList &RL = I.second;
    OS << printReg(I.first.Reg, &P.TRI, I.first.Sub) << " -> " << RL << "\n";
  }
  return OS;
}