Bug Summary

File: include/llvm/MC/LaneBitmask.h
Warning: line 86, column 34
The result of the left shift is undefined due to shifting by '4294967295', which is greater or equal to the width of type 'llvm::LaneBitmask::Type'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CodeGenRegisters.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/utils/TableGen -I /build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn350071/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/utils/TableGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-8~svn350071=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-12-27-042839-1215-1 -x c++ /build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp

1//===- CodeGenRegisters.cpp - Register and RegisterClass Info -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines structures to encapsulate information gleaned from the
11// target register and register class definitions.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CodeGenRegisters.h"
16#include "CodeGenTarget.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/IntEqClasses.h"
21#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/StringExtras.h"
27#include "llvm/ADT/StringRef.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/Support/Debug.h"
30#include "llvm/Support/MathExtras.h"
31#include "llvm/Support/raw_ostream.h"
32#include "llvm/TableGen/Error.h"
33#include "llvm/TableGen/Record.h"
34#include <algorithm>
35#include <cassert>
36#include <cstdint>
37#include <iterator>
38#include <map>
39#include <queue>
40#include <set>
41#include <string>
42#include <tuple>
43#include <utility>
44#include <vector>
45
46using namespace llvm;
47
48#define DEBUG_TYPE"regalloc-emitter" "regalloc-emitter"
49
50//===----------------------------------------------------------------------===//
51// CodeGenSubRegIndex
52//===----------------------------------------------------------------------===//
53
54CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
55 : TheDef(R), EnumValue(Enum), AllSuperRegsCovered(true), Artificial(true) {
56 Name = R->getName();
57 if (R->getValue("Namespace"))
58 Namespace = R->getValueAsString("Namespace");
59 Size = R->getValueAsInt("Size");
60 Offset = R->getValueAsInt("Offset");
61}
62
63CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
64 unsigned Enum)
65 : TheDef(nullptr), Name(N), Namespace(Nspace), Size(-1), Offset(-1),
66 EnumValue(Enum), AllSuperRegsCovered(true), Artificial(true) {
67}
68
69std::string CodeGenSubRegIndex::getQualifiedName() const {
70 std::string N = getNamespace();
71 if (!N.empty())
72 N += "::";
73 N += getName();
74 return N;
75}
76
77void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
78 if (!TheDef)
79 return;
80
81 std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
82 if (!Comps.empty()) {
83 if (Comps.size() != 2)
84 PrintFatalError(TheDef->getLoc(),
85 "ComposedOf must have exactly two entries");
86 CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
87 CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
88 CodeGenSubRegIndex *X = A->addComposite(B, this);
89 if (X)
90 PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
91 }
92
93 std::vector<Record*> Parts =
94 TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
95 if (!Parts.empty()) {
96 if (Parts.size() < 2)
97 PrintFatalError(TheDef->getLoc(),
98 "CoveredBySubRegs must have two or more entries");
99 SmallVector<CodeGenSubRegIndex*, 8> IdxParts;
100 for (Record *Part : Parts)
101 IdxParts.push_back(RegBank.getSubRegIdx(Part));
102 setConcatenationOf(IdxParts);
103 }
104}
105
106LaneBitmask CodeGenSubRegIndex::computeLaneMask() const {
107 // Already computed?
108 if (LaneMask.any())
109 return LaneMask;
110
111 // Recursion guard, shouldn't be required.
112 LaneMask = LaneBitmask::getAll();
113
114 // The lane mask is simply the union of all sub-indices.
115 LaneBitmask M;
116 for (const auto &C : Composed)
117 M |= C.second->computeLaneMask();
118 assert(M.any() && "Missing lane mask, sub-register cycle?")((M.any() && "Missing lane mask, sub-register cycle?"
) ? static_cast<void> (0) : __assert_fail ("M.any() && \"Missing lane mask, sub-register cycle?\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 118, __PRETTY_FUNCTION__))
;
119 LaneMask = M;
120 return LaneMask;
121}
122
123void CodeGenSubRegIndex::setConcatenationOf(
124 ArrayRef<CodeGenSubRegIndex*> Parts) {
125 if (ConcatenationOf.empty())
126 ConcatenationOf.assign(Parts.begin(), Parts.end());
127 else
128 assert(std::equal(Parts.begin(), Parts.end(),((std::equal(Parts.begin(), Parts.end(), ConcatenationOf.begin
()) && "parts consistent") ? static_cast<void> (
0) : __assert_fail ("std::equal(Parts.begin(), Parts.end(), ConcatenationOf.begin()) && \"parts consistent\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 129, __PRETTY_FUNCTION__))
129 ConcatenationOf.begin()) && "parts consistent")((std::equal(Parts.begin(), Parts.end(), ConcatenationOf.begin
()) && "parts consistent") ? static_cast<void> (
0) : __assert_fail ("std::equal(Parts.begin(), Parts.end(), ConcatenationOf.begin()) && \"parts consistent\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 129, __PRETTY_FUNCTION__))
;
130}
131
132void CodeGenSubRegIndex::computeConcatTransitiveClosure() {
133 for (SmallVectorImpl<CodeGenSubRegIndex*>::iterator
134 I = ConcatenationOf.begin(); I != ConcatenationOf.end(); /*empty*/) {
135 CodeGenSubRegIndex *SubIdx = *I;
136 SubIdx->computeConcatTransitiveClosure();
137#ifndef NDEBUG
138 for (CodeGenSubRegIndex *SRI : SubIdx->ConcatenationOf)
139 assert(SRI->ConcatenationOf.empty() && "No transitive closure?")((SRI->ConcatenationOf.empty() && "No transitive closure?"
) ? static_cast<void> (0) : __assert_fail ("SRI->ConcatenationOf.empty() && \"No transitive closure?\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 139, __PRETTY_FUNCTION__))
;
140#endif
141
142 if (SubIdx->ConcatenationOf.empty()) {
143 ++I;
144 } else {
145 I = ConcatenationOf.erase(I);
146 I = ConcatenationOf.insert(I, SubIdx->ConcatenationOf.begin(),
147 SubIdx->ConcatenationOf.end());
148 I += SubIdx->ConcatenationOf.size();
149 }
150 }
151}
152
153//===----------------------------------------------------------------------===//
154// CodeGenRegister
155//===----------------------------------------------------------------------===//
156
157CodeGenRegister::CodeGenRegister(Record *R, unsigned Enum)
158 : TheDef(R),
159 EnumValue(Enum),
160 CostPerUse(R->getValueAsInt("CostPerUse")),
161 CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")),
162 HasDisjunctSubRegs(false),
163 SubRegsComplete(false),
164 SuperRegsComplete(false),
165 TopoSig(~0u) {
166 Artificial = R->getValueAsBit("isArtificial");
167}
168
169void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) {
170 std::vector<Record*> SRIs = TheDef->getValueAsListOfDefs("SubRegIndices");
171 std::vector<Record*> SRs = TheDef->getValueAsListOfDefs("SubRegs");
172
173 if (SRIs.size() != SRs.size())
174 PrintFatalError(TheDef->getLoc(),
175 "SubRegs and SubRegIndices must have the same size");
176
177 for (unsigned i = 0, e = SRIs.size(); i != e; ++i) {
178 ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i]));
179 ExplicitSubRegs.push_back(RegBank.getReg(SRs[i]));
180 }
181
182 // Also compute leading super-registers. Each register has a list of
183 // covered-by-subregs super-registers where it appears as the first explicit
184 // sub-register.
185 //
186 // This is used by computeSecondarySubRegs() to find candidates.
187 if (CoveredBySubRegs && !ExplicitSubRegs.empty())
188 ExplicitSubRegs.front()->LeadingSuperRegs.push_back(this);
189
190 // Add ad hoc alias links. This is a symmetric relationship between two
191 // registers, so build a symmetric graph by adding links in both ends.
192 std::vector<Record*> Aliases = TheDef->getValueAsListOfDefs("Aliases");
193 for (Record *Alias : Aliases) {
194 CodeGenRegister *Reg = RegBank.getReg(Alias);
195 ExplicitAliases.push_back(Reg);
196 Reg->ExplicitAliases.push_back(this);
197 }
198}
199
200const StringRef CodeGenRegister::getName() const {
201 assert(TheDef && "no def")((TheDef && "no def") ? static_cast<void> (0) :
__assert_fail ("TheDef && \"no def\"", "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 201, __PRETTY_FUNCTION__))
;
202 return TheDef->getName();
203}
204
205namespace {
206
207// Iterate over all register units in a set of registers.
208class RegUnitIterator {
209 CodeGenRegister::Vec::const_iterator RegI, RegE;
210 CodeGenRegister::RegUnitList::iterator UnitI, UnitE;
211
212public:
213 RegUnitIterator(const CodeGenRegister::Vec &Regs):
214 RegI(Regs.begin()), RegE(Regs.end()) {
215
216 if (RegI != RegE) {
217 UnitI = (*RegI)->getRegUnits().begin();
218 UnitE = (*RegI)->getRegUnits().end();
219 advance();
220 }
221 }
222
223 bool isValid() const { return UnitI != UnitE; }
224
225 unsigned operator* () const { assert(isValid())((isValid()) ? static_cast<void> (0) : __assert_fail ("isValid()"
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 225, __PRETTY_FUNCTION__))
; return *UnitI; }
226
227 const CodeGenRegister *getReg() const { assert(isValid())((isValid()) ? static_cast<void> (0) : __assert_fail ("isValid()"
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 227, __PRETTY_FUNCTION__))
; return *RegI; }
228
229 /// Preincrement. Move to the next unit.
230 void operator++() {
231 assert(isValid() && "Cannot advance beyond the last operand")((isValid() && "Cannot advance beyond the last operand"
) ? static_cast<void> (0) : __assert_fail ("isValid() && \"Cannot advance beyond the last operand\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 231, __PRETTY_FUNCTION__))
;
232 ++UnitI;
233 advance();
234 }
235
236protected:
237 void advance() {
238 while (UnitI == UnitE) {
239 if (++RegI == RegE)
240 break;
241 UnitI = (*RegI)->getRegUnits().begin();
242 UnitE = (*RegI)->getRegUnits().end();
243 }
244 }
245};
246
247} // end anonymous namespace
248
249// Return true of this unit appears in RegUnits.
250static bool hasRegUnit(CodeGenRegister::RegUnitList &RegUnits, unsigned Unit) {
251 return RegUnits.test(Unit);
252}
253
254// Inherit register units from subregisters.
255// Return true if the RegUnits changed.
256bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
257 bool changed = false;
258 for (const auto &SubReg : SubRegs) {
259 CodeGenRegister *SR = SubReg.second;
260 // Merge the subregister's units into this register's RegUnits.
261 changed |= (RegUnits |= SR->RegUnits);
262 }
263
264 return changed;
265}
266
267const CodeGenRegister::SubRegMap &
268CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
269 // Only compute this map once.
270 if (SubRegsComplete)
271 return SubRegs;
272 SubRegsComplete = true;
273
274 HasDisjunctSubRegs = ExplicitSubRegs.size() > 1;
275
276 // First insert the explicit subregs and make sure they are fully indexed.
277 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
278 CodeGenRegister *SR = ExplicitSubRegs[i];
279 CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
280 if (!SR->Artificial)
281 Idx->Artificial = false;
282 if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
283 PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
284 " appears twice in Register " + getName());
285 // Map explicit sub-registers first, so the names take precedence.
286 // The inherited sub-registers are mapped below.
287 SubReg2Idx.insert(std::make_pair(SR, Idx));
288 }
289
290 // Keep track of inherited subregs and how they can be reached.
291 SmallPtrSet<CodeGenRegister*, 8> Orphans;
292
293 // Clone inherited subregs and place duplicate entries in Orphans.
294 // Here the order is important - earlier subregs take precedence.
295 for (CodeGenRegister *ESR : ExplicitSubRegs) {
296 const SubRegMap &Map = ESR->computeSubRegs(RegBank);
297 HasDisjunctSubRegs |= ESR->HasDisjunctSubRegs;
298
299 for (const auto &SR : Map) {
300 if (!SubRegs.insert(SR).second)
301 Orphans.insert(SR.second);
302 }
303 }
304
305 // Expand any composed subreg indices.
306 // If dsub_2 has ComposedOf = [qsub_1, dsub_0], and this register has a
307 // qsub_1 subreg, add a dsub_2 subreg. Keep growing Indices and process
308 // expanded subreg indices recursively.
309 SmallVector<CodeGenSubRegIndex*, 8> Indices = ExplicitSubRegIndices;
310 for (unsigned i = 0; i != Indices.size(); ++i) {
311 CodeGenSubRegIndex *Idx = Indices[i];
312 const CodeGenSubRegIndex::CompMap &Comps = Idx->getComposites();
313 CodeGenRegister *SR = SubRegs[Idx];
314 const SubRegMap &Map = SR->computeSubRegs(RegBank);
315
316 // Look at the possible compositions of Idx.
317 // They may not all be supported by SR.
318 for (CodeGenSubRegIndex::CompMap::const_iterator I = Comps.begin(),
319 E = Comps.end(); I != E; ++I) {
320 SubRegMap::const_iterator SRI = Map.find(I->first);
321 if (SRI == Map.end())
322 continue; // Idx + I->first doesn't exist in SR.
323 // Add I->second as a name for the subreg SRI->second, assuming it is
324 // orphaned, and the name isn't already used for something else.
325 if (SubRegs.count(I->second) || !Orphans.erase(SRI->second))
326 continue;
327 // We found a new name for the orphaned sub-register.
328 SubRegs.insert(std::make_pair(I->second, SRI->second));
329 Indices.push_back(I->second);
330 }
331 }
332
333 // Now Orphans contains the inherited subregisters without a direct index.
334 // Create inferred indexes for all missing entries.
335 // Work backwards in the Indices vector in order to compose subregs bottom-up.
336 // Consider this subreg sequence:
337 //
338 // qsub_1 -> dsub_0 -> ssub_0
339 //
340 // The qsub_1 -> dsub_0 composition becomes dsub_2, so the ssub_0 register
341 // can be reached in two different ways:
342 //
343 // qsub_1 -> ssub_0
344 // dsub_2 -> ssub_0
345 //
346 // We pick the latter composition because another register may have [dsub_0,
347 // dsub_1, dsub_2] subregs without necessarily having a qsub_1 subreg. The
348 // dsub_2 -> ssub_0 composition can be shared.
349 while (!Indices.empty() && !Orphans.empty()) {
350 CodeGenSubRegIndex *Idx = Indices.pop_back_val();
351 CodeGenRegister *SR = SubRegs[Idx];
352 const SubRegMap &Map = SR->computeSubRegs(RegBank);
353 for (const auto &SubReg : Map)
354 if (Orphans.erase(SubReg.second))
355 SubRegs[RegBank.getCompositeSubRegIndex(Idx, SubReg.first)] = SubReg.second;
356 }
357
358 // Compute the inverse SubReg -> Idx map.
359 for (const auto &SubReg : SubRegs) {
360 if (SubReg.second == this) {
361 ArrayRef<SMLoc> Loc;
362 if (TheDef)
363 Loc = TheDef->getLoc();
364 PrintFatalError(Loc, "Register " + getName() +
365 " has itself as a sub-register");
366 }
367
368 // Compute AllSuperRegsCovered.
369 if (!CoveredBySubRegs)
370 SubReg.first->AllSuperRegsCovered = false;
371
372 // Ensure that every sub-register has a unique name.
373 DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*>::iterator Ins =
374 SubReg2Idx.insert(std::make_pair(SubReg.second, SubReg.first)).first;
375 if (Ins->second == SubReg.first)
376 continue;
377 // Trouble: Two different names for SubReg.second.
378 ArrayRef<SMLoc> Loc;
379 if (TheDef)
380 Loc = TheDef->getLoc();
381 PrintFatalError(Loc, "Sub-register can't have two names: " +
382 SubReg.second->getName() + " available as " +
383 SubReg.first->getName() + " and " + Ins->second->getName());
384 }
385
386 // Derive possible names for sub-register concatenations from any explicit
387 // sub-registers. By doing this before computeSecondarySubRegs(), we ensure
388 // that getConcatSubRegIndex() won't invent any concatenated indices that the
389 // user already specified.
390 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
391 CodeGenRegister *SR = ExplicitSubRegs[i];
392 if (!SR->CoveredBySubRegs || SR->ExplicitSubRegs.size() <= 1 ||
393 SR->Artificial)
394 continue;
395
396 // SR is composed of multiple sub-regs. Find their names in this register.
397 SmallVector<CodeGenSubRegIndex*, 8> Parts;
398 for (unsigned j = 0, e = SR->ExplicitSubRegs.size(); j != e; ++j) {
399 CodeGenSubRegIndex &I = *SR->ExplicitSubRegIndices[j];
400 if (!I.Artificial)
401 Parts.push_back(getSubRegIndex(SR->ExplicitSubRegs[j]));
402 }
403
404 // Offer this as an existing spelling for the concatenation of Parts.
405 CodeGenSubRegIndex &Idx = *ExplicitSubRegIndices[i];
406 Idx.setConcatenationOf(Parts);
407 }
408
409 // Initialize RegUnitList. Because getSubRegs is called recursively, this
410 // processes the register hierarchy in postorder.
411 //
412 // Inherit all sub-register units. It is good enough to look at the explicit
413 // sub-registers, the other registers won't contribute any more units.
414 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
415 CodeGenRegister *SR = ExplicitSubRegs[i];
416 RegUnits |= SR->RegUnits;
417 }
418
419 // Absent any ad hoc aliasing, we create one register unit per leaf register.
420 // These units correspond to the maximal cliques in the register overlap
421 // graph which is optimal.
422 //
423 // When there is ad hoc aliasing, we simply create one unit per edge in the
424 // undirected ad hoc aliasing graph. Technically, we could do better by
425 // identifying maximal cliques in the ad hoc graph, but cliques larger than 2
426 // are extremely rare anyway (I've never seen one), so we don't bother with
427 // the added complexity.
428 for (unsigned i = 0, e = ExplicitAliases.size(); i != e; ++i) {
429 CodeGenRegister *AR = ExplicitAliases[i];
430 // Only visit each edge once.
431 if (AR->SubRegsComplete)
432 continue;
433 // Create a RegUnit representing this alias edge, and add it to both
434 // registers.
435 unsigned Unit = RegBank.newRegUnit(this, AR);
436 RegUnits.set(Unit);
437 AR->RegUnits.set(Unit);
438 }
439
440 // Finally, create units for leaf registers without ad hoc aliases. Note that
441 // a leaf register with ad hoc aliases doesn't get its own unit - it isn't
442 // necessary. This means the aliasing leaf registers can share a single unit.
443 if (RegUnits.empty())
444 RegUnits.set(RegBank.newRegUnit(this));
445
446 // We have now computed the native register units. More may be adopted later
447 // for balancing purposes.
448 NativeRegUnits = RegUnits;
449
450 return SubRegs;
451}
452
453// In a register that is covered by its sub-registers, try to find redundant
454// sub-registers. For example:
455//
456// QQ0 = {Q0, Q1}
457// Q0 = {D0, D1}
458// Q1 = {D2, D3}
459//
460// We can infer that D1_D2 is also a sub-register, even if it wasn't named in
461// the register definition.
462//
463// The explicitly specified registers form a tree. This function discovers
464// sub-register relationships that would force a DAG.
465//
466void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
467 SmallVector<SubRegMap::value_type, 8> NewSubRegs;
468
469 std::queue<std::pair<CodeGenSubRegIndex*,CodeGenRegister*>> SubRegQueue;
470 for (std::pair<CodeGenSubRegIndex*,CodeGenRegister*> P : SubRegs)
471 SubRegQueue.push(P);
472
473 // Look at the leading super-registers of each sub-register. Those are the
474 // candidates for new sub-registers, assuming they are fully contained in
475 // this register.
476 while (!SubRegQueue.empty()) {
477 CodeGenSubRegIndex *SubRegIdx;
478 const CodeGenRegister *SubReg;
479 std::tie(SubRegIdx, SubReg) = SubRegQueue.front();
480 SubRegQueue.pop();
481
482 const CodeGenRegister::SuperRegList &Leads = SubReg->LeadingSuperRegs;
483 for (unsigned i = 0, e = Leads.size(); i != e; ++i) {
484 CodeGenRegister *Cand = const_cast<CodeGenRegister*>(Leads[i]);
485 // Already got this sub-register?
486 if (Cand == this || getSubRegIndex(Cand))
487 continue;
488 // Check if each component of Cand is already a sub-register.
489 assert(!Cand->ExplicitSubRegs.empty() &&((!Cand->ExplicitSubRegs.empty() && "Super-register has no sub-registers"
) ? static_cast<void> (0) : __assert_fail ("!Cand->ExplicitSubRegs.empty() && \"Super-register has no sub-registers\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 490, __PRETTY_FUNCTION__))
490 "Super-register has no sub-registers")((!Cand->ExplicitSubRegs.empty() && "Super-register has no sub-registers"
) ? static_cast<void> (0) : __assert_fail ("!Cand->ExplicitSubRegs.empty() && \"Super-register has no sub-registers\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 490, __PRETTY_FUNCTION__))
;
491 if (Cand->ExplicitSubRegs.size() == 1)
492 continue;
493 SmallVector<CodeGenSubRegIndex*, 8> Parts;
494 // We know that the first component is (SubRegIdx,SubReg). However we
495 // may still need to split it into smaller subregister parts.
496 assert(Cand->ExplicitSubRegs[0] == SubReg && "LeadingSuperRegs correct")((Cand->ExplicitSubRegs[0] == SubReg && "LeadingSuperRegs correct"
) ? static_cast<void> (0) : __assert_fail ("Cand->ExplicitSubRegs[0] == SubReg && \"LeadingSuperRegs correct\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 496, __PRETTY_FUNCTION__))
;
497 assert(getSubRegIndex(SubReg) == SubRegIdx && "LeadingSuperRegs correct")((getSubRegIndex(SubReg) == SubRegIdx && "LeadingSuperRegs correct"
) ? static_cast<void> (0) : __assert_fail ("getSubRegIndex(SubReg) == SubRegIdx && \"LeadingSuperRegs correct\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 497, __PRETTY_FUNCTION__))
;
498 for (CodeGenRegister *SubReg : Cand->ExplicitSubRegs) {
499 if (CodeGenSubRegIndex *SubRegIdx = getSubRegIndex(SubReg)) {
500 if (SubRegIdx->ConcatenationOf.empty()) {
501 Parts.push_back(SubRegIdx);
502 } else
503 for (CodeGenSubRegIndex *SubIdx : SubRegIdx->ConcatenationOf)
504 Parts.push_back(SubIdx);
505 } else {
506 // Sub-register doesn't exist.
507 Parts.clear();
508 break;
509 }
510 }
511 // There is nothing to do if some Cand sub-register is not part of this
512 // register.
513 if (Parts.empty())
514 continue;
515
516 // Each part of Cand is a sub-register of this. Make the full Cand also
517 // a sub-register with a concatenated sub-register index.
518 CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts);
519 std::pair<CodeGenSubRegIndex*,CodeGenRegister*> NewSubReg =
520 std::make_pair(Concat, Cand);
521
522 if (!SubRegs.insert(NewSubReg).second)
523 continue;
524
525 // We inserted a new subregister.
526 NewSubRegs.push_back(NewSubReg);
527 SubRegQueue.push(NewSubReg);
528 SubReg2Idx.insert(std::make_pair(Cand, Concat));
529 }
530 }
531
532 // Create sub-register index composition maps for the synthesized indices.
533 for (unsigned i = 0, e = NewSubRegs.size(); i != e; ++i) {
534 CodeGenSubRegIndex *NewIdx = NewSubRegs[i].first;
535 CodeGenRegister *NewSubReg = NewSubRegs[i].second;
536 for (SubRegMap::const_iterator SI = NewSubReg->SubRegs.begin(),
537 SE = NewSubReg->SubRegs.end(); SI != SE; ++SI) {
538 CodeGenSubRegIndex *SubIdx = getSubRegIndex(SI->second);
539 if (!SubIdx)
540 PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " +
541 SI->second->getName() + " in " + getName());
542 NewIdx->addComposite(SI->first, SubIdx);
543 }
544 }
545}
546
547void CodeGenRegister::computeSuperRegs(CodeGenRegBank &RegBank) {
548 // Only visit each register once.
549 if (SuperRegsComplete)
550 return;
551 SuperRegsComplete = true;
552
553 // Make sure all sub-registers have been visited first, so the super-reg
554 // lists will be topologically ordered.
555 for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
556 I != E; ++I)
557 I->second->computeSuperRegs(RegBank);
558
559 // Now add this as a super-register on all sub-registers.
560 // Also compute the TopoSigId in post-order.
561 TopoSigId Id;
562 for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
563 I != E; ++I) {
564 // Topological signature computed from SubIdx, TopoId(SubReg).
565 // Loops and idempotent indices have TopoSig = ~0u.
566 Id.push_back(I->first->EnumValue);
567 Id.push_back(I->second->TopoSig);
568
569 // Don't add duplicate entries.
570 if (!I->second->SuperRegs.empty() && I->second->SuperRegs.back() == this)
571 continue;
572 I->second->SuperRegs.push_back(this);
573 }
574 TopoSig = RegBank.getTopoSig(Id);
575}
576
577void
578CodeGenRegister::addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
579 CodeGenRegBank &RegBank) const {
580 assert(SubRegsComplete && "Must precompute sub-registers")((SubRegsComplete && "Must precompute sub-registers")
? static_cast<void> (0) : __assert_fail ("SubRegsComplete && \"Must precompute sub-registers\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 580, __PRETTY_FUNCTION__))
;
581 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
582 CodeGenRegister *SR = ExplicitSubRegs[i];
583 if (OSet.insert(SR))
584 SR->addSubRegsPreOrder(OSet, RegBank);
585 }
586 // Add any secondary sub-registers that weren't part of the explicit tree.
587 for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
588 I != E; ++I)
589 OSet.insert(I->second);
590}
591
592// Get the sum of this register's unit weights.
593unsigned CodeGenRegister::getWeight(const CodeGenRegBank &RegBank) const {
594 unsigned Weight = 0;
595 for (RegUnitList::iterator I = RegUnits.begin(), E = RegUnits.end();
596 I != E; ++I) {
597 Weight += RegBank.getRegUnit(*I).Weight;
598 }
599 return Weight;
600}
601
602//===----------------------------------------------------------------------===//
603// RegisterTuples
604//===----------------------------------------------------------------------===//
605
606// A RegisterTuples def is used to generate pseudo-registers from lists of
607// sub-registers. We provide a SetTheory expander class that returns the new
608// registers.
609namespace {
610
611struct TupleExpander : SetTheory::Expander {
612 // Reference to SynthDefs in the containing CodeGenRegBank, to keep track of
613 // the synthesized definitions for their lifetime.
614 std::vector<std::unique_ptr<Record>> &SynthDefs;
615
616 TupleExpander(std::vector<std::unique_ptr<Record>> &SynthDefs)
617 : SynthDefs(SynthDefs) {}
618
619 void expand(SetTheory &ST, Record *Def, SetTheory::RecSet &Elts) override {
620 std::vector<Record*> Indices = Def->getValueAsListOfDefs("SubRegIndices");
621 unsigned Dim = Indices.size();
622 ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
623 if (Dim != SubRegs->size())
624 PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
625 if (Dim < 2)
626 PrintFatalError(Def->getLoc(),
627 "Tuples must have at least 2 sub-registers");
628
629 // Evaluate the sub-register lists to be zipped.
630 unsigned Length = ~0u;
631 SmallVector<SetTheory::RecSet, 4> Lists(Dim);
632 for (unsigned i = 0; i != Dim; ++i) {
633 ST.evaluate(SubRegs->getElement(i), Lists[i], Def->getLoc());
634 Length = std::min(Length, unsigned(Lists[i].size()));
635 }
636
637 if (Length == 0)
638 return;
639
640 // Precompute some types.
641 Record *RegisterCl = Def->getRecords().getClass("Register");
642 RecTy *RegisterRecTy = RecordRecTy::get(RegisterCl);
643 StringInit *BlankName = StringInit::get("");
644
645 // Zip them up.
646 for (unsigned n = 0; n != Length; ++n) {
647 std::string Name;
648 Record *Proto = Lists[0][n];
649 std::vector<Init*> Tuple;
650 unsigned CostPerUse = 0;
651 for (unsigned i = 0; i != Dim; ++i) {
652 Record *Reg = Lists[i][n];
653 if (i) Name += '_';
654 Name += Reg->getName();
655 Tuple.push_back(DefInit::get(Reg));
656 CostPerUse = std::max(CostPerUse,
657 unsigned(Reg->getValueAsInt("CostPerUse")));
658 }
659
660 // Create a new Record representing the synthesized register. This record
661 // is only for consumption by CodeGenRegister, it is not added to the
662 // RecordKeeper.
663 SynthDefs.emplace_back(
664 llvm::make_unique<Record>(Name, Def->getLoc(), Def->getRecords()));
665 Record *NewReg = SynthDefs.back().get();
666 Elts.insert(NewReg);
667
668 // Copy Proto super-classes.
669 ArrayRef<std::pair<Record *, SMRange>> Supers = Proto->getSuperClasses();
670 for (const auto &SuperPair : Supers)
671 NewReg->addSuperClass(SuperPair.first, SuperPair.second);
672
673 // Copy Proto fields.
674 for (unsigned i = 0, e = Proto->getValues().size(); i != e; ++i) {
675 RecordVal RV = Proto->getValues()[i];
676
677 // Skip existing fields, like NAME.
678 if (NewReg->getValue(RV.getNameInit()))
679 continue;
680
681 StringRef Field = RV.getName();
682
683 // Replace the sub-register list with Tuple.
684 if (Field == "SubRegs")
685 RV.setValue(ListInit::get(Tuple, RegisterRecTy));
686
687 // Provide a blank AsmName. MC hacks are required anyway.
688 if (Field == "AsmName")
689 RV.setValue(BlankName);
690
691 // CostPerUse is aggregated from all Tuple members.
692 if (Field == "CostPerUse")
693 RV.setValue(IntInit::get(CostPerUse));
694
695 // Composite registers are always covered by sub-registers.
696 if (Field == "CoveredBySubRegs")
697 RV.setValue(BitInit::get(true));
698
699 // Copy fields from the RegisterTuples def.
700 if (Field == "SubRegIndices" ||
701 Field == "CompositeIndices") {
702 NewReg->addValue(*Def->getValue(Field));
703 continue;
704 }
705
706 // Some fields get their default uninitialized value.
707 if (Field == "DwarfNumbers" ||
708 Field == "DwarfAlias" ||
709 Field == "Aliases") {
710 if (const RecordVal *DefRV = RegisterCl->getValue(Field))
711 NewReg->addValue(*DefRV);
712 continue;
713 }
714
715 // Everything else is copied from Proto.
716 NewReg->addValue(RV);
717 }
718 }
719 }
720};
721
722} // end anonymous namespace
723
724//===----------------------------------------------------------------------===//
725// CodeGenRegisterClass
726//===----------------------------------------------------------------------===//
727
728static void sortAndUniqueRegisters(CodeGenRegister::Vec &M) {
729 llvm::sort(M, deref<llvm::less>());
730 M.erase(std::unique(M.begin(), M.end(), deref<llvm::equal>()), M.end());
731}
732
// Build a CodeGenRegisterClass from a user-defined RegisterClass record R.
// Reads the value-type list, expands the member set, collects alternative
// allocation orders, and derives size/alignment and allocation properties.
// EnumValue starts at -1; it is assigned later once all classes have been
// topologically sorted (see the CodeGenRegBank constructor).
CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
  : TheDef(R),
    Name(R->getName()),
    TopoSigs(RegBank.getNumTopoSigs()),
    EnumValue(-1) {

  // Translate the RegTypes field into ValueTypeByHwMode entries; anything
  // that is not a ValueType subclass is a fatal .td error.
  std::vector<Record*> TypeList = R->getValueAsListOfDefs("RegTypes");
  for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
    Record *Type = TypeList[i];
    if (!Type->isSubClassOf("ValueType"))
      PrintFatalError("RegTypes list member '" + Type->getName() +
                      "' does not derive from the ValueType class!");
    VTs.push_back(getValueTypeByHwMode(Type, RegBank.getHwModes()));
  }
  assert(!VTs.empty() && "RegisterClass must contain at least one ValueType!");

  // Allocation order 0 is the full set. AltOrders provides others.
  const SetTheory::RecVec *Elements = RegBank.getSets().expand(R);
  ListInit *AltOrders = R->getValueAsListInit("AltOrders");
  Orders.resize(1 + AltOrders->size());

  // Default allocation order always contains all registers.
  // The class is artificial only if every member register is artificial.
  Artificial = true;
  for (unsigned i = 0, e = Elements->size(); i != e; ++i) {
    Orders[0].push_back((*Elements)[i]);
    const CodeGenRegister *Reg = RegBank.getReg((*Elements)[i]);
    Members.push_back(Reg);
    Artificial &= Reg->Artificial;
    TopoSigs.set(Reg->getTopoSig());
  }
  sortAndUniqueRegisters(Members);

  // Alternative allocation orders may be subsets.
  SetTheory::RecSet Order;
  for (unsigned i = 0, e = AltOrders->size(); i != e; ++i) {
    RegBank.getSets().evaluate(AltOrders->getElement(i), Order, R->getLoc());
    Orders[1 + i].append(Order.begin(), Order.end());
    // Verify that all altorder members are regclass members.
    // pop_back while checking also leaves Order empty for the next iteration.
    while (!Order.empty()) {
      CodeGenRegister *Reg = RegBank.getReg(Order.back());
      Order.pop_back();
      if (!contains(Reg))
        PrintFatalError(R->getLoc(), " AltOrder register " + Reg->getName() +
                        " is not a class member");
    }
  }

  Namespace = R->getValueAsString("Namespace");

  // Per-HwMode size info takes precedence when a RegInfos def is present.
  if (const RecordVal *RV = R->getValue("RegInfos"))
    if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue()))
      RSI = RegSizeInfoByHwMode(DI->getDef(), RegBank.getHwModes());
  unsigned Size = R->getValueAsInt("Size");
  assert((RSI.hasDefault() || Size != 0 || VTs[0].isSimple()) &&
         "Impossible to determine register size");
  if (!RSI.hasDefault()) {
    // No RegInfos: synthesize a default-mode entry from the explicit Size,
    // falling back to the first value type's bit width.
    RegSizeInfo RI;
    RI.RegSize = RI.SpillSize = Size ? Size
                                     : VTs[0].getSimple().getSizeInBits();
    RI.SpillAlignment = R->getValueAsInt("Alignment");
    RSI.Map.insert({DefaultMode, RI});
  }

  CopyCost = R->getValueAsInt("CopyCost");
  Allocatable = R->getValueAsBit("isAllocatable");
  AltOrderSelect = R->getValueAsString("AltOrderSelect");
  // AllocationPriority is range-checked here because the member field is
  // narrower than the .td integer.
  int AllocationPriority = R->getValueAsInt("AllocationPriority");
  if (AllocationPriority < 0 || AllocationPriority > 63)
    PrintFatalError(R->getLoc(), "AllocationPriority out of range [0,63]");
  this->AllocationPriority = AllocationPriority;
}
804
// Create an inferred register class that was missing from the .td files.
// Most properties will be inherited from the closest super-class after the
// class structure has been computed (see inheritProperties); only the
// member set, size info, and topological signatures are filled in here.
CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
                                           StringRef Name, Key Props)
  : Members(*Props.Members),
    TheDef(nullptr),          // No backing record: this class is synthesized.
    Name(Name),
    TopoSigs(RegBank.getNumTopoSigs()),
    EnumValue(-1),            // Assigned after topological sorting.
    RSI(Props.RSI),
    CopyCost(0),
    Allocatable(true),
    AllocationPriority(0) {
  // Artificial iff every member register is artificial.
  Artificial = true;
  for (const auto R : Members) {
    TopoSigs.set(R->getTopoSig());
    Artificial &= R->Artificial;
  }
}
825
// Compute inherited properties for a synthesized register class.
// Copies most scalar properties from the closest (smallest) super-class and
// filters its allocation orders down to this class's own members.
void CodeGenRegisterClass::inheritProperties(CodeGenRegBank &RegBank) {
  assert(!getDef() && "Only synthesized classes can inherit properties");
  assert(!SuperClasses.empty() && "Synthesized class without super class");

  // The last super-class is the smallest one.
  CodeGenRegisterClass &Super = *SuperClasses.back();

  // Most properties are copied directly.
  // Exceptions are members, size, and alignment
  Namespace = Super.Namespace;
  VTs = Super.VTs;
  CopyCost = Super.CopyCost;
  Allocatable = Super.Allocatable;
  AltOrderSelect = Super.AltOrderSelect;
  AllocationPriority = Super.AllocationPriority;

  // Copy all allocation orders, filter out foreign registers from the larger
  // super-class.
  Orders.resize(Super.Orders.size());
  for (unsigned i = 0, ie = Super.Orders.size(); i != ie; ++i)
    for (unsigned j = 0, je = Super.Orders[i].size(); j != je; ++j)
      if (contains(RegBank.getReg(Super.Orders[i][j])))
        Orders[i].push_back(Super.Orders[i][j]);
}
851
852bool CodeGenRegisterClass::contains(const CodeGenRegister *Reg) const {
853 return std::binary_search(Members.begin(), Members.end(), Reg,
854 deref<llvm::less>());
855}
856
857namespace llvm {
858
859 raw_ostream &operator<<(raw_ostream &OS, const CodeGenRegisterClass::Key &K) {
860 OS << "{ " << K.RSI;
861 for (const auto R : *K.Members)
862 OS << ", " << R->getName();
863 return OS << " }";
864 }
865
866} // end namespace llvm
867
// This is a simple lexicographical order that can be used to search for sets.
// It is not the same as the topological order provided by TopoOrderRC.
bool CodeGenRegisterClass::Key::
operator<(const CodeGenRegisterClass::Key &B) const {
  assert(Members && B.Members);
  // Compare the member set first, then the register size info.
  return std::tie(*Members, RSI) < std::tie(*B.Members, B.RSI);
}
875
876// Returns true if RC is a strict subclass.
877// RC is a sub-class of this class if it is a valid replacement for any
878// instruction operand where a register of this classis required. It must
879// satisfy these conditions:
880//
881// 1. All RC registers are also in this.
882// 2. The RC spill size must not be smaller than our spill size.
883// 3. RC spill alignment must be compatible with ours.
884//
885static bool testSubClass(const CodeGenRegisterClass *A,
886 const CodeGenRegisterClass *B) {
887 return A->RSI.isSubClassOf(B->RSI) &&
888 std::includes(A->getMembers().begin(), A->getMembers().end(),
889 B->getMembers().begin(), B->getMembers().end(),
890 deref<llvm::less>());
891}
892
893/// Sorting predicate for register classes. This provides a topological
894/// ordering that arranges all register classes before their sub-classes.
895///
896/// Register classes with the same registers, spill size, and alignment form a
897/// clique. They will be ordered alphabetically.
898///
899static bool TopoOrderRC(const CodeGenRegisterClass &PA,
900 const CodeGenRegisterClass &PB) {
901 auto *A = &PA;
902 auto *B = &PB;
903 if (A == B)
904 return false;
905
906 if (A->RSI < B->RSI)
907 return true;
908 if (A->RSI != B->RSI)
909 return false;
910
911 // Order by descending set size. Note that the classes' allocation order may
912 // not have been computed yet. The Members set is always vaild.
913 if (A->getMembers().size() > B->getMembers().size())
914 return true;
915 if (A->getMembers().size() < B->getMembers().size())
916 return false;
917
918 // Finally order by name as a tie breaker.
919 return StringRef(A->getName()) < B->getName();
920}
921
922std::string CodeGenRegisterClass::getQualifiedName() const {
923 if (Namespace.empty())
924 return getName();
925 else
926 return (Namespace + "::" + getName()).str();
927}
928
// Compute sub-classes of all register classes.
// Assume the classes are ordered topologically.
void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
  auto &RegClasses = RegBank.getRegClasses();

  // Visit backwards so sub-classes are seen first.
  for (auto I = RegClasses.rbegin(), E = RegClasses.rend(); I != E; ++I) {
    CodeGenRegisterClass &RC = *I;
    // Every class is trivially a sub-class of itself.
    RC.SubClasses.resize(RegClasses.size());
    RC.SubClasses.set(RC.EnumValue);
    if (RC.Artificial)
      continue;

    // Normally, all subclasses have IDs >= rci, unless RC is part of a clique.
    // I.base() is the forward iterator to the element following *I.
    for (auto I2 = I.base(), E2 = RegClasses.end(); I2 != E2; ++I2) {
      CodeGenRegisterClass &SubRC = *I2;
      if (RC.SubClasses.test(SubRC.EnumValue))
        continue;
      if (!testSubClass(&RC, &SubRC))
        continue;
      // SubRC is a sub-class. Grab all its sub-classes so we won't have to
      // check them again.
      RC.SubClasses |= SubRC.SubClasses;
    }

    // Sweep up missed clique members. They will be immediately preceding RC.
    for (auto I2 = std::next(I); I2 != E && testSubClass(&RC, &*I2); ++I2)
      RC.SubClasses.set(I2->EnumValue);
  }

  // Compute the SuperClasses lists from the SubClasses vectors.
  for (auto &RC : RegClasses) {
    const BitVector &SC = RC.getSubClasses();
    auto I = RegClasses.begin();
    // Walk the set bits in SC, advancing the list iterator by the delta
    // between consecutive set bits (RegClasses is a list, not random access).
    for (int s = 0, next_s = SC.find_first(); next_s != -1;
         next_s = SC.find_next(s)) {
      std::advance(I, next_s - s);
      s = next_s;
      if (&*I == &RC)
        continue;
      I->SuperClasses.push_back(&RC);
    }
  }

  // With the class hierarchy in place, let synthesized register classes inherit
  // properties from their closest super-class. The iteration order here can
  // propagate properties down multiple levels.
  for (auto &RC : RegClasses)
    if (!RC.getDef())
      RC.inheritProperties(RegBank);
}
980
// Find the largest (sub-class, sub-register class) pair such that every
// register in the chosen sub-class has SubIdx and the sub-registers reached
// through SubIdx all land in the chosen sub-register class.
// Returns None when no sub-class of this class supports SubIdx at all.
Optional<std::pair<CodeGenRegisterClass *, CodeGenRegisterClass *>>
CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
    CodeGenRegBank &RegBank, const CodeGenSubRegIndex *SubIdx) const {
  // Descending order by member count; used for both candidate lists so
  // bigger (more general) classes are tried first.
  auto SizeOrder = [](const CodeGenRegisterClass *A,
                      const CodeGenRegisterClass *B) {
    return A->getMembers().size() > B->getMembers().size();
  };

  auto &RegClasses = RegBank.getRegClasses();

  // Find all the subclasses of this one that fully support the sub-register
  // index and order them by size. BiggestSuperRC should always be first.
  CodeGenRegisterClass *BiggestSuperRegRC = getSubClassWithSubReg(SubIdx);
  if (!BiggestSuperRegRC)
    return None;
  BitVector SuperRegRCsBV = BiggestSuperRegRC->getSubClasses();
  std::vector<CodeGenRegisterClass *> SuperRegRCs;
  for (auto &RC : RegClasses)
    if (SuperRegRCsBV[RC.EnumValue])
      SuperRegRCs.emplace_back(&RC);
  llvm::sort(SuperRegRCs, SizeOrder);
  assert(SuperRegRCs.front() == BiggestSuperRegRC && "Biggest class wasn't first");

  // Find all the subreg classes and order them by size too.
  // Each entry pairs a candidate sub-register class with the bit-set of
  // classes whose registers have a SubIdx sub-register inside it.
  std::vector<std::pair<CodeGenRegisterClass *, BitVector>> SuperRegClasses;
  for (auto &RC: RegClasses) {
    BitVector SuperRegClassesBV(RegClasses.size());
    RC.getSuperRegClasses(SubIdx, SuperRegClassesBV);
    if (SuperRegClassesBV.any())
      SuperRegClasses.push_back(std::make_pair(&RC, SuperRegClassesBV));
  }
  llvm::sort(SuperRegClasses,
             [&](const std::pair<CodeGenRegisterClass *, BitVector> &A,
                 const std::pair<CodeGenRegisterClass *, BitVector> &B) {
               return SizeOrder(A.first, B.first);
             });

  // Find the biggest subclass and subreg class such that R:subidx is in the
  // subreg class for all R in subclass.
  //
  // For example:
  // All registers in X86's GR64 have a sub_32bit subregister but no class
  // exists that contains all the 32-bit subregisters because GR64 contains RIP
  // but GR32 does not contain EIP. Instead, we constrain SuperRegRC to
  // GR32_with_sub_8bit (which is identical to GR32_with_sub_32bit) and then,
  // having excluded RIP, we are able to find a SubRegRC (GR32).
  CodeGenRegisterClass *ChosenSuperRegClass = nullptr;
  CodeGenRegisterClass *SubRegRC = nullptr;
  for (auto *SuperRegRC : SuperRegRCs) {
    for (const auto &SuperRegClassPair : SuperRegClasses) {
      const BitVector &SuperRegClassBV = SuperRegClassPair.second;
      if (SuperRegClassBV[SuperRegRC->EnumValue]) {
        SubRegRC = SuperRegClassPair.first;
        ChosenSuperRegClass = SuperRegRC;

        // If SubRegRC is bigger than SuperRegRC then there are members of
        // SubRegRC that don't have super registers via SubIdx. Keep looking to
        // find a better fit and fall back on this one if there isn't one.
        //
        // This is intended to prevent X86 from making odd choices such as
        // picking LOW32_ADDR_ACCESS_RBP instead of GR32 in the example above.
        // LOW32_ADDR_ACCESS_RBP is a valid choice but contains registers that
        // aren't subregisters of SuperRegRC whereas GR32 has a direct 1:1
        // mapping.
        if (SuperRegRC->getMembers().size() >= SubRegRC->getMembers().size())
          return std::make_pair(ChosenSuperRegClass, SubRegRC);
      }
    }

    // If we found a fit but it wasn't quite ideal because SubRegRC had excess
    // registers, then we're done.
    if (ChosenSuperRegClass)
      return std::make_pair(ChosenSuperRegClass, SubRegRC);
  }

  return None;
}
1058
1059void CodeGenRegisterClass::getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
1060 BitVector &Out) const {
1061 auto FindI = SuperRegClasses.find(SubIdx);
1062 if (FindI == SuperRegClasses.end())
1063 return;
1064 for (CodeGenRegisterClass *RC : FindI->second)
1065 Out.set(RC->EnumValue);
1066}
1067
1068// Populate a unique sorted list of units from a register set.
1069void CodeGenRegisterClass::buildRegUnitSet(const CodeGenRegBank &RegBank,
1070 std::vector<unsigned> &RegUnits) const {
1071 std::vector<unsigned> TmpUnits;
1072 for (RegUnitIterator UnitI(Members); UnitI.isValid(); ++UnitI) {
1073 const RegUnit &RU = RegBank.getRegUnit(*UnitI);
1074 if (!RU.Artificial)
1075 TmpUnits.push_back(*UnitI);
1076 }
1077 llvm::sort(TmpUnits);
1078 std::unique_copy(TmpUnits.begin(), TmpUnits.end(),
1079 std::back_inserter(RegUnits));
1080}
1081
1082//===----------------------------------------------------------------------===//
1083// CodeGenRegBank
1084//===----------------------------------------------------------------------===//
1085
// Build the whole register bank from the .td records: sub-register indices,
// registers and tuples, the sub-register graph, register units, and finally
// the (user-defined plus inferred) register classes in topological order.
CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
                               const CodeGenHwModes &Modes) : CGH(Modes) {
  // Configure register Sets to understand register classes and tuples.
  Sets.addFieldExpander("RegisterClass", "MemberList");
  Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
  Sets.addExpander("RegisterTuples",
                   llvm::make_unique<TupleExpander>(SynthDefs));

  // Read in the user-defined (named) sub-register indices.
  // More indices will be synthesized later.
  std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
  llvm::sort(SRIs, LessRecord());
  for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
    getSubRegIdx(SRIs[i]);
  // Build composite maps from ComposedOf fields.
  for (auto &Idx : SubRegIndices)
    Idx.updateComponents(*this);

  // Read in the register definitions.
  std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
  llvm::sort(Regs, LessRecordRegister());
  // Assign the enumeration values.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
    getReg(Regs[i]);

  // Expand tuples and number the new registers.
  std::vector<Record*> Tups =
    Records.getAllDerivedDefinitions("RegisterTuples");

  for (Record *R : Tups) {
    // Each RegisterTuples def expands to a list of synthesized registers.
    std::vector<Record *> TupRegs = *Sets.expand(R);
    llvm::sort(TupRegs, LessRecordRegister());
    for (Record *RC : TupRegs)
      getReg(RC);
  }

  // Now all the registers are known. Build the object graph of explicit
  // register-register references.
  for (auto &Reg : Registers)
    Reg.buildObjectGraph(*this);

  // Compute register name map.
  for (auto &Reg : Registers)
    // FIXME: This could just be RegistersByName[name] = register, except that
    // causes some failures in MIPS - perhaps they have duplicate register name
    // entries? (or maybe there's a reason for it - I don't know much about this
    // code, just drive-by refactoring)
    RegistersByName.insert(
        std::make_pair(Reg.TheDef->getValueAsString("AsmName"), &Reg));

  // Precompute all sub-register maps.
  // This will create Composite entries for all inferred sub-register indices.
  for (auto &Reg : Registers)
    Reg.computeSubRegs(*this);

  // Compute transitive closure of subregister index ConcatenationOf vectors
  // and initialize ConcatIdx map.
  for (CodeGenSubRegIndex &SRI : SubRegIndices) {
    SRI.computeConcatTransitiveClosure();
    if (!SRI.ConcatenationOf.empty())
      ConcatIdx.insert(std::make_pair(
          SmallVector<CodeGenSubRegIndex*,8>(SRI.ConcatenationOf.begin(),
                                             SRI.ConcatenationOf.end()), &SRI));
  }

  // Infer even more sub-registers by combining leading super-registers.
  for (auto &Reg : Registers)
    if (Reg.CoveredBySubRegs)
      Reg.computeSecondarySubRegs(*this);

  // After the sub-register graph is complete, compute the topologically
  // ordered SuperRegs list.
  for (auto &Reg : Registers)
    Reg.computeSuperRegs(*this);

  // For each pair of Reg:SR, if both are non-artificial, mark the
  // corresponding sub-register index as non-artificial.
  for (auto &Reg : Registers) {
    if (Reg.Artificial)
      continue;
    for (auto P : Reg.getSubRegs()) {
      const CodeGenRegister *SR = P.second;
      if (!SR->Artificial)
        P.first->Artificial = false;
    }
  }

  // Native register units are associated with a leaf register. They've all been
  // discovered now.
  NumNativeRegUnits = RegUnits.size();

  // Read in register class definitions.
  std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
  if (RCs.empty())
    PrintFatalError("No 'RegisterClass' subclasses defined!");

  // Allocate user-defined register classes.
  for (auto *R : RCs) {
    RegClasses.emplace_back(*this, R);
    CodeGenRegisterClass &RC = RegClasses.back();
    // Artificial classes are kept out of the lookup maps.
    if (!RC.Artificial)
      addToMaps(&RC);
  }

  // Infer missing classes to create a full algebra.
  computeInferredRegisterClasses();

  // Order register classes topologically and assign enum values.
  RegClasses.sort(TopoOrderRC);
  unsigned i = 0;
  for (auto &RC : RegClasses)
    RC.EnumValue = i++;
  CodeGenRegisterClass::computeSubClasses(*this);
}
1200
1201// Create a synthetic CodeGenSubRegIndex without a corresponding Record.
1202CodeGenSubRegIndex*
1203CodeGenRegBank::createSubRegIndex(StringRef Name, StringRef Namespace) {
1204 SubRegIndices.emplace_back(Name, Namespace, SubRegIndices.size() + 1);
1205 return &SubRegIndices.back();
1206}
1207
1208CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
1209 CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
1210 if (Idx)
1211 return Idx;
1212 SubRegIndices.emplace_back(Def, SubRegIndices.size() + 1);
1213 Idx = &SubRegIndices.back();
1214 return Idx;
1215}
1216
1217CodeGenRegister *CodeGenRegBank::getReg(Record *Def) {
1218 CodeGenRegister *&Reg = Def2Reg[Def];
1219 if (Reg)
1220 return Reg;
1221 Registers.emplace_back(Def, Registers.size() + 1);
1222 Reg = &Registers.back();
1223 return Reg;
1224}
1225
1226void CodeGenRegBank::addToMaps(CodeGenRegisterClass *RC) {
1227 if (Record *Def = RC->getDef())
1228 Def2RC.insert(std::make_pair(Def, RC));
1229
1230 // Duplicate classes are rejected by insert().
1231 // That's OK, we only care about the properties handled by CGRC::Key.
1232 CodeGenRegisterClass::Key K(*RC);
1233 Key2RC.insert(std::make_pair(K, RC));
1234}
1235
1236// Create a synthetic sub-class if it is missing.
1237CodeGenRegisterClass*
1238CodeGenRegBank::getOrCreateSubClass(const CodeGenRegisterClass *RC,
1239 const CodeGenRegister::Vec *Members,
1240 StringRef Name) {
1241 // Synthetic sub-class has the same size and alignment as RC.
1242 CodeGenRegisterClass::Key K(Members, RC->RSI);
1243 RCKeyMap::const_iterator FoundI = Key2RC.find(K);
1244 if (FoundI != Key2RC.end())
1245 return FoundI->second;
1246
1247 // Sub-class doesn't exist, create a new one.
1248 RegClasses.emplace_back(*this, Name, K);
1249 addToMaps(&RegClasses.back());
1250 return &RegClasses.back();
1251}
1252
1253CodeGenRegisterClass *CodeGenRegBank::getRegClass(Record *Def) {
1254 if (CodeGenRegisterClass *RC = Def2RC[Def])
1255 return RC;
1256
1257 PrintFatalError(Def->getLoc(), "Not a known RegisterClass!");
1258}
1259
1260CodeGenSubRegIndex*
1261CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
1262 CodeGenSubRegIndex *B) {
1263 // Look for an existing entry.
1264 CodeGenSubRegIndex *Comp = A->compose(B);
1265 if (Comp)
1266 return Comp;
1267
1268 // None exists, synthesize one.
1269 std::string Name = A->getName() + "_then_" + B->getName();
1270 Comp = createSubRegIndex(Name, A->getNamespace());
1271 A->addComposite(B, Comp);
1272 return Comp;
1273}
1274
// Return the sub-register index representing the concatenation of Parts,
// synthesizing a new "A_B_C..." index (with summed Size) on first request.
CodeGenSubRegIndex *CodeGenRegBank::
getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts) {
  assert(Parts.size() > 1 && "Need two parts to concatenate");
#ifndef NDEBUG
  // Parts are expected to be leaf indices after the transitive closure pass.
  for (CodeGenSubRegIndex *Idx : Parts) {
    assert(Idx->ConcatenationOf.empty() && "No transitive closure?");
  }
#endif

  // Look for an existing entry.
  CodeGenSubRegIndex *&Idx = ConcatIdx[Parts];
  if (Idx)
    return Idx;

  // None exists, synthesize one.
  std::string Name = Parts.front()->getName();
  // Determine whether all parts are contiguous.
  bool isContinuous = true;
  unsigned Size = Parts.front()->Size;
  unsigned LastOffset = Parts.front()->Offset;
  unsigned LastSize = Parts.front()->Size;
  for (unsigned i = 1, e = Parts.size(); i != e; ++i) {
    Name += '_';
    Name += Parts[i]->getName();
    Size += Parts[i]->Size;
    // A gap or overlap between consecutive parts makes the offset unknowable.
    if (Parts[i]->Offset != (LastOffset + LastSize))
      isContinuous = false;
    LastOffset = Parts[i]->Offset;
    LastSize = Parts[i]->Size;
  }
  Idx = createSubRegIndex(Name, Parts.front()->getNamespace());
  Idx->Size = Size;
  // Offset is only meaningful when the parts form one contiguous range.
  Idx->Offset = isContinuous ? Parts.front()->Offset : -1;
  Idx->ConcatenationOf.assign(Parts.begin(), Parts.end());
  return Idx;
}
1311
// Compute all sub-register index compositions by walking Reg -> subreg ->
// sub-subreg chains, recording each discovered composite and warning about
// ambiguous (conflicting) compositions.
void CodeGenRegBank::computeComposites() {
  using RegMap = std::map<const CodeGenRegister*, const CodeGenRegister*>;

  // Subreg -> { Reg->Reg }, where the right-hand side is the mapping from
  // register to (sub)register associated with the action of the left-hand
  // side subregister.
  std::map<const CodeGenSubRegIndex*, RegMap> SubRegAction;
  for (const CodeGenRegister &R : Registers) {
    const CodeGenRegister::SubRegMap &SM = R.getSubRegs();
    for (std::pair<const CodeGenSubRegIndex*, const CodeGenRegister*> P : SM)
      SubRegAction[P.first].insert({&R, P.second});
  }

  // Calculate the composition of two subregisters as compositions of their
  // associated actions.
  auto compose = [&SubRegAction] (const CodeGenSubRegIndex *Sub1,
                                  const CodeGenSubRegIndex *Sub2) {
    RegMap C;
    const RegMap &Img1 = SubRegAction.at(Sub1);
    const RegMap &Img2 = SubRegAction.at(Sub2);
    // C(Reg) = Img2(Img1(Reg)) wherever both maps are defined.
    for (std::pair<const CodeGenRegister*, const CodeGenRegister*> P : Img1) {
      auto F = Img2.find(P.second);
      if (F != Img2.end())
        C.insert({P.first, F->second});
    }
    return C;
  };

  // Check if the two maps agree on the intersection of their domains.
  auto agree = [] (const RegMap &Map1, const RegMap &Map2) {
    // Technically speaking, an empty map agrees with any other map, but
    // this could flag false positives. We're interested in non-vacuous
    // agreements.
    if (Map1.empty() || Map2.empty())
      return false;
    for (std::pair<const CodeGenRegister*, const CodeGenRegister*> P : Map1) {
      auto F = Map2.find(P.first);
      if (F == Map2.end() || P.second != F->second)
        return false;
    }
    return true;
  };

  // Remember which (Idx1, Idx2) compositions came from the .td files so the
  // warning below can be conditioned on it.
  using CompositePair = std::pair<const CodeGenSubRegIndex*,
                                  const CodeGenSubRegIndex*>;
  SmallSet<CompositePair,4> UserDefined;
  for (const CodeGenSubRegIndex &Idx : SubRegIndices)
    for (auto P : Idx.getComposites())
      UserDefined.insert(std::make_pair(&Idx, P.first));

  // Keep track of TopoSigs visited. We only need to visit each TopoSig once,
  // and many registers will share TopoSigs on regular architectures.
  BitVector TopoSigs(getNumTopoSigs());

  for (const auto &Reg1 : Registers) {
    // Skip identical subreg structures already processed.
    if (TopoSigs.test(Reg1.getTopoSig()))
      continue;
    TopoSigs.set(Reg1.getTopoSig());

    const CodeGenRegister::SubRegMap &SRM1 = Reg1.getSubRegs();
    for (CodeGenRegister::SubRegMap::const_iterator i1 = SRM1.begin(),
         e1 = SRM1.end(); i1 != e1; ++i1) {
      CodeGenSubRegIndex *Idx1 = i1->first;
      CodeGenRegister *Reg2 = i1->second;
      // Ignore identity compositions.
      if (&Reg1 == Reg2)
        continue;
      const CodeGenRegister::SubRegMap &SRM2 = Reg2->getSubRegs();
      // Try composing Idx1 with another SubRegIndex.
      for (CodeGenRegister::SubRegMap::const_iterator i2 = SRM2.begin(),
           e2 = SRM2.end(); i2 != e2; ++i2) {
        CodeGenSubRegIndex *Idx2 = i2->first;
        CodeGenRegister *Reg3 = i2->second;
        // Ignore identity compositions.
        if (Reg2 == Reg3)
          continue;
        // OK Reg1:IdxPair == Reg3. Find the index with Reg:Idx == Reg3.
        CodeGenSubRegIndex *Idx3 = Reg1.getSubRegIndex(Reg3);
        assert(Idx3 && "Sub-register doesn't have an index");

        // Conflicting composition? Emit a warning but allow it.
        if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, Idx3)) {
          // If the composition was not user-defined, always emit a warning.
          if (!UserDefined.count({Idx1, Idx2}) ||
              agree(compose(Idx1, Idx2), SubRegAction.at(Idx3)))
            PrintWarning(Twine("SubRegIndex ") + Idx1->getQualifiedName() +
                         " and " + Idx2->getQualifiedName() +
                         " compose ambiguously as " + Prev->getQualifiedName() +
                         " or " + Idx3->getQualifiedName());
        }
      }
    }
  }
}
1407
1408// Compute lane masks. This is similar to register units, but at the
1409// sub-register index level. Each bit in the lane mask is like a register unit
1410// class, and two lane masks will have a bit in common if two sub-register
1411// indices overlap in some register.
1412//
1413// Conservatively share a lane mask bit if two sub-register indices overlap in
1414// some registers, but not in others. That shouldn't happen a lot.
1415void CodeGenRegBank::computeSubRegLaneMasks() {
1416 // First assign individual bits to all the leaf indices.
1417 unsigned Bit = 0;
1418 // Determine mask of lanes that cover their registers.
1419 CoveringLanes = LaneBitmask::getAll();
1420 for (auto &Idx : SubRegIndices) {
1421 if (Idx.getComposites().empty()) {
1422 if (Bit > LaneBitmask::BitWidth) {
1423 PrintFatalError(
1424 Twine("Ran out of lanemask bits to represent subregister ")
1425 + Idx.getName());
1426 }
1427 Idx.LaneMask = LaneBitmask::getLane(Bit);
1428 ++Bit;
1429 } else {
1430 Idx.LaneMask = LaneBitmask::getNone();
1431 }
1432 }
1433
1434 // Compute transformation sequences for composeSubRegIndexLaneMask. The idea
1435 // here is that for each possible target subregister we look at the leafs
1436 // in the subregister graph that compose for this target and create
1437 // transformation sequences for the lanemasks. Each step in the sequence
1438 // consists of a bitmask and a bitrotate operation. As the rotation amounts
1439 // are usually the same for many subregisters we can easily combine the steps
1440 // by combining the masks.
1441 for (const auto &Idx : SubRegIndices) {
1442 const auto &Composites = Idx.getComposites();
1443 auto &LaneTransforms = Idx.CompositionLaneMaskTransform;
1444
1445 if (Composites.empty()) {
2
Assuming the condition is true
3
Taking true branch
1446 // Moving from a class with no subregisters we just had a single lane:
1447 // The subregister must be a leaf subregister and only occupies 1 bit.
1448 // Move the bit from the class without subregisters into that position.
1449 unsigned DstBit = Idx.LaneMask.getHighestLane();
4
Calling 'LaneBitmask::getHighestLane'
9
Returning from 'LaneBitmask::getHighestLane'
10
'DstBit' initialized to 4294967295
1450 assert(Idx.LaneMask == LaneBitmask::getLane(DstBit) &&((Idx.LaneMask == LaneBitmask::getLane(DstBit) && "Must be a leaf subregister"
) ? static_cast<void> (0) : __assert_fail ("Idx.LaneMask == LaneBitmask::getLane(DstBit) && \"Must be a leaf subregister\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1451, __PRETTY_FUNCTION__))
11
Passing the value 4294967295 via 1st parameter 'Lane'
12
Calling 'LaneBitmask::getLane'
1451 "Must be a leaf subregister")((Idx.LaneMask == LaneBitmask::getLane(DstBit) && "Must be a leaf subregister"
) ? static_cast<void> (0) : __assert_fail ("Idx.LaneMask == LaneBitmask::getLane(DstBit) && \"Must be a leaf subregister\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1451, __PRETTY_FUNCTION__))
;
1452 MaskRolPair MaskRol = { LaneBitmask::getLane(0), (uint8_t)DstBit };
1453 LaneTransforms.push_back(MaskRol);
1454 } else {
1455 // Go through all leaf subregisters and find the ones that compose with
1456 // Idx. These make out all possible valid bits in the lane mask we want to
1457 // transform. Looking only at the leafs ensure that only a single bit in
1458 // the mask is set.
1459 unsigned NextBit = 0;
1460 for (auto &Idx2 : SubRegIndices) {
1461 // Skip non-leaf subregisters.
1462 if (!Idx2.getComposites().empty())
1463 continue;
1464 // Replicate the behaviour from the lane mask generation loop above.
1465 unsigned SrcBit = NextBit;
1466 LaneBitmask SrcMask = LaneBitmask::getLane(SrcBit);
1467 if (NextBit < LaneBitmask::BitWidth-1)
1468 ++NextBit;
1469 assert(Idx2.LaneMask == SrcMask)((Idx2.LaneMask == SrcMask) ? static_cast<void> (0) : __assert_fail
("Idx2.LaneMask == SrcMask", "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1469, __PRETTY_FUNCTION__))
;
1470
1471 // Get the composed subregister if there is any.
1472 auto C = Composites.find(&Idx2);
1473 if (C == Composites.end())
1474 continue;
1475 const CodeGenSubRegIndex *Composite = C->second;
1476 // The Composed subreg should be a leaf subreg too
1477 assert(Composite->getComposites().empty())((Composite->getComposites().empty()) ? static_cast<void
> (0) : __assert_fail ("Composite->getComposites().empty()"
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1477, __PRETTY_FUNCTION__))
;
1478
1479 // Create Mask+Rotate operation and merge with existing ops if possible.
1480 unsigned DstBit = Composite->LaneMask.getHighestLane();
1481 int Shift = DstBit - SrcBit;
1482 uint8_t RotateLeft = Shift >= 0 ? (uint8_t)Shift
1483 : LaneBitmask::BitWidth + Shift;
1484 for (auto &I : LaneTransforms) {
1485 if (I.RotateLeft == RotateLeft) {
1486 I.Mask |= SrcMask;
1487 SrcMask = LaneBitmask::getNone();
1488 }
1489 }
1490 if (SrcMask.any()) {
1491 MaskRolPair MaskRol = { SrcMask, RotateLeft };
1492 LaneTransforms.push_back(MaskRol);
1493 }
1494 }
1495 }
1496
1497 // Optimize if the transformation consists of one step only: Set mask to
1498 // 0xffffffff (including some irrelevant invalid bits) so that it should
1499 // merge with more entries later while compressing the table.
1500 if (LaneTransforms.size() == 1)
1501 LaneTransforms[0].Mask = LaneBitmask::getAll();
1502
1503 // Further compression optimization: For invalid compositions resulting
1504 // in a sequence with 0 entries we can just pick any other. Choose
1505 // Mask 0xffffffff with Rotation 0.
1506 if (LaneTransforms.size() == 0) {
1507 MaskRolPair P = { LaneBitmask::getAll(), 0 };
1508 LaneTransforms.push_back(P);
1509 }
1510 }
1511
1512 // FIXME: What if ad-hoc aliasing introduces overlaps that aren't represented
1513 // by the sub-register graph? This doesn't occur in any known targets.
1514
1515 // Inherit lanes from composites.
1516 for (const auto &Idx : SubRegIndices) {
1517 LaneBitmask Mask = Idx.computeLaneMask();
1518 // If some super-registers without CoveredBySubRegs use this index, we can
1519 // no longer assume that the lanes are covering their registers.
1520 if (!Idx.AllSuperRegsCovered)
1521 CoveringLanes &= ~Mask;
1522 }
1523
1524 // Compute lane mask combinations for register classes.
1525 for (auto &RegClass : RegClasses) {
1526 LaneBitmask LaneMask;
1527 for (const auto &SubRegIndex : SubRegIndices) {
1528 if (RegClass.getSubClassWithSubReg(&SubRegIndex) == nullptr)
1529 continue;
1530 LaneMask |= SubRegIndex.LaneMask;
1531 }
1532
1533 // For classes without any subregisters set LaneMask to 1 instead of 0.
1534 // This makes it easier for client code to handle classes uniformly.
1535 if (LaneMask.none())
1536 LaneMask = LaneBitmask::getLane(0);
1537
1538 RegClass.LaneMask = LaneMask;
1539 }
1540}
1541
1542namespace {
1543
1544// UberRegSet is a helper class for computeRegUnitWeights. Each UberRegSet is
1545// the transitive closure of the union of overlapping register
1546// classes. Together, the UberRegSets form a partition of the registers. If we
1547// consider overlapping register classes to be connected, then each UberRegSet
1548// is a set of connected components.
1549//
1550// An UberRegSet will likely be a horizontal slice of register names of
1551// the same width. Nontrivial subregisters should then be in a separate
1552// UberRegSet. But this property isn't required for valid computation of
1553// register unit weights.
1554//
1555// A Weight field caches the max per-register unit weight in each UberRegSet.
1556//
1557// A set of SingularDeterminants flags single units of some register in this set
1558// for which the unit weight equals the set weight. These units should not have
1559// their weight increased.
struct UberRegSet {
  // Members of this set; kept sorted and unique by computeUberSets.
  CodeGenRegister::Vec Regs;
  // Cached maximum per-register unit weight over all registers in the set;
  // maintained by computeUberWeights.
  unsigned Weight = 0;
  // Units that singularly determine the set weight (see the comment block
  // above); their weight must not be increased during normalization.
  CodeGenRegister::RegUnitList SingularDeterminants;

  UberRegSet() = default;
};
1567
1568} // end anonymous namespace
1569
1570// Partition registers into UberRegSets, where each set is the transitive
1571// closure of the union of overlapping register classes.
1572//
1573// UberRegSets[0] is a special non-allocatable set.
1574static void computeUberSets(std::vector<UberRegSet> &UberSets,
1575 std::vector<UberRegSet*> &RegSets,
1576 CodeGenRegBank &RegBank) {
1577 const auto &Registers = RegBank.getRegisters();
1578
1579 // The Register EnumValue is one greater than its index into Registers.
1580 assert(Registers.size() == Registers.back().EnumValue &&((Registers.size() == Registers.back().EnumValue && "register enum value mismatch"
) ? static_cast<void> (0) : __assert_fail ("Registers.size() == Registers.back().EnumValue && \"register enum value mismatch\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1581, __PRETTY_FUNCTION__))
1581 "register enum value mismatch")((Registers.size() == Registers.back().EnumValue && "register enum value mismatch"
) ? static_cast<void> (0) : __assert_fail ("Registers.size() == Registers.back().EnumValue && \"register enum value mismatch\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1581, __PRETTY_FUNCTION__))
;
1582
1583 // For simplicitly make the SetID the same as EnumValue.
1584 IntEqClasses UberSetIDs(Registers.size()+1);
1585 std::set<unsigned> AllocatableRegs;
1586 for (auto &RegClass : RegBank.getRegClasses()) {
1587 if (!RegClass.Allocatable)
1588 continue;
1589
1590 const CodeGenRegister::Vec &Regs = RegClass.getMembers();
1591 if (Regs.empty())
1592 continue;
1593
1594 unsigned USetID = UberSetIDs.findLeader((*Regs.begin())->EnumValue);
1595 assert(USetID && "register number 0 is invalid")((USetID && "register number 0 is invalid") ? static_cast
<void> (0) : __assert_fail ("USetID && \"register number 0 is invalid\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1595, __PRETTY_FUNCTION__))
;
1596
1597 AllocatableRegs.insert((*Regs.begin())->EnumValue);
1598 for (auto I = std::next(Regs.begin()), E = Regs.end(); I != E; ++I) {
1599 AllocatableRegs.insert((*I)->EnumValue);
1600 UberSetIDs.join(USetID, (*I)->EnumValue);
1601 }
1602 }
1603 // Combine non-allocatable regs.
1604 for (const auto &Reg : Registers) {
1605 unsigned RegNum = Reg.EnumValue;
1606 if (AllocatableRegs.count(RegNum))
1607 continue;
1608
1609 UberSetIDs.join(0, RegNum);
1610 }
1611 UberSetIDs.compress();
1612
1613 // Make the first UberSet a special unallocatable set.
1614 unsigned ZeroID = UberSetIDs[0];
1615
1616 // Insert Registers into the UberSets formed by union-find.
1617 // Do not resize after this.
1618 UberSets.resize(UberSetIDs.getNumClasses());
1619 unsigned i = 0;
1620 for (const CodeGenRegister &Reg : Registers) {
1621 unsigned USetID = UberSetIDs[Reg.EnumValue];
1622 if (!USetID)
1623 USetID = ZeroID;
1624 else if (USetID == ZeroID)
1625 USetID = 0;
1626
1627 UberRegSet *USet = &UberSets[USetID];
1628 USet->Regs.push_back(&Reg);
1629 sortAndUniqueRegisters(USet->Regs);
1630 RegSets[i++] = USet;
1631 }
1632}
1633
1634// Recompute each UberSet weight after changing unit weights.
1635static void computeUberWeights(std::vector<UberRegSet> &UberSets,
1636 CodeGenRegBank &RegBank) {
1637 // Skip the first unallocatable set.
1638 for (std::vector<UberRegSet>::iterator I = std::next(UberSets.begin()),
1639 E = UberSets.end(); I != E; ++I) {
1640
1641 // Initialize all unit weights in this set, and remember the max units/reg.
1642 const CodeGenRegister *Reg = nullptr;
1643 unsigned MaxWeight = 0, Weight = 0;
1644 for (RegUnitIterator UnitI(I->Regs); UnitI.isValid(); ++UnitI) {
1645 if (Reg != UnitI.getReg()) {
1646 if (Weight > MaxWeight)
1647 MaxWeight = Weight;
1648 Reg = UnitI.getReg();
1649 Weight = 0;
1650 }
1651 if (!RegBank.getRegUnit(*UnitI).Artificial) {
1652 unsigned UWeight = RegBank.getRegUnit(*UnitI).Weight;
1653 if (!UWeight) {
1654 UWeight = 1;
1655 RegBank.increaseRegUnitWeight(*UnitI, UWeight);
1656 }
1657 Weight += UWeight;
1658 }
1659 }
1660 if (Weight > MaxWeight)
1661 MaxWeight = Weight;
1662 if (I->Weight != MaxWeight) {
1663 LLVM_DEBUG(dbgs() << "UberSet " << I - UberSets.begin() << " Weight "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UberSet " << I
- UberSets.begin() << " Weight " << MaxWeight; for
(auto &Unit : I->Regs) dbgs() << " " << Unit
->getName(); dbgs() << "\n"; } } while (false)
1664 << MaxWeight;do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UberSet " << I
- UberSets.begin() << " Weight " << MaxWeight; for
(auto &Unit : I->Regs) dbgs() << " " << Unit
->getName(); dbgs() << "\n"; } } while (false)
1665 for (auto &Unitdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UberSet " << I
- UberSets.begin() << " Weight " << MaxWeight; for
(auto &Unit : I->Regs) dbgs() << " " << Unit
->getName(); dbgs() << "\n"; } } while (false)
1666 : I->Regs) dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UberSet " << I
- UberSets.begin() << " Weight " << MaxWeight; for
(auto &Unit : I->Regs) dbgs() << " " << Unit
->getName(); dbgs() << "\n"; } } while (false)
1667 << " " << Unit->getName();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UberSet " << I
- UberSets.begin() << " Weight " << MaxWeight; for
(auto &Unit : I->Regs) dbgs() << " " << Unit
->getName(); dbgs() << "\n"; } } while (false)
1668 dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UberSet " << I
- UberSets.begin() << " Weight " << MaxWeight; for
(auto &Unit : I->Regs) dbgs() << " " << Unit
->getName(); dbgs() << "\n"; } } while (false)
;
1669 // Update the set weight.
1670 I->Weight = MaxWeight;
1671 }
1672
1673 // Find singular determinants.
1674 for (const auto R : I->Regs) {
1675 if (R->getRegUnits().count() == 1 && R->getWeight(RegBank) == I->Weight) {
1676 I->SingularDeterminants |= R->getRegUnits();
1677 }
1678 }
1679 }
1680}
1681
// normalizeWeight is a computeRegUnitWeights helper that adjusts the weight of
// a register and its subregisters so that they have the same weight as their
// UberSet. Self-recursion processes the subregister tree in postorder so
// subregisters are normalized first.
//
// Side effects:
// - creates new adopted register units
// - causes superregisters to inherit adopted units
// - increases the weight of "singular" units
// - induces recomputation of UberWeights.
//
// Returns true if any unit weight or unit set was changed.
static bool normalizeWeight(CodeGenRegister *Reg,
                            std::vector<UberRegSet> &UberSets,
                            std::vector<UberRegSet*> &RegSets,
                            BitVector &NormalRegs,
                            CodeGenRegister::RegUnitList &NormalUnits,
                            CodeGenRegBank &RegBank) {
  // Visit each register at most once per top-level invocation.
  NormalRegs.resize(std::max(Reg->EnumValue + 1, NormalRegs.size()));
  if (NormalRegs.test(Reg->EnumValue))
    return false;
  NormalRegs.set(Reg->EnumValue);

  bool Changed = false;
  const CodeGenRegister::SubRegMap &SRM = Reg->getSubRegs();
  for (CodeGenRegister::SubRegMap::const_iterator SRI = SRM.begin(),
       SRE = SRM.end(); SRI != SRE; ++SRI) {
    if (SRI->second == Reg)
      continue; // self-cycles happen

    Changed |= normalizeWeight(SRI->second, UberSets, RegSets,
                               NormalRegs, NormalUnits, RegBank);
  }
  // Postorder register normalization.

  // Inherit register units newly adopted by subregisters.
  if (Reg->inheritRegUnits(RegBank))
    computeUberWeights(UberSets, RegBank);

  // Check if this register is too skinny for its UberRegSet.
  UberRegSet *UberSet = RegSets[RegBank.getRegIndex(Reg)];

  unsigned RegWeight = Reg->getWeight(RegBank);
  if (UberSet->Weight > RegWeight) {
    // A register unit's weight can be adjusted only if it is the singular unit
    // for this register, has not been used to normalize a subregister's set,
    // and has not already been used to singularly determine this UberRegSet.
    unsigned AdjustUnit = *Reg->getRegUnits().begin();
    if (Reg->getRegUnits().count() != 1
        || hasRegUnit(NormalUnits, AdjustUnit)
        || hasRegUnit(UberSet->SingularDeterminants, AdjustUnit)) {
      // We don't have an adjustable unit, so adopt a new one.
      AdjustUnit = RegBank.newRegUnit(UberSet->Weight - RegWeight);
      Reg->adoptRegUnit(AdjustUnit);
      // Adopting a unit does not immediately require recomputing set weights.
    }
    else {
      // Adjust the existing single unit.
      if (!RegBank.getRegUnit(AdjustUnit).Artificial)
        RegBank.increaseRegUnitWeight(AdjustUnit, UberSet->Weight - RegWeight);
      // The unit may be shared among sets and registers within this set.
      computeUberWeights(UberSets, RegBank);
    }
    Changed = true;
  }

  // Mark these units normalized so superregisters can't change their weights.
  NormalUnits |= Reg->getRegUnits();

  return Changed;
}
1751
1752// Compute a weight for each register unit created during getSubRegs.
1753//
1754// The goal is that two registers in the same class will have the same weight,
1755// where each register's weight is defined as sum of its units' weights.
1756void CodeGenRegBank::computeRegUnitWeights() {
1757 std::vector<UberRegSet> UberSets;
1758 std::vector<UberRegSet*> RegSets(Registers.size());
1759 computeUberSets(UberSets, RegSets, *this);
1760 // UberSets and RegSets are now immutable.
1761
1762 computeUberWeights(UberSets, *this);
1763
1764 // Iterate over each Register, normalizing the unit weights until reaching
1765 // a fix point.
1766 unsigned NumIters = 0;
1767 for (bool Changed = true; Changed; ++NumIters) {
1768 assert(NumIters <= NumNativeRegUnits && "Runaway register unit weights")((NumIters <= NumNativeRegUnits && "Runaway register unit weights"
) ? static_cast<void> (0) : __assert_fail ("NumIters <= NumNativeRegUnits && \"Runaway register unit weights\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1768, __PRETTY_FUNCTION__))
;
1769 Changed = false;
1770 for (auto &Reg : Registers) {
1771 CodeGenRegister::RegUnitList NormalUnits;
1772 BitVector NormalRegs;
1773 Changed |= normalizeWeight(&Reg, UberSets, RegSets, NormalRegs,
1774 NormalUnits, *this);
1775 }
1776 }
1777}
1778
1779// Find a set in UniqueSets with the same elements as Set.
1780// Return an iterator into UniqueSets.
1781static std::vector<RegUnitSet>::const_iterator
1782findRegUnitSet(const std::vector<RegUnitSet> &UniqueSets,
1783 const RegUnitSet &Set) {
1784 std::vector<RegUnitSet>::const_iterator
1785 I = UniqueSets.begin(), E = UniqueSets.end();
1786 for(;I != E; ++I) {
1787 if (I->Units == Set.Units)
1788 break;
1789 }
1790 return I;
1791}
1792
// Return true if the RUSubSet is a subset of RUSuperSet.
// Both unit lists are expected to be sorted (std::includes requirement).
static bool isRegUnitSubSet(const std::vector<unsigned> &RUSubSet,
                            const std::vector<unsigned> &RUSuperSet) {
  const bool Contained =
      std::includes(RUSuperSet.begin(), RUSuperSet.end(),
                    RUSubSet.begin(), RUSubSet.end());
  return Contained;
}
1799
1800/// Iteratively prune unit sets. Prune subsets that are close to the superset,
1801/// but with one or two registers removed. We occasionally have registers like
1802/// APSR and PC thrown in with the general registers. We also see many
1803/// special-purpose register subsets, such as tail-call and Thumb
1804/// encodings. Generating all possible overlapping sets is combinatorial and
1805/// overkill for modeling pressure. Ideally we could fix this statically in
1806/// tablegen by (1) having the target define register classes that only include
1807/// the allocatable registers and marking other classes as non-allocatable and
1808/// (2) having a way to mark special purpose classes as "don't-care" classes for
1809/// the purpose of pressure. However, we make an attempt to handle targets that
1810/// are not nicely defined by merging nearly identical register unit sets
1811/// statically. This generates smaller tables. Then, dynamically, we adjust the
1812/// set limit by filtering the reserved registers.
1813///
1814/// Merge sets only if the units have the same weight. For example, on ARM,
1815/// Q-tuples with ssub index 0 include all S regs but also include D16+. We
1816/// should not expand the S set to include D regs.
1817void CodeGenRegBank::pruneUnitSets() {
1818 assert(RegClassUnitSets.empty() && "this invalidates RegClassUnitSets")((RegClassUnitSets.empty() && "this invalidates RegClassUnitSets"
) ? static_cast<void> (0) : __assert_fail ("RegClassUnitSets.empty() && \"this invalidates RegClassUnitSets\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1818, __PRETTY_FUNCTION__))
;
1819
1820 // Form an equivalence class of UnitSets with no significant difference.
1821 std::vector<unsigned> SuperSetIDs;
1822 for (unsigned SubIdx = 0, EndIdx = RegUnitSets.size();
1823 SubIdx != EndIdx; ++SubIdx) {
1824 const RegUnitSet &SubSet = RegUnitSets[SubIdx];
1825 unsigned SuperIdx = 0;
1826 for (; SuperIdx != EndIdx; ++SuperIdx) {
1827 if (SuperIdx == SubIdx)
1828 continue;
1829
1830 unsigned UnitWeight = RegUnits[SubSet.Units[0]].Weight;
1831 const RegUnitSet &SuperSet = RegUnitSets[SuperIdx];
1832 if (isRegUnitSubSet(SubSet.Units, SuperSet.Units)
1833 && (SubSet.Units.size() + 3 > SuperSet.Units.size())
1834 && UnitWeight == RegUnits[SuperSet.Units[0]].Weight
1835 && UnitWeight == RegUnits[SuperSet.Units.back()].Weight) {
1836 LLVM_DEBUG(dbgs() << "UnitSet " << SubIdx << " subsumed by " << SuperIdxdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << SubIdx
<< " subsumed by " << SuperIdx << "\n"; } }
while (false)
1837 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << SubIdx
<< " subsumed by " << SuperIdx << "\n"; } }
while (false)
;
1838 // We can pick any of the set names for the merged set. Go for the
1839 // shortest one to avoid picking the name of one of the classes that are
1840 // artificially created by tablegen. So "FPR128_lo" instead of
1841 // "QQQQ_with_qsub3_in_FPR128_lo".
1842 if (RegUnitSets[SubIdx].Name.size() < RegUnitSets[SuperIdx].Name.size())
1843 RegUnitSets[SuperIdx].Name = RegUnitSets[SubIdx].Name;
1844 break;
1845 }
1846 }
1847 if (SuperIdx == EndIdx)
1848 SuperSetIDs.push_back(SubIdx);
1849 }
1850 // Populate PrunedUnitSets with each equivalence class's superset.
1851 std::vector<RegUnitSet> PrunedUnitSets(SuperSetIDs.size());
1852 for (unsigned i = 0, e = SuperSetIDs.size(); i != e; ++i) {
1853 unsigned SuperIdx = SuperSetIDs[i];
1854 PrunedUnitSets[i].Name = RegUnitSets[SuperIdx].Name;
1855 PrunedUnitSets[i].Units.swap(RegUnitSets[SuperIdx].Units);
1856 }
1857 RegUnitSets.swap(PrunedUnitSets);
1858}
1859
1860// Create a RegUnitSet for each RegClass that contains all units in the class
1861// including adopted units that are necessary to model register pressure. Then
1862// iteratively compute RegUnitSets such that the union of any two overlapping
1863// RegUnitSets is repreresented.
1864//
1865// RegisterInfoEmitter will map each RegClass to its RegUnitClass and any
1866// RegUnitSet that is a superset of that RegUnitClass.
1867void CodeGenRegBank::computeRegUnitSets() {
1868 assert(RegUnitSets.empty() && "dirty RegUnitSets")((RegUnitSets.empty() && "dirty RegUnitSets") ? static_cast
<void> (0) : __assert_fail ("RegUnitSets.empty() && \"dirty RegUnitSets\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1868, __PRETTY_FUNCTION__))
;
1869
1870 // Compute a unique RegUnitSet for each RegClass.
1871 auto &RegClasses = getRegClasses();
1872 for (auto &RC : RegClasses) {
1873 if (!RC.Allocatable || RC.Artificial)
1874 continue;
1875
1876 // Speculatively grow the RegUnitSets to hold the new set.
1877 RegUnitSets.resize(RegUnitSets.size() + 1);
1878 RegUnitSets.back().Name = RC.getName();
1879
1880 // Compute a sorted list of units in this class.
1881 RC.buildRegUnitSet(*this, RegUnitSets.back().Units);
1882
1883 // Find an existing RegUnitSet.
1884 std::vector<RegUnitSet>::const_iterator SetI =
1885 findRegUnitSet(RegUnitSets, RegUnitSets.back());
1886 if (SetI != std::prev(RegUnitSets.end()))
1887 RegUnitSets.pop_back();
1888 }
1889
1890 LLVM_DEBUG(dbgs() << "\nBefore pruning:\n"; for (unsigned USIdx = 0,do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1891 USEnd = RegUnitSets.size();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1892 USIdx < USEnd; ++USIdx) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1893 dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1894 for (auto &U : RegUnitSets[USIdx].Units)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1895 printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1896 dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1897 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
;
1898
1899 // Iteratively prune unit sets.
1900 pruneUnitSets();
1901
1902 LLVM_DEBUG(dbgs() << "\nBefore union:\n"; for (unsigned USIdx = 0,do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1903 USEnd = RegUnitSets.size();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1904 USIdx < USEnd; ++USIdx) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1905 dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1906 for (auto &U : RegUnitSets[USIdx].Units)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1907 printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1908 dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1909 } dbgs() << "\nUnion sets:\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
;
1910
1911 // Iterate over all unit sets, including new ones added by this loop.
1912 unsigned NumRegUnitSubSets = RegUnitSets.size();
1913 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
1914 // In theory, this is combinatorial. In practice, it needs to be bounded
1915 // by a small number of sets for regpressure to be efficient.
1916 // If the assert is hit, we need to implement pruning.
1917 assert(Idx < (2*NumRegUnitSubSets) && "runaway unit set inference")((Idx < (2*NumRegUnitSubSets) && "runaway unit set inference"
) ? static_cast<void> (0) : __assert_fail ("Idx < (2*NumRegUnitSubSets) && \"runaway unit set inference\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 1917, __PRETTY_FUNCTION__))
;
1918
1919 // Compare new sets with all original classes.
1920 for (unsigned SearchIdx = (Idx >= NumRegUnitSubSets) ? 0 : Idx+1;
1921 SearchIdx != EndIdx; ++SearchIdx) {
1922 std::set<unsigned> Intersection;
1923 std::set_intersection(RegUnitSets[Idx].Units.begin(),
1924 RegUnitSets[Idx].Units.end(),
1925 RegUnitSets[SearchIdx].Units.begin(),
1926 RegUnitSets[SearchIdx].Units.end(),
1927 std::inserter(Intersection, Intersection.begin()));
1928 if (Intersection.empty())
1929 continue;
1930
1931 // Speculatively grow the RegUnitSets to hold the new set.
1932 RegUnitSets.resize(RegUnitSets.size() + 1);
1933 RegUnitSets.back().Name =
1934 RegUnitSets[Idx].Name + "+" + RegUnitSets[SearchIdx].Name;
1935
1936 std::set_union(RegUnitSets[Idx].Units.begin(),
1937 RegUnitSets[Idx].Units.end(),
1938 RegUnitSets[SearchIdx].Units.begin(),
1939 RegUnitSets[SearchIdx].Units.end(),
1940 std::inserter(RegUnitSets.back().Units,
1941 RegUnitSets.back().Units.begin()));
1942
1943 // Find an existing RegUnitSet, or add the union to the unique sets.
1944 std::vector<RegUnitSet>::const_iterator SetI =
1945 findRegUnitSet(RegUnitSets, RegUnitSets.back());
1946 if (SetI != std::prev(RegUnitSets.end()))
1947 RegUnitSets.pop_back();
1948 else {
1949 LLVM_DEBUG(dbgs() << "UnitSet " << RegUnitSets.size() - 1 << " "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
1950 << RegUnitSets.back().Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
1951 for (auto &Udo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
1952 : RegUnitSets.back().Units) printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
1953 dbgs() << "\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
;
1954 }
1955 }
1956 }
1957
1958 // Iteratively prune unit sets after inferring supersets.
1959 pruneUnitSets();
1960
1961 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
1962 dbgs() << "\n"; for (unsigned USIdx = 0, USEnd = RegUnitSets.size();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
1963 USIdx < USEnd; ++USIdx) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
1964 dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
1965 for (auto &U : RegUnitSets[USIdx].Units)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
1966 printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
1967 dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
1968 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
;
1969
1970 // For each register class, list the UnitSets that are supersets.
1971 RegClassUnitSets.resize(RegClasses.size());
1972 int RCIdx = -1;
1973 for (auto &RC : RegClasses) {
1974 ++RCIdx;
1975 if (!RC.Allocatable)
1976 continue;
1977
1978 // Recompute the sorted list of units in this class.
1979 std::vector<unsigned> RCRegUnits;
1980 RC.buildRegUnitSet(*this, RCRegUnits);
1981
1982 // Don't increase pressure for unallocatable regclasses.
1983 if (RCRegUnits.empty())
1984 continue;
1985
1986 LLVM_DEBUG(dbgs() << "RC " << RC.getName() << " Units: \n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units: \n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
1987 for (auto Udo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units: \n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
1988 : RCRegUnits) printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units: \n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
1989 dbgs() << "\n UnitSetIDs:")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units: \n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
;
1990
1991 // Find all supersets.
1992 for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
1993 USIdx != USEnd; ++USIdx) {
1994 if (isRegUnitSubSet(RCRegUnits, RegUnitSets[USIdx].Units)) {
1995 LLVM_DEBUG(dbgs() << " " << USIdx)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << " " << USIdx; }
} while (false)
;
1996 RegClassUnitSets[RCIdx].push_back(USIdx);
1997 }
1998 }
1999 LLVM_DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; } } while (false
)
;
2000 assert(!RegClassUnitSets[RCIdx].empty() && "missing unit set for regclass")((!RegClassUnitSets[RCIdx].empty() && "missing unit set for regclass"
) ? static_cast<void> (0) : __assert_fail ("!RegClassUnitSets[RCIdx].empty() && \"missing unit set for regclass\""
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 2000, __PRETTY_FUNCTION__))
;
2001 }
2002
2003 // For each register unit, ensure that we have the list of UnitSets that
2004 // contain the unit. Normally, this matches an existing list of UnitSets for a
2005 // register class. If not, we create a new entry in RegClassUnitSets as a
2006 // "fake" register class.
2007 for (unsigned UnitIdx = 0, UnitEnd = NumNativeRegUnits;
2008 UnitIdx < UnitEnd; ++UnitIdx) {
2009 std::vector<unsigned> RUSets;
2010 for (unsigned i = 0, e = RegUnitSets.size(); i != e; ++i) {
2011 RegUnitSet &RUSet = RegUnitSets[i];
2012 if (!is_contained(RUSet.Units, UnitIdx))
2013 continue;
2014 RUSets.push_back(i);
2015 }
2016 unsigned RCUnitSetsIdx = 0;
2017 for (unsigned e = RegClassUnitSets.size();
2018 RCUnitSetsIdx != e; ++RCUnitSetsIdx) {
2019 if (RegClassUnitSets[RCUnitSetsIdx] == RUSets) {
2020 break;
2021 }
2022 }
2023 RegUnits[UnitIdx].RegClassUnitSetsIdx = RCUnitSetsIdx;
2024 if (RCUnitSetsIdx == RegClassUnitSets.size()) {
2025 // Create a new list of UnitSets as a "fake" register class.
2026 RegClassUnitSets.resize(RCUnitSetsIdx + 1);
2027 RegClassUnitSets[RCUnitSetsIdx].swap(RUSets);
2028 }
2029 }
2030}
2031
2032void CodeGenRegBank::computeRegUnitLaneMasks() {
2033 for (auto &Register : Registers) {
2034 // Create an initial lane mask for all register units.
2035 const auto &RegUnits = Register.getRegUnits();
2036 CodeGenRegister::RegUnitLaneMaskList
2037 RegUnitLaneMasks(RegUnits.count(), LaneBitmask::getNone());
2038 // Iterate through SubRegisters.
2039 typedef CodeGenRegister::SubRegMap SubRegMap;
2040 const SubRegMap &SubRegs = Register.getSubRegs();
2041 for (SubRegMap::const_iterator S = SubRegs.begin(),
2042 SE = SubRegs.end(); S != SE; ++S) {
2043 CodeGenRegister *SubReg = S->second;
2044 // Ignore non-leaf subregisters, their lane masks are fully covered by
2045 // the leaf subregisters anyway.
2046 if (!SubReg->getSubRegs().empty())
2047 continue;
2048 CodeGenSubRegIndex *SubRegIndex = S->first;
2049 const CodeGenRegister *SubRegister = S->second;
2050 LaneBitmask LaneMask = SubRegIndex->LaneMask;
2051 // Distribute LaneMask to Register Units touched.
2052 for (unsigned SUI : SubRegister->getRegUnits()) {
2053 bool Found = false;
2054 unsigned u = 0;
2055 for (unsigned RU : RegUnits) {
2056 if (SUI == RU) {
2057 RegUnitLaneMasks[u] |= LaneMask;
2058 assert(!Found)((!Found) ? static_cast<void> (0) : __assert_fail ("!Found"
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 2058, __PRETTY_FUNCTION__))
;
2059 Found = true;
2060 }
2061 ++u;
2062 }
2063 (void)Found;
2064 assert(Found)((Found) ? static_cast<void> (0) : __assert_fail ("Found"
, "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 2064, __PRETTY_FUNCTION__))
;
2065 }
2066 }
2067 Register.setRegUnitLaneMasks(RegUnitLaneMasks);
2068 }
2069}
2070
2071void CodeGenRegBank::computeDerivedInfo() {
2072 computeComposites();
2073 computeSubRegLaneMasks();
1
Calling 'CodeGenRegBank::computeSubRegLaneMasks'
2074
2075 // Compute a weight for each register unit created during getSubRegs.
2076 // This may create adopted register units (with unit # >= NumNativeRegUnits).
2077 computeRegUnitWeights();
2078
2079 // Compute a unique set of RegUnitSets. One for each RegClass and inferred
2080 // supersets for the union of overlapping sets.
2081 computeRegUnitSets();
2082
2083 computeRegUnitLaneMasks();
2084
2085 // Compute register class HasDisjunctSubRegs/CoveredBySubRegs flag.
2086 for (CodeGenRegisterClass &RC : RegClasses) {
2087 RC.HasDisjunctSubRegs = false;
2088 RC.CoveredBySubRegs = true;
2089 for (const CodeGenRegister *Reg : RC.getMembers()) {
2090 RC.HasDisjunctSubRegs |= Reg->HasDisjunctSubRegs;
2091 RC.CoveredBySubRegs &= Reg->CoveredBySubRegs;
2092 }
2093 }
2094
2095 // Get the weight of each set.
2096 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
2097 RegUnitSets[Idx].Weight = getRegUnitSetWeight(RegUnitSets[Idx].Units);
2098
2099 // Find the order of each set.
2100 RegUnitSetOrder.reserve(RegUnitSets.size());
2101 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
2102 RegUnitSetOrder.push_back(Idx);
2103
2104 std::stable_sort(RegUnitSetOrder.begin(), RegUnitSetOrder.end(),
2105 [this](unsigned ID1, unsigned ID2) {
2106 return getRegPressureSet(ID1).Units.size() <
2107 getRegPressureSet(ID2).Units.size();
2108 });
2109 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
2110 RegUnitSets[RegUnitSetOrder[Idx]].Order = Idx;
2111 }
2112}
2113
2114//
2115// Synthesize missing register class intersections.
2116//
2117// Make sure that sub-classes of RC exists such that getCommonSubClass(RC, X)
2118// returns a maximal register class for all X.
2119//
2120void CodeGenRegBank::inferCommonSubClass(CodeGenRegisterClass *RC) {
2121 assert(!RegClasses.empty())((!RegClasses.empty()) ? static_cast<void> (0) : __assert_fail
("!RegClasses.empty()", "/build/llvm-toolchain-snapshot-8~svn350071/utils/TableGen/CodeGenRegisters.cpp"
, 2121, __PRETTY_FUNCTION__))
;
2122 // Stash the iterator to the last element so that this loop doesn't visit
2123 // elements added by the getOrCreateSubClass call within it.
2124 for (auto I = RegClasses.begin(), E = std::prev(RegClasses.end());
2125 I != std::next(E); ++I) {
2126 CodeGenRegisterClass *RC1 = RC;
2127 CodeGenRegisterClass *RC2 = &*I;
2128 if (RC1 == RC2)
2129 continue;
2130
2131 // Compute the set intersection of RC1 and RC2.
2132 const CodeGenRegister::Vec &Memb1 = RC1->getMembers();
2133 const CodeGenRegister::Vec &Memb2 = RC2->getMembers();
2134 CodeGenRegister::Vec Intersection;
2135 std::set_intersection(
2136 Memb1.begin(), Memb1.end(), Memb2.begin(), Memb2.end(),
2137 std::inserter(Intersection, Intersection.begin()), deref<llvm::less>());
2138
2139 // Skip disjoint class pairs.
2140 if (Intersection.empty())
2141 continue;
2142
2143 // If RC1 and RC2 have different spill sizes or alignments, use the
2144 // stricter one for sub-classing. If they are equal, prefer RC1.
2145 if (RC2->RSI.hasStricterSpillThan(RC1->RSI))
2146 std::swap(RC1, RC2);
2147
2148 getOrCreateSubClass(RC1, &Intersection,
2149 RC1->getName() + "_and_" + RC2->getName());
2150 }
2151}
2152
2153//
2154// Synthesize missing sub-classes for getSubClassWithSubReg().
2155//
2156// Make sure that the set of registers in RC with a given SubIdx sub-register
2157// form a register class. Update RC->SubClassWithSubReg.
2158//
2159void CodeGenRegBank::inferSubClassWithSubReg(CodeGenRegisterClass *RC) {
2160 // Map SubRegIndex to set of registers in RC supporting that SubRegIndex.
2161 typedef std::map<const CodeGenSubRegIndex *, CodeGenRegister::Vec,
2162 deref<llvm::less>> SubReg2SetMap;
2163
2164 // Compute the set of registers supporting each SubRegIndex.
2165 SubReg2SetMap SRSets;
2166 for (const auto R : RC->getMembers()) {
2167 if (R->Artificial)
2168 continue;
2169 const CodeGenRegister::SubRegMap &SRM = R->getSubRegs();
2170 for (CodeGenRegister::SubRegMap::const_iterator I = SRM.begin(),
2171 E = SRM.end(); I != E; ++I) {
2172 if (!I->first->Artificial)
2173 SRSets[I->first].push_back(R);
2174 }
2175 }
2176
2177 for (auto I : SRSets)
2178 sortAndUniqueRegisters(I.second);
2179
2180 // Find matching classes for all SRSets entries. Iterate in SubRegIndex
2181 // numerical order to visit synthetic indices last.
2182 for (const auto &SubIdx : SubRegIndices) {
2183 if (SubIdx.Artificial)
2184 continue;
2185 SubReg2SetMap::const_iterator I = SRSets.find(&SubIdx);
2186 // Unsupported SubRegIndex. Skip it.
2187 if (I == SRSets.end())
2188 continue;
2189 // In most cases, all RC registers support the SubRegIndex.
2190 if (I->second.size() == RC->getMembers().size()) {
2191 RC->setSubClassWithSubReg(&SubIdx, RC);
2192 continue;
2193 }
2194 // This is a real subset. See if we have a matching class.
2195 CodeGenRegisterClass *SubRC =
2196 getOrCreateSubClass(RC, &I->second,
2197 RC->getName() + "_with_" + I->first->getName());
2198 RC->setSubClassWithSubReg(&SubIdx, SubRC);
2199 }
2200}
2201
//
// Synthesize missing sub-classes of RC for getMatchingSuperRegClass().
//
// Create sub-classes of RC such that getMatchingSuperRegClass(RC, SubIdx, X)
// has a maximal result for any SubIdx and any X >= FirstSubRegRC.
//
void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
    std::list<CodeGenRegisterClass>::iterator FirstSubRegRC) {
  // (Super-register, sub-register) pairs of RC's members for one SubIdx.
  SmallVector<std::pair<const CodeGenRegister*,
                        const CodeGenRegister*>, 16> SSPairs;
  // Topological signatures seen among the sub-registers; used as a cheap
  // pre-filter before the per-member containment scan below.
  BitVector TopoSigs(getNumTopoSigs());

  // Iterate in SubRegIndex numerical order to visit synthetic indices last.
  for (auto &SubIdx : SubRegIndices) {
    // Skip indexes that aren't fully supported by RC's registers. This was
    // computed by inferSubClassWithSubReg() above which should have been
    // called first.
    if (RC->getSubClassWithSubReg(&SubIdx) != RC)
      continue;

    // Build list of (Super, Sub) pairs for this SubIdx.
    SSPairs.clear();
    TopoSigs.reset();
    for (const auto Super : RC->getMembers()) {
      const CodeGenRegister *Sub = Super->getSubRegs().find(&SubIdx)->second;
      assert(Sub && "Missing sub-register");
      SSPairs.push_back(std::make_pair(Super, Sub));
      TopoSigs.set(Sub->getTopoSig());
    }

    // Iterate over sub-register class candidates. Ignore classes created by
    // this loop. They will never be useful.
    // Store an iterator to the last element (not end) so that this loop
    // doesn't visit newly inserted elements (getOrCreateSubClass appends).
    assert(!RegClasses.empty());
    for (auto I = FirstSubRegRC, E = std::prev(RegClasses.end());
         I != std::next(E); ++I) {
      CodeGenRegisterClass &SubRC = *I;
      if (SubRC.Artificial)
        continue;
      // Topological shortcut: SubRC members have the wrong shape.
      if (!TopoSigs.anyCommon(SubRC.getTopoSigs()))
        continue;
      // Compute the subset of RC that maps into SubRC.
      CodeGenRegister::Vec SubSetVec;
      for (unsigned i = 0, e = SSPairs.size(); i != e; ++i)
        if (SubRC.contains(SSPairs[i].second))
          SubSetVec.push_back(SSPairs[i].first);

      if (SubSetVec.empty())
        continue;

      // RC injects completely into SubRC.
      sortAndUniqueRegisters(SubSetVec);
      if (SubSetVec.size() == SSPairs.size()) {
        SubRC.addSuperRegClass(&SubIdx, RC);
        continue;
      }

      // Only a subset of RC maps into SubRC. Make sure it is represented by a
      // class.
      getOrCreateSubClass(RC, &SubSetVec, RC->getName() + "_with_" +
                          SubIdx.getName() + "_in_" +
                          SubRC.getName());
    }
  }
}
2270
//
// Infer missing register classes.
//
// Drives the three infer* passes above over every register class, including
// classes those passes synthesize while this loop is running.
//
void CodeGenRegBank::computeInferredRegisterClasses() {
  assert(!RegClasses.empty());
  // When this function is called, the register classes have not been sorted
  // and assigned EnumValues yet. That means getSubClasses(),
  // getSuperClasses(), and hasSubClass() functions are defunct.

  // Use one-before-the-end so it doesn't move forward when new elements are
  // added. (RegClasses is a std::list, so existing iterators stay valid.)
  auto FirstNewRC = std::prev(RegClasses.end());

  // Visit all register classes, including the ones being added by the loop.
  // Watch out for iterator invalidation here.
  for (auto I = RegClasses.begin(), E = RegClasses.end(); I != E; ++I) {
    CodeGenRegisterClass *RC = &*I;
    if (RC->Artificial)
      continue;

    // Synthesize answers for getSubClassWithSubReg().
    inferSubClassWithSubReg(RC);

    // Synthesize answers for getCommonSubClass().
    inferCommonSubClass(RC);

    // Synthesize answers for getMatchingSuperRegClass().
    inferMatchingSuperRegClass(RC);

    // New register classes are created while this loop is running, and we need
    // to visit all of them. In particular, inferMatchingSuperRegClass needs
    // to match old super-register classes with sub-register classes created
    // after inferMatchingSuperRegClass was called. At this point,
    // inferMatchingSuperRegClass has checked SuperRC = [0..rci] with SubRC =
    // [0..FirstNewRC). We need to cover SubRC = [FirstNewRC..rci].
    if (I == FirstNewRC) {
      auto NextNewRC = std::prev(RegClasses.end());
      for (auto I2 = RegClasses.begin(), E2 = std::next(FirstNewRC); I2 != E2;
           ++I2)
        inferMatchingSuperRegClass(&*I2, E2);
      FirstNewRC = NextNewRC;
    }
  }
}
2315
2316/// getRegisterClassForRegister - Find the register class that contains the
2317/// specified physical register. If the register is not in a register class,
2318/// return null. If the register is in multiple classes, and the classes have a
2319/// superset-subset relationship and the same set of types, return the
2320/// superclass. Otherwise return null.
2321const CodeGenRegisterClass*
2322CodeGenRegBank::getRegClassForRegister(Record *R) {
2323 const CodeGenRegister *Reg = getReg(R);
2324 const CodeGenRegisterClass *FoundRC = nullptr;
2325 for (const auto &RC : getRegClasses()) {
2326 if (!RC.contains(Reg))
2327 continue;
2328
2329 // If this is the first class that contains the register,
2330 // make a note of it and go on to the next class.
2331 if (!FoundRC) {
2332 FoundRC = &RC;
2333 continue;
2334 }
2335
2336 // If a register's classes have different types, return null.
2337 if (RC.getValueTypes() != FoundRC->getValueTypes())
2338 return nullptr;
2339
2340 // Check to see if the previously found class that contains
2341 // the register is a subclass of the current class. If so,
2342 // prefer the superclass.
2343 if (RC.hasSubClass(FoundRC)) {
2344 FoundRC = &RC;
2345 continue;
2346 }
2347
2348 // Check to see if the previously found class that contains
2349 // the register is a superclass of the current class. If so,
2350 // prefer the superclass.
2351 if (FoundRC->hasSubClass(&RC))
2352 continue;
2353
2354 // Multiple classes, and neither is a superclass of the other.
2355 // Return null.
2356 return nullptr;
2357 }
2358 return FoundRC;
2359}
2360
// Compute the closure of Regs under sub-registers, plus any super-register
// that becomes completely covered by the resulting set. Returns a BitVector
// indexed by register EnumValue.
BitVector CodeGenRegBank::computeCoveredRegisters(ArrayRef<Record*> Regs) {
  SetVector<const CodeGenRegister*> Set;

  // First add Regs with all sub-registers.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    CodeGenRegister *Reg = getReg(Regs[i]);
    if (Set.insert(Reg))
      // Reg is new, add all sub-registers.
      // The pre-ordering is not important here.
      Reg->addSubRegsPreOrder(Set, *this);
  }

  // Second, find all super-registers that are completely covered by the set.
  // Note: Set grows while we iterate, so this must be an index loop that
  // re-reads Set.size(); newly added supers are visited in later iterations.
  for (unsigned i = 0; i != Set.size(); ++i) {
    const CodeGenRegister::SuperRegList &SR = Set[i]->getSuperRegs();
    for (unsigned j = 0, e = SR.size(); j != e; ++j) {
      const CodeGenRegister *Super = SR[j];
      if (!Super->CoveredBySubRegs || Set.count(Super))
        continue;
      // This new super-register is covered by its sub-registers.
      bool AllSubsInSet = true;
      const CodeGenRegister::SubRegMap &SRM = Super->getSubRegs();
      for (CodeGenRegister::SubRegMap::const_iterator I = SRM.begin(),
           E = SRM.end(); I != E; ++I)
        if (!Set.count(I->second)) {
          AllSubsInSet = false;
          break;
        }
      // All sub-registers in Set, add Super as well.
      // We will visit Super later to recheck its super-registers.
      if (AllSubsInSet)
        Set.insert(Super);
    }
  }

  // Convert to BitVector. Size is Registers.size() + 1 because EnumValues
  // are 1-based.
  BitVector BV(Registers.size() + 1);
  for (unsigned i = 0, e = Set.size(); i != e; ++i)
    BV.set(Set[i]->EnumValue);
  return BV;
}
2402
2403void CodeGenRegBank::printRegUnitName(unsigned Unit) const {
2404 if (Unit < NumNativeRegUnits)
2405 dbgs() << ' ' << RegUnits[Unit].Roots[0]->getName();
2406 else
2407 dbgs() << " #" << Unit;
2408}

/build/llvm-toolchain-snapshot-8~svn350071/include/llvm/MC/LaneBitmask.h

1//===- llvm/MC/LaneBitmask.h ------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9///
10/// \file
11/// A common definition of LaneBitmask for use in TableGen and CodeGen.
12///
13/// A lane mask is a bitmask representing the covering of a register with
14/// sub-registers.
15///
16/// This is typically used to track liveness at sub-register granularity.
17/// Lane masks for sub-register indices are similar to register units for
18/// physical registers. The individual bits in a lane mask can't be assigned
19/// any specific meaning. They can be used to check if two sub-register
20/// indices overlap.
21///
22/// Iff the target has a register such that:
23///
24/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
25///
26/// then:
27///
28/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
29
30#ifndef LLVM_MC_LANEBITMASK_H
31#define LLVM_MC_LANEBITMASK_H
32
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
37
38namespace llvm {
39
40 struct LaneBitmask {
41 // When changing the underlying type, change the format string as well.
42 using Type = unsigned;
43 enum : unsigned { BitWidth = 8*sizeof(Type) };
44 constexpr static const char *const FormatStr = "%08X";
45
46 constexpr LaneBitmask() = default;
47 explicit constexpr LaneBitmask(Type V) : Mask(V) {}
48
49 constexpr bool operator== (LaneBitmask M) const { return Mask == M.Mask; }
50 constexpr bool operator!= (LaneBitmask M) const { return Mask != M.Mask; }
51 constexpr bool operator< (LaneBitmask M) const { return Mask < M.Mask; }
52 constexpr bool none() const { return Mask == 0; }
53 constexpr bool any() const { return Mask != 0; }
54 constexpr bool all() const { return ~Mask == 0; }
55
56 constexpr LaneBitmask operator~() const {
57 return LaneBitmask(~Mask);
58 }
59 constexpr LaneBitmask operator|(LaneBitmask M) const {
60 return LaneBitmask(Mask | M.Mask);
61 }
62 constexpr LaneBitmask operator&(LaneBitmask M) const {
63 return LaneBitmask(Mask & M.Mask);
64 }
65 LaneBitmask &operator|=(LaneBitmask M) {
66 Mask |= M.Mask;
67 return *this;
68 }
69 LaneBitmask &operator&=(LaneBitmask M) {
70 Mask &= M.Mask;
71 return *this;
72 }
73
74 constexpr Type getAsInteger() const { return Mask; }
75
76 unsigned getNumLanes() const {
77 return countPopulation(Mask);
78 }
79 unsigned getHighestLane() const {
80 return Log2_32(Mask);
5
Calling 'Log2_32'
7
Returning from 'Log2_32'
8
Returning the value 4294967295
81 }
82
83 static constexpr LaneBitmask getNone() { return LaneBitmask(0); }
84 static constexpr LaneBitmask getAll() { return ~LaneBitmask(0); }
85 static constexpr LaneBitmask getLane(unsigned Lane) {
86 return LaneBitmask(Type(1) << Lane);
13
The result of the left shift is undefined due to shifting by '4294967295', which is greater or equal to the width of type 'llvm::LaneBitmask::Type'
87 }
88
89 private:
90 Type Mask = 0;
91 };
92
  /// Create Printable object to print LaneBitmasks on a \ref raw_ostream.
  /// The mask is captured by value, so the returned Printable stays valid
  /// after the original LaneBitmask goes out of scope.
  inline Printable PrintLaneMask(LaneBitmask LaneMask) {
    return Printable([LaneMask](raw_ostream &OS) {
      OS << format(LaneBitmask::FormatStr, LaneMask.getAsInteger());
    });
  }
99
100} // end namespace llvm
101
102#endif // LLVM_MC_LANEBITMASK_H

/build/llvm-toolchain-snapshot-8~svn350071/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains some functions that are useful for math stuff.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_MATHEXTRAS_H
15#define LLVM_SUPPORT_MATHEXTRAS_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/SwapByteOrder.h"
19#include <algorithm>
20#include <cassert>
21#include <climits>
22#include <cstring>
23#include <limits>
24#include <type_traits>
25
26#ifdef __ANDROID_NDK__
27#include <android/api-level.h>
28#endif
29
30#ifdef _MSC_VER
31// Declare these intrinsics manually rather including intrin.h. It's very
32// expensive, and MathExtras.h is popular.
33// #include <intrin.h>
34extern "C" {
35unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
36unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
37unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
38unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
39}
40#endif
41
42namespace llvm {
/// The behavior an operation has on an input of 0.
/// Passed to the bit-counting helpers below to select what they return for a
/// zero input (where the underlying hardware builtins are undefined).
enum ZeroBehavior {
  /// The returned value is undefined.
  ZB_Undefined,
  /// The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// The returned value is numeric_limits<T>::digits
  ZB_Width
};
52
53namespace detail {
// Portable fallback used when no width-specific specialization applies:
// count trailing zeros by binary search (O(log BitWidth) steps).
template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;  // all bits zero
    if (Val & 0x1)
      return 0;  // lowest bit set: no trailing zeros

    // Bisection method: at each step, test whether the low half of the
    // remaining bits is all zero; if so, discard it and record its width.
    std::size_t ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};
76
#if __GNUC__ >= 4 || defined(_MSC_VER)
// 32-bit specialization: a single hardware instruction via compiler
// builtins / intrinsics instead of the generic bisection loop.
template <typename T> struct TrailingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    // __builtin_ctz(0) / _BitScanForward(0) are undefined; handle the
    // defined zero behaviors up front.
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

// The 64-bit intrinsic is unavailable on 32-bit MSVC targets.
#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct TrailingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
110} // namespace detail
111
/// Count number of 0's from the least significant bit to the most
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
/// \returns the trailing-zero count, dispatched by sizeof(T) to a
/// width-specific implementation (hardware builtin where available).
template <typename T>
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
126
127namespace detail {
// Portable fallback used when no width-specific specialization applies:
// count leading zeros by binary search (O(log BitWidth) steps).
template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;  // all bits zero

    // Bisection method: keep whichever half of the word contains the
    // highest set bit, accumulating the number of zero bits skipped.
    std::size_t ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};
145
#if __GNUC__ >= 4 || defined(_MSC_VER)
// 32-bit specialization: a single hardware instruction via compiler
// builtins / intrinsics instead of the generic bisection loop.
template <typename T> struct LeadingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    // __builtin_clz(0) / _BitScanReverse(0) are undefined; handle the
    // defined zero behaviors up front.
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse(&Index, Val);
    // _BitScanReverse yields the bit index; XOR with 31 converts it to the
    // leading-zero count.
    return Index ^ 31;
#endif
  }
};

// The 64-bit intrinsic is unavailable on 32-bit MSVC targets.
#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct LeadingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    return Index ^ 63;
#endif
  }
};
#endif
#endif
179} // namespace detail
180
/// Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
/// \returns the leading-zero count, dispatched by sizeof(T) to a
/// width-specific implementation (hardware builtin where available).
template <typename T>
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
195
/// Get the index of the first set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Zero was handled above, so the trailing-zero count here is always a
  // valid bit index.
  return countTrailingZeros(Val, ZB_Undefined);
}
209
/// Create a bitmask with the N right-most bits set to 1, and all other
/// bits set to 0. Only unsigned types are allowed.
template <typename T> T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned<T>::value, "Invalid type!");
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  // N == 0 is special-cased because T(-1) >> Bits would be an
  // undefined full-width shift.
  if (N == 0)
    return 0;
  return T(-1) >> (Bits - N);
}
218
/// Create a bitmask with the N left-most bits set to 1, and all other
/// bits set to 0. Only unsigned types are allowed.
template <typename T> T maskLeadingOnes(unsigned N) {
  // The complement of a mask with the low (bit-width - N) bits set has
  // exactly the high N bits set.
  return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N right-most bits set to 0, and all other
/// bits set to 1. Only unsigned types are allowed.
template <typename T> T maskTrailingZeros(unsigned N) {
  return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N left-most bits set to 0, and all other
/// bits set to 1. Only unsigned types are allowed.
template <typename T> T maskLeadingZeros(unsigned N) {
  return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
236
/// Get the index of the last set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  // (clz ^ (digits - 1) == digits - 1 - clz for clz in [0, digits).)
  return countLeadingZeros(Val, ZB_Undefined) ^
         (std::numeric_limits<T>::digits - 1);
}
253
/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};

/// Reverse the bits in \p Val.
template <typename T>
T reverseBits(T Val) {
  // Reverse byte-by-byte via the lookup table, emitting the reversed bytes in
  // reverse order. memcpy in and out avoids strict-aliasing problems.
  unsigned char in[sizeof(Val)];
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}
278
// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  return static_cast<uint32_t>(Value >> 32);
}

/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  // Plain truncation keeps only the low 32 bits.
  return static_cast<uint32_t>(Value);
}

/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  // Widen before shifting so the shift happens in 64 bits; use static_cast
  // instead of the previous C-style casts, matching Hi_32/Lo_32.
  return (static_cast<uint64_t>(High) << 32) | static_cast<uint64_t>(Low);
}
297
298/// Checks if an integer fits into the given bit width.
299template <unsigned N> constexpr inline bool isInt(int64_t x) {
300 return N >= 64 || (-(INT64_C(1)1L<<(N-1)) <= x && x < (INT64_C(1)1L<<(N-1)));
301}
302// Template specializations to get better code for common cases.
303template <> constexpr inline bool isInt<8>(int64_t x) {
304 return static_cast<int8_t>(x) == x;
305}
306template <> constexpr inline bool isInt<16>(int64_t x) {
307 return static_cast<int16_t>(x) == x;
308}
309template <> constexpr inline bool isInt<32>(int64_t x) {
310 return static_cast<int32_t>(x) == x;
311}
312
313/// Checks if a signed integer is an N bit number shifted left by S.
314template <unsigned N, unsigned S>
315constexpr inline bool isShiftedInt(int64_t x) {
316 static_assert(
317 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
318 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
319 return isInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0);
320}
321
322/// Checks if an unsigned integer fits into the given bit width.
323///
324/// This is written as two functions rather than as simply
325///
326/// return N >= 64 || X < (UINT64_C(1) << N);
327///
328/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
329/// left too many places.
330template <unsigned N>
331constexpr inline typename std::enable_if<(N < 64), bool>::type
332isUInt(uint64_t X) {
333 static_assert(N > 0, "isUInt<0> doesn't make sense");
334 return X < (UINT64_C(1)1UL << (N));
335}
336template <unsigned N>
337constexpr inline typename std::enable_if<N >= 64, bool>::type
338isUInt(uint64_t X) {
339 return true;
340}
341
342// Template specializations to get better code for common cases.
343template <> constexpr inline bool isUInt<8>(uint64_t x) {
344 return static_cast<uint8_t>(x) == x;
345}
346template <> constexpr inline bool isUInt<16>(uint64_t x) {
347 return static_cast<uint16_t>(x) == x;
348}
349template <> constexpr inline bool isUInt<32>(uint64_t x) {
350 return static_cast<uint32_t>(x) == x;
351}
352
353/// Checks if a unsigned integer is an N bit number shifted left by S.
354template <unsigned N, unsigned S>
355constexpr inline bool isShiftedUInt(uint64_t x) {
356 static_assert(
357 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
358 static_assert(N + S <= 64,
359 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
360 // Per the two static_asserts above, S must be strictly less than 64. So
361 // 1 << S is not undefined behavior.
362 return isUInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0);
363}
364
/// Gets the maximum value for a N-bit unsigned integer.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64. But this works and doesn't have a
  // branch.
  return UINT64_MAX >> (64 - N);
}

/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // Negate in unsigned arithmetic so N == 64 wraps to INT64_MIN rather than
  // overflowing a signed shift.
  return -(UINT64_C(1) << (N - 1));
}

/// Gets the maximum value for a N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // This relies on two's complement wraparound when N == 64, so we convert to
  // int64_t only at the very end to avoid UB.
  return (UINT64_C(1) << (N - 1)) - 1;
}

/// Checks if an unsigned integer fits into the given (dynamic) bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
  return N >= 64 || x <= maxUIntN(N);
}

/// Checks if an signed integer fits into the given (dynamic) bit width.
inline bool isIntN(unsigned N, int64_t x) {
  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
}
401
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  // Adding one to a low-bit mask carries into a single bit that cannot
  // overlap any bit of the original value.
  return Value != 0 && ((Value + 1) & Value) == 0;
}

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value != 0 && ((Value + 1) & Value) == 0;
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  // (Value - 1) | Value fills in the trailing zeros, reducing a shifted mask
  // to a low-bit mask.
  return Value != 0 && isMask_32((Value - 1) | Value);
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  return Value != 0 && isMask_64((Value - 1) | Value);
}

/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  // A power of two shares no bits with its predecessor.
  return Value != 0 && (Value & (Value - 1)) == 0;
}

/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value != 0 && (Value & (Value - 1)) == 0;
}
437
/// Return a byte-swapped representation of the 16-bit argument.
inline uint16_t ByteSwap_16(uint16_t Value) {
  // Thin wrapper over the platform-abstracted swap in llvm::sys.
  return sys::SwapByteOrder_16(Value);
}

/// Return a byte-swapped representation of the 32-bit argument.
inline uint32_t ByteSwap_32(uint32_t Value) {
  return sys::SwapByteOrder_32(Value);
}

/// Return a byte-swapped representation of the 64-bit argument.
inline uint64_t ByteSwap_64(uint64_t Value) {
  return sys::SwapByteOrder_64(Value);
}
452
/// Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Complementing turns leading ones into leading zeros.
  return countLeadingZeros<T>(~Value, ZB);
}

/// Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countTrailingOnes(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Complementing turns trailing ones into trailing zeros.
  return countTrailingZeros<T>(~Value, ZB);
}
484
namespace detail {
/// Generic population counter; types of four bytes or fewer are handled here.
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if __GNUC__ >= 4
    return __builtin_popcount(Value);
#else
    // Parallel bit count (Stanford bithacks): sum adjacent bits into 2-bit,
    // then 4-bit fields, and accumulate the per-byte sums into the top byte
    // via the multiply.
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};

/// 8-byte specialization.
template <typename T> struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if __GNUC__ >= 4
    return __builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
526
/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android NDKs lack log2(); derive it from the natural log.
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}
535
536/// Return the floor log base 2 of the specified value, -1 if the value is zero.
537/// (32 bit edition.)
538/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
539inline unsigned Log2_32(uint32_t Value) {
540 return 31 - countLeadingZeros(Value);
6
Returning the value 4294967295
541}
542
/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
///
/// Note the unsigned return type: for Value == 0 the "-1" wraps to UINT_MAX,
/// so the result must not be used as a shift amount without excluding zero.
inline unsigned Log2_64(uint64_t Value) {
  return 63 - countLeadingZeros(Value);
}

/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  // Value - 1 deliberately wraps for Value == 0: clz(0xFFFFFFFF) == 0 then
  // yields the documented result of 32.
  return 32 - countLeadingZeros(Value - 1);
}

/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - countLeadingZeros(Value - 1);
}
561
/// Return the greatest common divisor of the values using Euclid's algorithm.
/// By convention GCD(x, 0) == GCD(0, x) == x.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  while (B != 0) {
    const uint64_t Remainder = A % B;
    A = B;
    B = Remainder;
  }
  return A;
}
571
/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  // memcpy is the portable, aliasing-safe way to reinterpret the bytes.
  double Result;
  std::memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}

/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  float Result;
  std::memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}

/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  uint64_t Result;
  std::memcpy(&Result, &Double, sizeof(Double));
  return Result;
}

/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  uint32_t Result;
  std::memcpy(&Result, &Float, sizeof(Float));
  return Result;
}
607
/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // The result is the largest power of 2 that divides both A and B: isolate
  // the lowest set bit of (A | B).
  //
  // Replace "-Value" by "1+~Value" in the following commented code to avoid
  // MSVC warning C4146
  //   return (A | B) & -(A | B);
  return (A | B) & (1 + ~(A | B));
}
618
/// Aligns \c Addr to \c Alignment bytes, rounding up.
///
/// Alignment should be a power of two. This method rounds up, so
/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
  assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
         "Alignment is not a power of two!");

  // Guard against wraparound at the top of the address space when adding
  // Alignment - 1.
  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);

  // Round up, then clear the low log2(Alignment) bits.
  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
}

/// Returns the necessary adjustment for aligning \c Ptr to \c Alignment
/// bytes, rounding up.
inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}
637
/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
  // Smear the highest set bit into every lower position, producing
  // 2^k - 1 >= A; adding one then yields the next power of two (or zero when
  // the add wraps).
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}
649
/// Returns the power of two which is less than or equal to the given value.
/// Essentially, it is a floor operation across the domain of powers of two.
inline uint64_t PowerOf2Floor(uint64_t A) {
  if (!A) return 0;
  // 63 - clz(A) is the index of A's highest set bit; A != 0 makes
  // ZB_Undefined safe here.
  return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
}
656
/// Returns the power of two which is greater than or equal to the given value.
/// Essentially, it is a ceil operation across the domain of powers of two.
inline uint64_t PowerOf2Ceil(uint64_t A) {
  if (!A)
    return 0;
  // NextPowerOf2(A - 1), inlined: step down to the predecessor, smear its
  // highest bit into all lower positions, then add one.
  uint64_t V = A - 1;
  V |= (V >> 1);
  V |= (V >> 2);
  V |= (V >> 4);
  V |= (V >> 8);
  V |= (V >> 16);
  V |= (V >> 32);
  return V + 1;
}
664
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  // Shift into the skew-free lattice, round up to a multiple of Align, then
  // re-apply the skew.
  const uint64_t Rounded = (Value + Align - 1 - Skew) / Align * Align;
  return Rounded + Skew;
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  return (Value + Align - 1) / Align * Align;
}

/// Returns the integer ceil(Numerator / Denominator).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  return alignTo(Numerator, Denominator) / Denominator;
}

/// \c alignTo for contexts where a constant expression is required.
/// \sa alignTo
///
/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
template <uint64_t Align>
struct AlignTo {
  static_assert(Align != 0u, "Align must be non-zero");
  template <uint64_t Value>
  struct from_value {
    static const uint64_t value = (Value + Align - 1) / Align * Align;
  };
};

/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  // Truncate (Value - Skew) down to a multiple of Align, then re-apply the
  // skew.
  return (Value - Skew) / Align * Align + Skew;
}

/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align. \p Align must be
/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
  return alignTo(Value, Align) - Value;
}
730
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  // Shift the B-bit field to the top, then arithmetic-shift back down to
  // replicate its sign bit.
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  const unsigned Shift = 32 - B;
  return int32_t(X << Shift) >> Shift;
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  return int64_t(x << (64 - B)) >> (64 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  const unsigned Shift = 64 - B;
  return int64_t(X << Shift) >> Shift;
}
762
/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
AbsoluteDifference(T X, T Y) {
  // Subtract the smaller from the larger so the unsigned result never wraps;
  // equivalent to std::max(X, Y) - std::min(X, Y).
  return X > Y ? X - Y : Y - X;
}
770
/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Discard;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Discard;
  // Hacker's Delight, p. 29: an unsigned sum overflowed iff it wrapped below
  // either operand.
  const T Sum = X + Y;
  Overflowed = (Sum < X || Sum < Y);
  return Overflowed ? std::numeric_limits<T>::max() : Sum;
}
787
/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    // The product's bit width is strictly below the type's: cannot overflow.
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    // The product's bit width definitely exceeds the type's: saturate.
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  if (X & 1)
    // Re-add the bottom bit's contribution; SaturatingAdd reports any final
    // overflow through ResultOverflowed.
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}

/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  T Product = SaturatingMultiply(X, Y, &Overflowed);
  if (Overflowed)
    // Already saturated; adding A cannot bring the result back in range.
    return Product;

  return SaturatingAdd(A, Product, &Overflowed);
}
849
/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
/// (Declaration only; the definition is provided out of line.)
extern const float huge_valf;
852} // End llvm namespace
853
854#endif