Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/MC/LaneBitmask.h
Warning: line 86, column 34
The result of the left shift is undefined due to shifting by '4294967295', which is greater or equal to the width of type 'llvm::LaneBitmask::Type'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CodeGenRegisters.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I utils/TableGen -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/utils/TableGen -I include -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-20-140412-16051-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/utils/TableGen/CodeGenRegisters.cpp

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/utils/TableGen/CodeGenRegisters.cpp

1//===- CodeGenRegisters.cpp - Register and RegisterClass Info -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines structures to encapsulate information gleaned from the
10// target register and register class definitions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CodeGenRegisters.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/IntEqClasses.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/Support/Debug.h"
27#include "llvm/Support/raw_ostream.h"
28#include "llvm/TableGen/Error.h"
29#include "llvm/TableGen/Record.h"
30#include <algorithm>
31#include <cassert>
32#include <cstdint>
33#include <iterator>
34#include <map>
35#include <queue>
36#include <set>
37#include <string>
38#include <tuple>
39#include <utility>
40#include <vector>
41
42using namespace llvm;
43
44#define DEBUG_TYPE"regalloc-emitter" "regalloc-emitter"
45
46//===----------------------------------------------------------------------===//
47// CodeGenSubRegIndex
48//===----------------------------------------------------------------------===//
49
50CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
51 : TheDef(R), EnumValue(Enum), AllSuperRegsCovered(true), Artificial(true) {
52 Name = std::string(R->getName());
53 if (R->getValue("Namespace"))
54 Namespace = std::string(R->getValueAsString("Namespace"));
55 Size = R->getValueAsInt("Size");
56 Offset = R->getValueAsInt("Offset");
57}
58
59CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
60 unsigned Enum)
61 : TheDef(nullptr), Name(std::string(N)), Namespace(std::string(Nspace)),
62 Size(-1), Offset(-1), EnumValue(Enum), AllSuperRegsCovered(true),
63 Artificial(true) {}
64
65std::string CodeGenSubRegIndex::getQualifiedName() const {
66 std::string N = getNamespace();
67 if (!N.empty())
68 N += "::";
69 N += getName();
70 return N;
71}
72
73void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
74 if (!TheDef)
75 return;
76
77 std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
78 if (!Comps.empty()) {
79 if (Comps.size() != 2)
80 PrintFatalError(TheDef->getLoc(),
81 "ComposedOf must have exactly two entries");
82 CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
83 CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
84 CodeGenSubRegIndex *X = A->addComposite(B, this);
85 if (X)
86 PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
87 }
88
89 std::vector<Record*> Parts =
90 TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
91 if (!Parts.empty()) {
92 if (Parts.size() < 2)
93 PrintFatalError(TheDef->getLoc(),
94 "CoveredBySubRegs must have two or more entries");
95 SmallVector<CodeGenSubRegIndex*, 8> IdxParts;
96 for (Record *Part : Parts)
97 IdxParts.push_back(RegBank.getSubRegIdx(Part));
98 setConcatenationOf(IdxParts);
99 }
100}
101
102LaneBitmask CodeGenSubRegIndex::computeLaneMask() const {
103 // Already computed?
104 if (LaneMask.any())
105 return LaneMask;
106
107 // Recursion guard, shouldn't be required.
108 LaneMask = LaneBitmask::getAll();
109
110 // The lane mask is simply the union of all sub-indices.
111 LaneBitmask M;
112 for (const auto &C : Composed)
113 M |= C.second->computeLaneMask();
114 assert(M.any() && "Missing lane mask, sub-register cycle?")(static_cast <bool> (M.any() && "Missing lane mask, sub-register cycle?"
) ? void (0) : __assert_fail ("M.any() && \"Missing lane mask, sub-register cycle?\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 114, __extension__
__PRETTY_FUNCTION__))
;
115 LaneMask = M;
116 return LaneMask;
117}
118
119void CodeGenSubRegIndex::setConcatenationOf(
120 ArrayRef<CodeGenSubRegIndex*> Parts) {
121 if (ConcatenationOf.empty())
122 ConcatenationOf.assign(Parts.begin(), Parts.end());
123 else
124 assert(std::equal(Parts.begin(), Parts.end(),(static_cast <bool> (std::equal(Parts.begin(), Parts.end
(), ConcatenationOf.begin()) && "parts consistent") ?
void (0) : __assert_fail ("std::equal(Parts.begin(), Parts.end(), ConcatenationOf.begin()) && \"parts consistent\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 125, __extension__
__PRETTY_FUNCTION__))
125 ConcatenationOf.begin()) && "parts consistent")(static_cast <bool> (std::equal(Parts.begin(), Parts.end
(), ConcatenationOf.begin()) && "parts consistent") ?
void (0) : __assert_fail ("std::equal(Parts.begin(), Parts.end(), ConcatenationOf.begin()) && \"parts consistent\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 125, __extension__
__PRETTY_FUNCTION__))
;
126}
127
128void CodeGenSubRegIndex::computeConcatTransitiveClosure() {
129 for (SmallVectorImpl<CodeGenSubRegIndex*>::iterator
130 I = ConcatenationOf.begin(); I != ConcatenationOf.end(); /*empty*/) {
131 CodeGenSubRegIndex *SubIdx = *I;
132 SubIdx->computeConcatTransitiveClosure();
133#ifndef NDEBUG
134 for (CodeGenSubRegIndex *SRI : SubIdx->ConcatenationOf)
135 assert(SRI->ConcatenationOf.empty() && "No transitive closure?")(static_cast <bool> (SRI->ConcatenationOf.empty() &&
"No transitive closure?") ? void (0) : __assert_fail ("SRI->ConcatenationOf.empty() && \"No transitive closure?\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 135, __extension__
__PRETTY_FUNCTION__))
;
136#endif
137
138 if (SubIdx->ConcatenationOf.empty()) {
139 ++I;
140 } else {
141 I = ConcatenationOf.erase(I);
142 I = ConcatenationOf.insert(I, SubIdx->ConcatenationOf.begin(),
143 SubIdx->ConcatenationOf.end());
144 I += SubIdx->ConcatenationOf.size();
145 }
146 }
147}
148
149//===----------------------------------------------------------------------===//
150// CodeGenRegister
151//===----------------------------------------------------------------------===//
152
153CodeGenRegister::CodeGenRegister(Record *R, unsigned Enum)
154 : TheDef(R), EnumValue(Enum),
155 CostPerUse(R->getValueAsListOfInts("CostPerUse")),
156 CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")),
157 HasDisjunctSubRegs(false), SubRegsComplete(false),
158 SuperRegsComplete(false), TopoSig(~0u) {
159 Artificial = R->getValueAsBit("isArtificial");
160}
161
162void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) {
163 std::vector<Record*> SRIs = TheDef->getValueAsListOfDefs("SubRegIndices");
164 std::vector<Record*> SRs = TheDef->getValueAsListOfDefs("SubRegs");
165
166 if (SRIs.size() != SRs.size())
167 PrintFatalError(TheDef->getLoc(),
168 "SubRegs and SubRegIndices must have the same size");
169
170 for (unsigned i = 0, e = SRIs.size(); i != e; ++i) {
171 ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i]));
172 ExplicitSubRegs.push_back(RegBank.getReg(SRs[i]));
173 }
174
175 // Also compute leading super-registers. Each register has a list of
176 // covered-by-subregs super-registers where it appears as the first explicit
177 // sub-register.
178 //
179 // This is used by computeSecondarySubRegs() to find candidates.
180 if (CoveredBySubRegs && !ExplicitSubRegs.empty())
181 ExplicitSubRegs.front()->LeadingSuperRegs.push_back(this);
182
183 // Add ad hoc alias links. This is a symmetric relationship between two
184 // registers, so build a symmetric graph by adding links in both ends.
185 std::vector<Record*> Aliases = TheDef->getValueAsListOfDefs("Aliases");
186 for (Record *Alias : Aliases) {
187 CodeGenRegister *Reg = RegBank.getReg(Alias);
188 ExplicitAliases.push_back(Reg);
189 Reg->ExplicitAliases.push_back(this);
190 }
191}
192
193StringRef CodeGenRegister::getName() const {
194 assert(TheDef && "no def")(static_cast <bool> (TheDef && "no def") ? void
(0) : __assert_fail ("TheDef && \"no def\"", "llvm/utils/TableGen/CodeGenRegisters.cpp"
, 194, __extension__ __PRETTY_FUNCTION__))
;
195 return TheDef->getName();
196}
197
198namespace {
199
200// Iterate over all register units in a set of registers.
201class RegUnitIterator {
202 CodeGenRegister::Vec::const_iterator RegI, RegE;
203 CodeGenRegister::RegUnitList::iterator UnitI, UnitE;
204 static CodeGenRegister::RegUnitList Sentinel;
205
206public:
207 RegUnitIterator(const CodeGenRegister::Vec &Regs):
208 RegI(Regs.begin()), RegE(Regs.end()) {
209
210 if (RegI == RegE) {
211 UnitI = Sentinel.end();
212 UnitE = Sentinel.end();
213 } else {
214 UnitI = (*RegI)->getRegUnits().begin();
215 UnitE = (*RegI)->getRegUnits().end();
216 advance();
217 }
218 }
219
220 bool isValid() const { return UnitI != UnitE; }
221
222 unsigned operator* () const { assert(isValid())(static_cast <bool> (isValid()) ? void (0) : __assert_fail
("isValid()", "llvm/utils/TableGen/CodeGenRegisters.cpp", 222
, __extension__ __PRETTY_FUNCTION__))
; return *UnitI; }
223
224 const CodeGenRegister *getReg() const { assert(isValid())(static_cast <bool> (isValid()) ? void (0) : __assert_fail
("isValid()", "llvm/utils/TableGen/CodeGenRegisters.cpp", 224
, __extension__ __PRETTY_FUNCTION__))
; return *RegI; }
225
226 /// Preincrement. Move to the next unit.
227 void operator++() {
228 assert(isValid() && "Cannot advance beyond the last operand")(static_cast <bool> (isValid() && "Cannot advance beyond the last operand"
) ? void (0) : __assert_fail ("isValid() && \"Cannot advance beyond the last operand\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 228, __extension__
__PRETTY_FUNCTION__))
;
229 ++UnitI;
230 advance();
231 }
232
233protected:
234 void advance() {
235 while (UnitI == UnitE) {
236 if (++RegI == RegE)
237 break;
238 UnitI = (*RegI)->getRegUnits().begin();
239 UnitE = (*RegI)->getRegUnits().end();
240 }
241 }
242};
243
244CodeGenRegister::RegUnitList RegUnitIterator::Sentinel;
245
246} // end anonymous namespace
247
248// Return true of this unit appears in RegUnits.
249static bool hasRegUnit(CodeGenRegister::RegUnitList &RegUnits, unsigned Unit) {
250 return RegUnits.test(Unit);
251}
252
253// Inherit register units from subregisters.
254// Return true if the RegUnits changed.
255bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
256 bool changed = false;
257 for (const auto &SubReg : SubRegs) {
258 CodeGenRegister *SR = SubReg.second;
259 // Merge the subregister's units into this register's RegUnits.
260 changed |= (RegUnits |= SR->RegUnits);
261 }
262
263 return changed;
264}
265
266const CodeGenRegister::SubRegMap &
267CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
268 // Only compute this map once.
269 if (SubRegsComplete)
270 return SubRegs;
271 SubRegsComplete = true;
272
273 HasDisjunctSubRegs = ExplicitSubRegs.size() > 1;
274
275 // First insert the explicit subregs and make sure they are fully indexed.
276 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
277 CodeGenRegister *SR = ExplicitSubRegs[i];
278 CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
279 if (!SR->Artificial)
280 Idx->Artificial = false;
281 if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
282 PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
283 " appears twice in Register " + getName());
284 // Map explicit sub-registers first, so the names take precedence.
285 // The inherited sub-registers are mapped below.
286 SubReg2Idx.insert(std::make_pair(SR, Idx));
287 }
288
289 // Keep track of inherited subregs and how they can be reached.
290 SmallPtrSet<CodeGenRegister*, 8> Orphans;
291
292 // Clone inherited subregs and place duplicate entries in Orphans.
293 // Here the order is important - earlier subregs take precedence.
294 for (CodeGenRegister *ESR : ExplicitSubRegs) {
295 const SubRegMap &Map = ESR->computeSubRegs(RegBank);
296 HasDisjunctSubRegs |= ESR->HasDisjunctSubRegs;
297
298 for (const auto &SR : Map) {
299 if (!SubRegs.insert(SR).second)
300 Orphans.insert(SR.second);
301 }
302 }
303
304 // Expand any composed subreg indices.
305 // If dsub_2 has ComposedOf = [qsub_1, dsub_0], and this register has a
306 // qsub_1 subreg, add a dsub_2 subreg. Keep growing Indices and process
307 // expanded subreg indices recursively.
308 SmallVector<CodeGenSubRegIndex*, 8> Indices = ExplicitSubRegIndices;
309 for (unsigned i = 0; i != Indices.size(); ++i) {
310 CodeGenSubRegIndex *Idx = Indices[i];
311 const CodeGenSubRegIndex::CompMap &Comps = Idx->getComposites();
312 CodeGenRegister *SR = SubRegs[Idx];
313 const SubRegMap &Map = SR->computeSubRegs(RegBank);
314
315 // Look at the possible compositions of Idx.
316 // They may not all be supported by SR.
317 for (auto Comp : Comps) {
318 SubRegMap::const_iterator SRI = Map.find(Comp.first);
319 if (SRI == Map.end())
320 continue; // Idx + I->first doesn't exist in SR.
321 // Add I->second as a name for the subreg SRI->second, assuming it is
322 // orphaned, and the name isn't already used for something else.
323 if (SubRegs.count(Comp.second) || !Orphans.erase(SRI->second))
324 continue;
325 // We found a new name for the orphaned sub-register.
326 SubRegs.insert(std::make_pair(Comp.second, SRI->second));
327 Indices.push_back(Comp.second);
328 }
329 }
330
331 // Now Orphans contains the inherited subregisters without a direct index.
332 // Create inferred indexes for all missing entries.
333 // Work backwards in the Indices vector in order to compose subregs bottom-up.
334 // Consider this subreg sequence:
335 //
336 // qsub_1 -> dsub_0 -> ssub_0
337 //
338 // The qsub_1 -> dsub_0 composition becomes dsub_2, so the ssub_0 register
339 // can be reached in two different ways:
340 //
341 // qsub_1 -> ssub_0
342 // dsub_2 -> ssub_0
343 //
344 // We pick the latter composition because another register may have [dsub_0,
345 // dsub_1, dsub_2] subregs without necessarily having a qsub_1 subreg. The
346 // dsub_2 -> ssub_0 composition can be shared.
347 while (!Indices.empty() && !Orphans.empty()) {
348 CodeGenSubRegIndex *Idx = Indices.pop_back_val();
349 CodeGenRegister *SR = SubRegs[Idx];
350 const SubRegMap &Map = SR->computeSubRegs(RegBank);
351 for (const auto &SubReg : Map)
352 if (Orphans.erase(SubReg.second))
353 SubRegs[RegBank.getCompositeSubRegIndex(Idx, SubReg.first)] = SubReg.second;
354 }
355
356 // Compute the inverse SubReg -> Idx map.
357 for (const auto &SubReg : SubRegs) {
358 if (SubReg.second == this) {
359 ArrayRef<SMLoc> Loc;
360 if (TheDef)
361 Loc = TheDef->getLoc();
362 PrintFatalError(Loc, "Register " + getName() +
363 " has itself as a sub-register");
364 }
365
366 // Compute AllSuperRegsCovered.
367 if (!CoveredBySubRegs)
368 SubReg.first->AllSuperRegsCovered = false;
369
370 // Ensure that every sub-register has a unique name.
371 DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*>::iterator Ins =
372 SubReg2Idx.insert(std::make_pair(SubReg.second, SubReg.first)).first;
373 if (Ins->second == SubReg.first)
374 continue;
375 // Trouble: Two different names for SubReg.second.
376 ArrayRef<SMLoc> Loc;
377 if (TheDef)
378 Loc = TheDef->getLoc();
379 PrintFatalError(Loc, "Sub-register can't have two names: " +
380 SubReg.second->getName() + " available as " +
381 SubReg.first->getName() + " and " + Ins->second->getName());
382 }
383
384 // Derive possible names for sub-register concatenations from any explicit
385 // sub-registers. By doing this before computeSecondarySubRegs(), we ensure
386 // that getConcatSubRegIndex() won't invent any concatenated indices that the
387 // user already specified.
388 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
389 CodeGenRegister *SR = ExplicitSubRegs[i];
390 if (!SR->CoveredBySubRegs || SR->ExplicitSubRegs.size() <= 1 ||
391 SR->Artificial)
392 continue;
393
394 // SR is composed of multiple sub-regs. Find their names in this register.
395 SmallVector<CodeGenSubRegIndex*, 8> Parts;
396 for (unsigned j = 0, e = SR->ExplicitSubRegs.size(); j != e; ++j) {
397 CodeGenSubRegIndex &I = *SR->ExplicitSubRegIndices[j];
398 if (!I.Artificial)
399 Parts.push_back(getSubRegIndex(SR->ExplicitSubRegs[j]));
400 }
401
402 // Offer this as an existing spelling for the concatenation of Parts.
403 CodeGenSubRegIndex &Idx = *ExplicitSubRegIndices[i];
404 Idx.setConcatenationOf(Parts);
405 }
406
407 // Initialize RegUnitList. Because getSubRegs is called recursively, this
408 // processes the register hierarchy in postorder.
409 //
410 // Inherit all sub-register units. It is good enough to look at the explicit
411 // sub-registers, the other registers won't contribute any more units.
412 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
413 CodeGenRegister *SR = ExplicitSubRegs[i];
414 RegUnits |= SR->RegUnits;
415 }
416
417 // Absent any ad hoc aliasing, we create one register unit per leaf register.
418 // These units correspond to the maximal cliques in the register overlap
419 // graph which is optimal.
420 //
421 // When there is ad hoc aliasing, we simply create one unit per edge in the
422 // undirected ad hoc aliasing graph. Technically, we could do better by
423 // identifying maximal cliques in the ad hoc graph, but cliques larger than 2
424 // are extremely rare anyway (I've never seen one), so we don't bother with
425 // the added complexity.
426 for (unsigned i = 0, e = ExplicitAliases.size(); i != e; ++i) {
427 CodeGenRegister *AR = ExplicitAliases[i];
428 // Only visit each edge once.
429 if (AR->SubRegsComplete)
430 continue;
431 // Create a RegUnit representing this alias edge, and add it to both
432 // registers.
433 unsigned Unit = RegBank.newRegUnit(this, AR);
434 RegUnits.set(Unit);
435 AR->RegUnits.set(Unit);
436 }
437
438 // Finally, create units for leaf registers without ad hoc aliases. Note that
439 // a leaf register with ad hoc aliases doesn't get its own unit - it isn't
440 // necessary. This means the aliasing leaf registers can share a single unit.
441 if (RegUnits.empty())
442 RegUnits.set(RegBank.newRegUnit(this));
443
444 // We have now computed the native register units. More may be adopted later
445 // for balancing purposes.
446 NativeRegUnits = RegUnits;
447
448 return SubRegs;
449}
450
451// In a register that is covered by its sub-registers, try to find redundant
452// sub-registers. For example:
453//
454// QQ0 = {Q0, Q1}
455// Q0 = {D0, D1}
456// Q1 = {D2, D3}
457//
458// We can infer that D1_D2 is also a sub-register, even if it wasn't named in
459// the register definition.
460//
461// The explicitly specified registers form a tree. This function discovers
462// sub-register relationships that would force a DAG.
463//
464void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
465 SmallVector<SubRegMap::value_type, 8> NewSubRegs;
466
467 std::queue<std::pair<CodeGenSubRegIndex*,CodeGenRegister*>> SubRegQueue;
468 for (std::pair<CodeGenSubRegIndex*,CodeGenRegister*> P : SubRegs)
469 SubRegQueue.push(P);
470
471 // Look at the leading super-registers of each sub-register. Those are the
472 // candidates for new sub-registers, assuming they are fully contained in
473 // this register.
474 while (!SubRegQueue.empty()) {
475 CodeGenSubRegIndex *SubRegIdx;
476 const CodeGenRegister *SubReg;
477 std::tie(SubRegIdx, SubReg) = SubRegQueue.front();
478 SubRegQueue.pop();
479
480 const CodeGenRegister::SuperRegList &Leads = SubReg->LeadingSuperRegs;
481 for (unsigned i = 0, e = Leads.size(); i != e; ++i) {
482 CodeGenRegister *Cand = const_cast<CodeGenRegister*>(Leads[i]);
483 // Already got this sub-register?
484 if (Cand == this || getSubRegIndex(Cand))
485 continue;
486 // Check if each component of Cand is already a sub-register.
487 assert(!Cand->ExplicitSubRegs.empty() &&(static_cast <bool> (!Cand->ExplicitSubRegs.empty() &&
"Super-register has no sub-registers") ? void (0) : __assert_fail
("!Cand->ExplicitSubRegs.empty() && \"Super-register has no sub-registers\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 488, __extension__
__PRETTY_FUNCTION__))
488 "Super-register has no sub-registers")(static_cast <bool> (!Cand->ExplicitSubRegs.empty() &&
"Super-register has no sub-registers") ? void (0) : __assert_fail
("!Cand->ExplicitSubRegs.empty() && \"Super-register has no sub-registers\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 488, __extension__
__PRETTY_FUNCTION__))
;
489 if (Cand->ExplicitSubRegs.size() == 1)
490 continue;
491 SmallVector<CodeGenSubRegIndex*, 8> Parts;
492 // We know that the first component is (SubRegIdx,SubReg). However we
493 // may still need to split it into smaller subregister parts.
494 assert(Cand->ExplicitSubRegs[0] == SubReg && "LeadingSuperRegs correct")(static_cast <bool> (Cand->ExplicitSubRegs[0] == SubReg
&& "LeadingSuperRegs correct") ? void (0) : __assert_fail
("Cand->ExplicitSubRegs[0] == SubReg && \"LeadingSuperRegs correct\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 494, __extension__
__PRETTY_FUNCTION__))
;
495 assert(getSubRegIndex(SubReg) == SubRegIdx && "LeadingSuperRegs correct")(static_cast <bool> (getSubRegIndex(SubReg) == SubRegIdx
&& "LeadingSuperRegs correct") ? void (0) : __assert_fail
("getSubRegIndex(SubReg) == SubRegIdx && \"LeadingSuperRegs correct\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 495, __extension__
__PRETTY_FUNCTION__))
;
496 for (CodeGenRegister *SubReg : Cand->ExplicitSubRegs) {
497 if (CodeGenSubRegIndex *SubRegIdx = getSubRegIndex(SubReg)) {
498 if (SubRegIdx->ConcatenationOf.empty())
499 Parts.push_back(SubRegIdx);
500 else
501 append_range(Parts, SubRegIdx->ConcatenationOf);
502 } else {
503 // Sub-register doesn't exist.
504 Parts.clear();
505 break;
506 }
507 }
508 // There is nothing to do if some Cand sub-register is not part of this
509 // register.
510 if (Parts.empty())
511 continue;
512
513 // Each part of Cand is a sub-register of this. Make the full Cand also
514 // a sub-register with a concatenated sub-register index.
515 CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts);
516 std::pair<CodeGenSubRegIndex*,CodeGenRegister*> NewSubReg =
517 std::make_pair(Concat, Cand);
518
519 if (!SubRegs.insert(NewSubReg).second)
520 continue;
521
522 // We inserted a new subregister.
523 NewSubRegs.push_back(NewSubReg);
524 SubRegQueue.push(NewSubReg);
525 SubReg2Idx.insert(std::make_pair(Cand, Concat));
526 }
527 }
528
529 // Create sub-register index composition maps for the synthesized indices.
530 for (unsigned i = 0, e = NewSubRegs.size(); i != e; ++i) {
531 CodeGenSubRegIndex *NewIdx = NewSubRegs[i].first;
532 CodeGenRegister *NewSubReg = NewSubRegs[i].second;
533 for (auto SubReg : NewSubReg->SubRegs) {
534 CodeGenSubRegIndex *SubIdx = getSubRegIndex(SubReg.second);
535 if (!SubIdx)
536 PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " +
537 SubReg.second->getName() +
538 " in " + getName());
539 NewIdx->addComposite(SubReg.first, SubIdx);
540 }
541 }
542}
543
544void CodeGenRegister::computeSuperRegs(CodeGenRegBank &RegBank) {
545 // Only visit each register once.
546 if (SuperRegsComplete)
547 return;
548 SuperRegsComplete = true;
549
550 // Make sure all sub-registers have been visited first, so the super-reg
551 // lists will be topologically ordered.
552 for (auto SubReg : SubRegs)
553 SubReg.second->computeSuperRegs(RegBank);
554
555 // Now add this as a super-register on all sub-registers.
556 // Also compute the TopoSigId in post-order.
557 TopoSigId Id;
558 for (auto SubReg : SubRegs) {
559 // Topological signature computed from SubIdx, TopoId(SubReg).
560 // Loops and idempotent indices have TopoSig = ~0u.
561 Id.push_back(SubReg.first->EnumValue);
562 Id.push_back(SubReg.second->TopoSig);
563
564 // Don't add duplicate entries.
565 if (!SubReg.second->SuperRegs.empty() &&
566 SubReg.second->SuperRegs.back() == this)
567 continue;
568 SubReg.second->SuperRegs.push_back(this);
569 }
570 TopoSig = RegBank.getTopoSig(Id);
571}
572
573void
574CodeGenRegister::addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
575 CodeGenRegBank &RegBank) const {
576 assert(SubRegsComplete && "Must precompute sub-registers")(static_cast <bool> (SubRegsComplete && "Must precompute sub-registers"
) ? void (0) : __assert_fail ("SubRegsComplete && \"Must precompute sub-registers\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 576, __extension__
__PRETTY_FUNCTION__))
;
577 for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
578 CodeGenRegister *SR = ExplicitSubRegs[i];
579 if (OSet.insert(SR))
580 SR->addSubRegsPreOrder(OSet, RegBank);
581 }
582 // Add any secondary sub-registers that weren't part of the explicit tree.
583 for (auto SubReg : SubRegs)
584 OSet.insert(SubReg.second);
585}
586
587// Get the sum of this register's unit weights.
588unsigned CodeGenRegister::getWeight(const CodeGenRegBank &RegBank) const {
589 unsigned Weight = 0;
590 for (unsigned RegUnit : RegUnits) {
591 Weight += RegBank.getRegUnit(RegUnit).Weight;
592 }
593 return Weight;
594}
595
596//===----------------------------------------------------------------------===//
597// RegisterTuples
598//===----------------------------------------------------------------------===//
599
600// A RegisterTuples def is used to generate pseudo-registers from lists of
601// sub-registers. We provide a SetTheory expander class that returns the new
602// registers.
603namespace {
604
605struct TupleExpander : SetTheory::Expander {
606 // Reference to SynthDefs in the containing CodeGenRegBank, to keep track of
607 // the synthesized definitions for their lifetime.
608 std::vector<std::unique_ptr<Record>> &SynthDefs;
609
610 TupleExpander(std::vector<std::unique_ptr<Record>> &SynthDefs)
611 : SynthDefs(SynthDefs) {}
612
613 void expand(SetTheory &ST, Record *Def, SetTheory::RecSet &Elts) override {
614 std::vector<Record*> Indices = Def->getValueAsListOfDefs("SubRegIndices");
615 unsigned Dim = Indices.size();
616 ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
617 if (Dim != SubRegs->size())
618 PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
619 if (Dim < 2)
620 PrintFatalError(Def->getLoc(),
621 "Tuples must have at least 2 sub-registers");
622
623 // Evaluate the sub-register lists to be zipped.
624 unsigned Length = ~0u;
625 SmallVector<SetTheory::RecSet, 4> Lists(Dim);
626 for (unsigned i = 0; i != Dim; ++i) {
627 ST.evaluate(SubRegs->getElement(i), Lists[i], Def->getLoc());
628 Length = std::min(Length, unsigned(Lists[i].size()));
629 }
630
631 if (Length == 0)
632 return;
633
634 // Precompute some types.
635 Record *RegisterCl = Def->getRecords().getClass("Register");
636 RecTy *RegisterRecTy = RecordRecTy::get(RegisterCl);
637 std::vector<StringRef> RegNames =
638 Def->getValueAsListOfStrings("RegAsmNames");
639
640 // Zip them up.
641 for (unsigned n = 0; n != Length; ++n) {
642 std::string Name;
643 Record *Proto = Lists[0][n];
644 std::vector<Init*> Tuple;
645 for (unsigned i = 0; i != Dim; ++i) {
646 Record *Reg = Lists[i][n];
647 if (i) Name += '_';
648 Name += Reg->getName();
649 Tuple.push_back(DefInit::get(Reg));
650 }
651
652 // Take the cost list of the first register in the tuple.
653 ListInit *CostList = Proto->getValueAsListInit("CostPerUse");
654 SmallVector<Init *, 2> CostPerUse;
655 CostPerUse.insert(CostPerUse.end(), CostList->begin(), CostList->end());
656
657 StringInit *AsmName = StringInit::get("");
658 if (!RegNames.empty()) {
659 if (RegNames.size() <= n)
660 PrintFatalError(Def->getLoc(),
661 "Register tuple definition missing name for '" +
662 Name + "'.");
663 AsmName = StringInit::get(RegNames[n]);
664 }
665
666 // Create a new Record representing the synthesized register. This record
667 // is only for consumption by CodeGenRegister, it is not added to the
668 // RecordKeeper.
669 SynthDefs.emplace_back(
670 std::make_unique<Record>(Name, Def->getLoc(), Def->getRecords()));
671 Record *NewReg = SynthDefs.back().get();
672 Elts.insert(NewReg);
673
674 // Copy Proto super-classes.
675 ArrayRef<std::pair<Record *, SMRange>> Supers = Proto->getSuperClasses();
676 for (const auto &SuperPair : Supers)
677 NewReg->addSuperClass(SuperPair.first, SuperPair.second);
678
679 // Copy Proto fields.
680 for (unsigned i = 0, e = Proto->getValues().size(); i != e; ++i) {
681 RecordVal RV = Proto->getValues()[i];
682
683 // Skip existing fields, like NAME.
684 if (NewReg->getValue(RV.getNameInit()))
685 continue;
686
687 StringRef Field = RV.getName();
688
689 // Replace the sub-register list with Tuple.
690 if (Field == "SubRegs")
691 RV.setValue(ListInit::get(Tuple, RegisterRecTy));
692
693 if (Field == "AsmName")
694 RV.setValue(AsmName);
695
696 // CostPerUse is aggregated from all Tuple members.
697 if (Field == "CostPerUse")
698 RV.setValue(ListInit::get(CostPerUse, CostList->getElementType()));
699
700 // Composite registers are always covered by sub-registers.
701 if (Field == "CoveredBySubRegs")
702 RV.setValue(BitInit::get(true));
703
704 // Copy fields from the RegisterTuples def.
705 if (Field == "SubRegIndices" ||
706 Field == "CompositeIndices") {
707 NewReg->addValue(*Def->getValue(Field));
708 continue;
709 }
710
711 // Some fields get their default uninitialized value.
712 if (Field == "DwarfNumbers" ||
713 Field == "DwarfAlias" ||
714 Field == "Aliases") {
715 if (const RecordVal *DefRV = RegisterCl->getValue(Field))
716 NewReg->addValue(*DefRV);
717 continue;
718 }
719
720 // Everything else is copied from Proto.
721 NewReg->addValue(RV);
722 }
723 }
724 }
725};
726
727} // end anonymous namespace
728
729//===----------------------------------------------------------------------===//
730// CodeGenRegisterClass
731//===----------------------------------------------------------------------===//
732
733static void sortAndUniqueRegisters(CodeGenRegister::Vec &M) {
734 llvm::sort(M, deref<std::less<>>());
735 M.erase(std::unique(M.begin(), M.end(), deref<std::equal_to<>>()), M.end());
736}
737
// Build a CodeGenRegisterClass from its RegisterClass Record: read the value
// types, expand the member list and alternative allocation orders, determine
// the register size info, and copy the remaining scalar properties.
738 CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
739 : TheDef(R), Name(std::string(R->getName())),
740 TopoSigs(RegBank.getNumTopoSigs()), EnumValue(-1), TSFlags(0) {
741 GeneratePressureSet = R->getValueAsBit("GeneratePressureSet");
// Every entry in RegTypes must derive from ValueType; translate each into a
// per-HwMode value type.
742 std::vector<Record*> TypeList = R->getValueAsListOfDefs("RegTypes");
743 if (TypeList.empty())
744 PrintFatalError(R->getLoc(), "RegTypes list must not be empty!");
745 for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
746 Record *Type = TypeList[i];
747 if (!Type->isSubClassOf("ValueType"))
748 PrintFatalError(R->getLoc(),
749 "RegTypes list member '" + Type->getName() +
750 "' does not derive from the ValueType class!");
751 VTs.push_back(getValueTypeByHwMode(Type, RegBank.getHwModes()));
752 }
753
754 // Allocation order 0 is the full set. AltOrders provides others.
755 const SetTheory::RecVec *Elements = RegBank.getSets().expand(R);
756 ListInit *AltOrders = R->getValueAsListInit("AltOrders");
757 Orders.resize(1 + AltOrders->size());
758
759 // Default allocation order always contains all registers.
// The class is artificial only if every member register is artificial.
760 Artificial = true;
761 for (unsigned i = 0, e = Elements->size(); i != e; ++i) {
762 Orders[0].push_back((*Elements)[i]);
763 const CodeGenRegister *Reg = RegBank.getReg((*Elements)[i]);
764 Members.push_back(Reg);
765 Artificial &= Reg->Artificial;
766 TopoSigs.set(Reg->getTopoSig());
767 }
// Members must be sorted/unique for contains() and testSubClass().
768 sortAndUniqueRegisters(Members);
769
770 // Alternative allocation orders may be subsets.
771 SetTheory::RecSet Order;
772 for (unsigned i = 0, e = AltOrders->size(); i != e; ++i) {
773 RegBank.getSets().evaluate(AltOrders->getElement(i), Order, R->getLoc());
774 Orders[1 + i].append(Order.begin(), Order.end());
775 // Verify that all altorder members are regclass members.
776 while (!Order.empty()) {
777 CodeGenRegister *Reg = RegBank.getReg(Order.back());
778 Order.pop_back();
779 if (!contains(Reg))
780 PrintFatalError(R->getLoc(), " AltOrder register " + Reg->getName() +
781 " is not a class member");
782 }
783 }
784
785 Namespace = R->getValueAsString("Namespace");
786
// Prefer an explicit per-HwMode RegInfos record; otherwise derive sizes from
// the "Size" field or the first value type below.
787 if (const RecordVal *RV = R->getValue("RegInfos"))
788 if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue()))
789 RSI = RegSizeInfoByHwMode(DI->getDef(), RegBank.getHwModes());
790 unsigned Size = R->getValueAsInt("Size");
// NOTE: the following lines are the macro-expanded residue of a single
// assert checking that some source of register size information exists.
791 assert((RSI.hasDefault() || Size != 0 || VTs[0].isSimple()) &&(static_cast <bool> ((RSI.hasDefault() || Size != 0 || VTs
[0].isSimple()) && "Impossible to determine register size"
) ? void (0) : __assert_fail ("(RSI.hasDefault() || Size != 0 || VTs[0].isSimple()) && \"Impossible to determine register size\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 792, __extension__
__PRETTY_FUNCTION__))
792 "Impossible to determine register size")(static_cast <bool> ((RSI.hasDefault() || Size != 0 || VTs
[0].isSimple()) && "Impossible to determine register size"
) ? void (0) : __assert_fail ("(RSI.hasDefault() || Size != 0 || VTs[0].isSimple()) && \"Impossible to determine register size\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 792, __extension__
__PRETTY_FUNCTION__))
;
793 if (!RSI.hasDefault()) {
794 RegSizeInfo RI;
// Fall back to the first value type's bit width when Size is 0.
795 RI.RegSize = RI.SpillSize = Size ? Size
796 : VTs[0].getSimple().getSizeInBits();
797 RI.SpillAlignment = R->getValueAsInt("Alignment");
798 RSI.insertRegSizeForMode(DefaultMode, RI);
799 }
800
801 CopyCost = R->getValueAsInt("CopyCost");
802 Allocatable = R->getValueAsBit("isAllocatable");
803 AltOrderSelect = R->getValueAsString("AltOrderSelect");
804 int AllocationPriority = R->getValueAsInt("AllocationPriority");
805 if (AllocationPriority < 0 || AllocationPriority > 63)
806 PrintFatalError(R->getLoc(), "AllocationPriority out of range [0,63]");
807 this->AllocationPriority = AllocationPriority;
808
// Pack the TSFlags bit list into the TSFlags integer, bit I at position I.
809 BitsInit *TSF = R->getValueAsBitsInit("TSFlags");
810 for (unsigned I = 0, E = TSF->getNumBits(); I != E; ++I) {
811 BitInit *Bit = cast<BitInit>(TSF->getBit(I));
812 TSFlags |= uint8_t(Bit->getValue()) << I;
813 }
814 }
815
816// Create an inferred register class that was missing from the .td files.
817// Most properties will be inherited from the closest super-class after the
818// class structure has been computed.
819CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
820 StringRef Name, Key Props)
821 : Members(*Props.Members), TheDef(nullptr), Name(std::string(Name)),
822 TopoSigs(RegBank.getNumTopoSigs()), EnumValue(-1), RSI(Props.RSI),
823 CopyCost(0), Allocatable(true), AllocationPriority(0), TSFlags(0) {
824 Artificial = true;
825 GeneratePressureSet = false;
826 for (const auto R : Members) {
827 TopoSigs.set(R->getTopoSig());
828 Artificial &= R->Artificial;
829 }
830}
831
832// Compute inherited propertied for a synthesized register class.
833void CodeGenRegisterClass::inheritProperties(CodeGenRegBank &RegBank) {
834 assert(!getDef() && "Only synthesized classes can inherit properties")(static_cast <bool> (!getDef() && "Only synthesized classes can inherit properties"
) ? void (0) : __assert_fail ("!getDef() && \"Only synthesized classes can inherit properties\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 834, __extension__
__PRETTY_FUNCTION__))
;
835 assert(!SuperClasses.empty() && "Synthesized class without super class")(static_cast <bool> (!SuperClasses.empty() && "Synthesized class without super class"
) ? void (0) : __assert_fail ("!SuperClasses.empty() && \"Synthesized class without super class\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 835, __extension__
__PRETTY_FUNCTION__))
;
836
837 // The last super-class is the smallest one.
838 CodeGenRegisterClass &Super = *SuperClasses.back();
839
840 // Most properties are copied directly.
841 // Exceptions are members, size, and alignment
842 Namespace = Super.Namespace;
843 VTs = Super.VTs;
844 CopyCost = Super.CopyCost;
845 // Check for allocatable superclasses.
846 Allocatable = any_of(SuperClasses, [&](const CodeGenRegisterClass *S) {
847 return S->Allocatable;
848 });
849 AltOrderSelect = Super.AltOrderSelect;
850 AllocationPriority = Super.AllocationPriority;
851 TSFlags = Super.TSFlags;
852 GeneratePressureSet |= Super.GeneratePressureSet;
853
854 // Copy all allocation orders, filter out foreign registers from the larger
855 // super-class.
856 Orders.resize(Super.Orders.size());
857 for (unsigned i = 0, ie = Super.Orders.size(); i != ie; ++i)
858 for (unsigned j = 0, je = Super.Orders[i].size(); j != je; ++j)
859 if (contains(RegBank.getReg(Super.Orders[i][j])))
860 Orders[i].push_back(Super.Orders[i][j]);
861}
862
863bool CodeGenRegisterClass::contains(const CodeGenRegister *Reg) const {
864 return std::binary_search(Members.begin(), Members.end(), Reg,
865 deref<std::less<>>());
866}
867
868unsigned CodeGenRegisterClass::getWeight(const CodeGenRegBank& RegBank) const {
869 if (TheDef && !TheDef->isValueUnset("Weight"))
870 return TheDef->getValueAsInt("Weight");
871
872 if (Members.empty() || Artificial)
873 return 0;
874
875 return (*Members.begin())->getWeight(RegBank);
876}
877
878namespace llvm {
879
880 raw_ostream &operator<<(raw_ostream &OS, const CodeGenRegisterClass::Key &K) {
881 OS << "{ " << K.RSI;
882 for (const auto R : *K.Members)
883 OS << ", " << R->getName();
884 return OS << " }";
885 }
886
887} // end namespace llvm
888
889// This is a simple lexicographical order that can be used to search for sets.
890// It is not the same as the topological order provided by TopoOrderRC.
891bool CodeGenRegisterClass::Key::
892operator<(const CodeGenRegisterClass::Key &B) const {
893 assert(Members && B.Members)(static_cast <bool> (Members && B.Members) ? void
(0) : __assert_fail ("Members && B.Members", "llvm/utils/TableGen/CodeGenRegisters.cpp"
, 893, __extension__ __PRETTY_FUNCTION__))
;
894 return std::tie(*Members, RSI) < std::tie(*B.Members, B.RSI);
895}
896
897// Returns true if RC is a strict subclass.
898// RC is a sub-class of this class if it is a valid replacement for any
899// instruction operand where a register of this classis required. It must
900// satisfy these conditions:
901//
902// 1. All RC registers are also in this.
903// 2. The RC spill size must not be smaller than our spill size.
904// 3. RC spill alignment must be compatible with ours.
905//
906static bool testSubClass(const CodeGenRegisterClass *A,
907 const CodeGenRegisterClass *B) {
908 return A->RSI.isSubClassOf(B->RSI) &&
909 std::includes(A->getMembers().begin(), A->getMembers().end(),
910 B->getMembers().begin(), B->getMembers().end(),
911 deref<std::less<>>());
912}
913
914/// Sorting predicate for register classes. This provides a topological
915/// ordering that arranges all register classes before their sub-classes.
916///
917/// Register classes with the same registers, spill size, and alignment form a
918/// clique. They will be ordered alphabetically.
919///
920static bool TopoOrderRC(const CodeGenRegisterClass &PA,
921 const CodeGenRegisterClass &PB) {
922 auto *A = &PA;
923 auto *B = &PB;
924 if (A == B)
925 return false;
926
927 if (A->RSI < B->RSI)
928 return true;
929 if (A->RSI != B->RSI)
930 return false;
931
932 // Order by descending set size. Note that the classes' allocation order may
933 // not have been computed yet. The Members set is always vaild.
934 if (A->getMembers().size() > B->getMembers().size())
935 return true;
936 if (A->getMembers().size() < B->getMembers().size())
937 return false;
938
939 // Finally order by name as a tie breaker.
940 return StringRef(A->getName()) < B->getName();
941}
942
943std::string CodeGenRegisterClass::getQualifiedName() const {
944 if (Namespace.empty())
945 return getName();
946 else
947 return (Namespace + "::" + getName()).str();
948}
949
950 // Compute sub-classes of all register classes.
951 // Assume the classes are ordered topologically.
952 void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
953 auto &RegClasses = RegBank.getRegClasses();
954
955 // Visit backwards so sub-classes are seen first.
956 for (auto I = RegClasses.rbegin(), E = RegClasses.rend(); I != E; ++I) {
957 CodeGenRegisterClass &RC = *I;
// Every class is trivially its own sub-class.
958 RC.SubClasses.resize(RegClasses.size());
959 RC.SubClasses.set(RC.EnumValue);
960 if (RC.Artificial)
961 continue;
962
963 // Normally, all subclasses have IDs >= rci, unless RC is part of a clique.
// I.base() points just after RC in forward order, so this scans every class
// that follows RC topologically.
964 for (auto I2 = I.base(), E2 = RegClasses.end(); I2 != E2; ++I2) {
965 CodeGenRegisterClass &SubRC = *I2;
966 if (RC.SubClasses.test(SubRC.EnumValue))
967 continue;
968 if (!testSubClass(&RC, &SubRC))
969 continue;
970 // SubRC is a sub-class. Grab all its sub-classes so we won't have to
971 // check them again.
// SubRC was already processed (reverse iteration), so its bit vector is
// complete and can be merged wholesale.
972 RC.SubClasses |= SubRC.SubClasses;
973 }
974
975 // Sweep up missed clique members. They will be immediately preceding RC.
976 for (auto I2 = std::next(I); I2 != E && testSubClass(&RC, &*I2); ++I2)
977 RC.SubClasses.set(I2->EnumValue);
978 }
979
980 // Compute the SuperClasses lists from the SubClasses vectors.
981 for (auto &RC : RegClasses) {
982 const BitVector &SC = RC.getSubClasses();
// Walk the set bits of SC; the list iterator I is advanced incrementally by
// the delta between consecutive set bits since it is not random access.
983 auto I = RegClasses.begin();
984 for (int s = 0, next_s = SC.find_first(); next_s != -1;
985 next_s = SC.find_next(s)) {
986 std::advance(I, next_s - s);
987 s = next_s;
988 if (&*I == &RC)
989 continue;
990 I->SuperClasses.push_back(&RC);
991 }
992 }
993
994 // With the class hierarchy in place, let synthesized register classes inherit
995 // properties from their closest super-class. The iteration order here can
996 // propagate properties down multiple levels.
997 for (auto &RC : RegClasses)
998 if (!RC.getDef())
999 RC.inheritProperties(RegBank);
1000 }
1001
// Find the largest subclass of this class whose registers all have a SubIdx
// sub-register, paired with the largest class containing those sub-registers.
// Returns None when no subclass fully supports SubIdx. The selection
// heuristic is explained in the comments below.
1002 Optional<std::pair<CodeGenRegisterClass *, CodeGenRegisterClass *>>
1003 CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
1004 CodeGenRegBank &RegBank, const CodeGenSubRegIndex *SubIdx) const {
// Bigger-first ordering, preferring this class itself on ties.
1005 auto SizeOrder = [this](const CodeGenRegisterClass *A,
1006 const CodeGenRegisterClass *B) {
1007 // If there are multiple, identical register classes, prefer the original
1008 // register class.
1009 if (A == B)
1010 return false;
1011 if (A->getMembers().size() == B->getMembers().size())
1012 return A == this;
1013 return A->getMembers().size() > B->getMembers().size();
1014 };
1015
1016 auto &RegClasses = RegBank.getRegClasses();
1017
1018 // Find all the subclasses of this one that fully support the sub-register
1019 // index and order them by size. BiggestSuperRC should always be first.
1020 CodeGenRegisterClass *BiggestSuperRegRC = getSubClassWithSubReg(SubIdx);
1021 if (!BiggestSuperRegRC)
1022 return None;
1023 BitVector SuperRegRCsBV = BiggestSuperRegRC->getSubClasses();
1024 std::vector<CodeGenRegisterClass *> SuperRegRCs;
1025 for (auto &RC : RegClasses)
1026 if (SuperRegRCsBV[RC.EnumValue])
1027 SuperRegRCs.emplace_back(&RC);
1028 llvm::stable_sort(SuperRegRCs, SizeOrder);
1029
// NOTE: the following lines are the macro-expanded residue of a single
// assert that the biggest candidate sorted to the front.
1030 assert(SuperRegRCs.front() == BiggestSuperRegRC &&(static_cast <bool> (SuperRegRCs.front() == BiggestSuperRegRC
&& "Biggest class wasn't first") ? void (0) : __assert_fail
("SuperRegRCs.front() == BiggestSuperRegRC && \"Biggest class wasn't first\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1031, __extension__
__PRETTY_FUNCTION__))
1031 "Biggest class wasn't first")(static_cast <bool> (SuperRegRCs.front() == BiggestSuperRegRC
&& "Biggest class wasn't first") ? void (0) : __assert_fail
("SuperRegRCs.front() == BiggestSuperRegRC && \"Biggest class wasn't first\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1031, __extension__
__PRETTY_FUNCTION__))
;
1032
1033 // Find all the subreg classes and order them by size too.
1034 std::vector<std::pair<CodeGenRegisterClass *, BitVector>> SuperRegClasses;
1035 for (auto &RC: RegClasses) {
1036 BitVector SuperRegClassesBV(RegClasses.size());
1037 RC.getSuperRegClasses(SubIdx, SuperRegClassesBV);
1038 if (SuperRegClassesBV.any())
1039 SuperRegClasses.push_back(std::make_pair(&RC, SuperRegClassesBV));
1040 }
1041 llvm::sort(SuperRegClasses,
1042 [&](const std::pair<CodeGenRegisterClass *, BitVector> &A,
1043 const std::pair<CodeGenRegisterClass *, BitVector> &B) {
1044 return SizeOrder(A.first, B.first);
1045 });
1046
1047 // Find the biggest subclass and subreg class such that R:subidx is in the
1048 // subreg class for all R in subclass.
1049 //
1050 // For example:
1051 // All registers in X86's GR64 have a sub_32bit subregister but no class
1052 // exists that contains all the 32-bit subregisters because GR64 contains RIP
1053 // but GR32 does not contain EIP. Instead, we constrain SuperRegRC to
1054 // GR32_with_sub_8bit (which is identical to GR32_with_sub_32bit) and then,
1055 // having excluded RIP, we are able to find a SubRegRC (GR32).
1056 CodeGenRegisterClass *ChosenSuperRegClass = nullptr;
1057 CodeGenRegisterClass *SubRegRC = nullptr;
1058 for (auto *SuperRegRC : SuperRegRCs) {
1059 for (const auto &SuperRegClassPair : SuperRegClasses) {
1060 const BitVector &SuperRegClassBV = SuperRegClassPair.second;
1061 if (SuperRegClassBV[SuperRegRC->EnumValue]) {
1062 SubRegRC = SuperRegClassPair.first;
1063 ChosenSuperRegClass = SuperRegRC;
1064
1065 // If SubRegRC is bigger than SuperRegRC then there are members of
1066 // SubRegRC that don't have super registers via SubIdx. Keep looking to
1067 // find a better fit and fall back on this one if there isn't one.
1068 //
1069 // This is intended to prevent X86 from making odd choices such as
1070 // picking LOW32_ADDR_ACCESS_RBP instead of GR32 in the example above.
1071 // LOW32_ADDR_ACCESS_RBP is a valid choice but contains registers that
1072 // aren't subregisters of SuperRegRC whereas GR32 has a direct 1:1
1073 // mapping.
1074 if (SuperRegRC->getMembers().size() >= SubRegRC->getMembers().size())
1075 return std::make_pair(ChosenSuperRegClass, SubRegRC);
1076 }
1077 }
1078
1079 // If we found a fit but it wasn't quite ideal because SubRegRC had excess
1080 // registers, then we're done.
1081 if (ChosenSuperRegClass)
1082 return std::make_pair(ChosenSuperRegClass, SubRegRC);
1083 }
1084
1085 return None;
1086 }
1087
1088void CodeGenRegisterClass::getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
1089 BitVector &Out) const {
1090 auto FindI = SuperRegClasses.find(SubIdx);
1091 if (FindI == SuperRegClasses.end())
1092 return;
1093 for (CodeGenRegisterClass *RC : FindI->second)
1094 Out.set(RC->EnumValue);
1095}
1096
1097// Populate a unique sorted list of units from a register set.
1098void CodeGenRegisterClass::buildRegUnitSet(const CodeGenRegBank &RegBank,
1099 std::vector<unsigned> &RegUnits) const {
1100 std::vector<unsigned> TmpUnits;
1101 for (RegUnitIterator UnitI(Members); UnitI.isValid(); ++UnitI) {
1102 const RegUnit &RU = RegBank.getRegUnit(*UnitI);
1103 if (!RU.Artificial)
1104 TmpUnits.push_back(*UnitI);
1105 }
1106 llvm::sort(TmpUnits);
1107 std::unique_copy(TmpUnits.begin(), TmpUnits.end(),
1108 std::back_inserter(RegUnits));
1109}
1110
1111//===----------------------------------------------------------------------===//
1112// CodeGenRegisterCategory
1113//===----------------------------------------------------------------------===//
1114
1115CodeGenRegisterCategory::CodeGenRegisterCategory(CodeGenRegBank &RegBank,
1116 Record *R)
1117 : TheDef(R), Name(std::string(R->getName())) {
1118 for (Record *RegClass : R->getValueAsListOfDefs("Classes"))
1119 Classes.push_back(RegBank.getRegClass(RegClass));
1120}
1121
1122//===----------------------------------------------------------------------===//
1123// CodeGenRegBank
1124//===----------------------------------------------------------------------===//
1125
// Build the register bank from the TableGen records. Phase order matters:
// sub-register indices first, then registers and tuples, the sub-register
// graph, register units, and finally register classes and categories.
1126 CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
1127 const CodeGenHwModes &Modes) : CGH(Modes) {
1128 // Configure register Sets to understand register classes and tuples.
1129 Sets.addFieldExpander("RegisterClass", "MemberList");
1130 Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
1131 Sets.addExpander("RegisterTuples",
1132 std::make_unique<TupleExpander>(SynthDefs));
1133
1134 // Read in the user-defined (named) sub-register indices.
1135 // More indices will be synthesized later.
1136 std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
1137 llvm::sort(SRIs, LessRecord());
1138 for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
1139 getSubRegIdx(SRIs[i]);
1140 // Build composite maps from ComposedOf fields.
1141 for (auto &Idx : SubRegIndices)
1142 Idx.updateComponents(*this);
1143
1144 // Read in the register definitions.
1145 std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
1146 llvm::sort(Regs, LessRecordRegister());
1147 // Assign the enumeration values.
// getReg() numbers registers in the order they are first seen.
1148 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
1149 getReg(Regs[i]);
1150
1151 // Expand tuples and number the new registers.
1152 std::vector<Record*> Tups =
1153 Records.getAllDerivedDefinitions("RegisterTuples");
1154
1155 for (Record *R : Tups) {
1156 std::vector<Record *> TupRegs = *Sets.expand(R);
1157 llvm::sort(TupRegs, LessRecordRegister());
1158 for (Record *RC : TupRegs)
1159 getReg(RC);
1160 }
1161
1162 // Now all the registers are known. Build the object graph of explicit
1163 // register-register references.
1164 for (auto &Reg : Registers)
1165 Reg.buildObjectGraph(*this);
1166
1167 // Compute register name map.
1168 for (auto &Reg : Registers)
1169 // FIXME: This could just be RegistersByName[name] = register, except that
1170 // causes some failures in MIPS - perhaps they have duplicate register name
1171 // entries? (or maybe there's a reason for it - I don't know much about this
1172 // code, just drive-by refactoring)
1173 RegistersByName.insert(
1174 std::make_pair(Reg.TheDef->getValueAsString("AsmName"), &Reg));
1175
1176 // Precompute all sub-register maps.
1177 // This will create Composite entries for all inferred sub-register indices.
1178 for (auto &Reg : Registers)
1179 Reg.computeSubRegs(*this);
1180
1181 // Compute transitive closure of subregister index ConcatenationOf vectors
1182 // and initialize ConcatIdx map.
1183 for (CodeGenSubRegIndex &SRI : SubRegIndices) {
1184 SRI.computeConcatTransitiveClosure();
1185 if (!SRI.ConcatenationOf.empty())
1186 ConcatIdx.insert(std::make_pair(
1187 SmallVector<CodeGenSubRegIndex*,8>(SRI.ConcatenationOf.begin(),
1188 SRI.ConcatenationOf.end()), &SRI));
1189 }
1190
1191 // Infer even more sub-registers by combining leading super-registers.
1192 for (auto &Reg : Registers)
1193 if (Reg.CoveredBySubRegs)
1194 Reg.computeSecondarySubRegs(*this);
1195
1196 // After the sub-register graph is complete, compute the topologically
1197 // ordered SuperRegs list.
1198 for (auto &Reg : Registers)
1199 Reg.computeSuperRegs(*this);
1200
1201 // For each pair of Reg:SR, if both are non-artificial, mark the
1202 // corresponding sub-register index as non-artificial.
1203 for (auto &Reg : Registers) {
1204 if (Reg.Artificial)
1205 continue;
1206 for (auto P : Reg.getSubRegs()) {
1207 const CodeGenRegister *SR = P.second;
1208 if (!SR->Artificial)
1209 P.first->Artificial = false;
1210 }
1211 }
1212
1213 // Native register units are associated with a leaf register. They've all been
1214 // discovered now.
1215 NumNativeRegUnits = RegUnits.size();
1216
1217 // Read in register class definitions.
1218 std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
1219 if (RCs.empty())
1220 PrintFatalError("No 'RegisterClass' subclasses defined!");
1221
1222 // Allocate user-defined register classes.
// Artificial classes are kept in RegClasses but stay out of the lookup maps.
1223 for (auto *R : RCs) {
1224 RegClasses.emplace_back(*this, R);
1225 CodeGenRegisterClass &RC = RegClasses.back();
1226 if (!RC.Artificial)
1227 addToMaps(&RC);
1228 }
1229
1230 // Infer missing classes to create a full algebra.
1231 computeInferredRegisterClasses();
1232
1233 // Order register classes topologically and assign enum values.
1234 RegClasses.sort(TopoOrderRC);
1235 unsigned i = 0;
1236 for (auto &RC : RegClasses)
1237 RC.EnumValue = i++;
1238 CodeGenRegisterClass::computeSubClasses(*this);
1239
1240 // Read in the register category definitions.
1241 std::vector<Record *> RCats =
1242 Records.getAllDerivedDefinitions("RegisterCategory");
1243 for (auto *R : RCats)
1244 RegCategories.emplace_back(*this, R);
1245 }
1246
1247// Create a synthetic CodeGenSubRegIndex without a corresponding Record.
1248CodeGenSubRegIndex*
1249CodeGenRegBank::createSubRegIndex(StringRef Name, StringRef Namespace) {
1250 SubRegIndices.emplace_back(Name, Namespace, SubRegIndices.size() + 1);
1251 return &SubRegIndices.back();
1252}
1253
1254CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
1255 CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
1256 if (Idx)
1257 return Idx;
1258 SubRegIndices.emplace_back(Def, SubRegIndices.size() + 1);
1259 Idx = &SubRegIndices.back();
1260 return Idx;
1261}
1262
1263const CodeGenSubRegIndex *
1264CodeGenRegBank::findSubRegIdx(const Record* Def) const {
1265 return Def2SubRegIdx.lookup(Def);
1266}
1267
1268CodeGenRegister *CodeGenRegBank::getReg(Record *Def) {
1269 CodeGenRegister *&Reg = Def2Reg[Def];
1270 if (Reg)
1271 return Reg;
1272 Registers.emplace_back(Def, Registers.size() + 1);
1273 Reg = &Registers.back();
1274 return Reg;
1275}
1276
1277void CodeGenRegBank::addToMaps(CodeGenRegisterClass *RC) {
1278 if (Record *Def = RC->getDef())
1279 Def2RC.insert(std::make_pair(Def, RC));
1280
1281 // Duplicate classes are rejected by insert().
1282 // That's OK, we only care about the properties handled by CGRC::Key.
1283 CodeGenRegisterClass::Key K(*RC);
1284 Key2RC.insert(std::make_pair(K, RC));
1285}
1286
1287// Create a synthetic sub-class if it is missing.
1288CodeGenRegisterClass*
1289CodeGenRegBank::getOrCreateSubClass(const CodeGenRegisterClass *RC,
1290 const CodeGenRegister::Vec *Members,
1291 StringRef Name) {
1292 // Synthetic sub-class has the same size and alignment as RC.
1293 CodeGenRegisterClass::Key K(Members, RC->RSI);
1294 RCKeyMap::const_iterator FoundI = Key2RC.find(K);
1295 if (FoundI != Key2RC.end())
1296 return FoundI->second;
1297
1298 // Sub-class doesn't exist, create a new one.
1299 RegClasses.emplace_back(*this, Name, K);
1300 addToMaps(&RegClasses.back());
1301 return &RegClasses.back();
1302}
1303
1304CodeGenRegisterClass *CodeGenRegBank::getRegClass(const Record *Def) const {
1305 if (CodeGenRegisterClass *RC = Def2RC.lookup(Def))
1306 return RC;
1307
1308 PrintFatalError(Def->getLoc(), "Not a known RegisterClass!");
1309}
1310
1311CodeGenSubRegIndex*
1312CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
1313 CodeGenSubRegIndex *B) {
1314 // Look for an existing entry.
1315 CodeGenSubRegIndex *Comp = A->compose(B);
1316 if (Comp)
1317 return Comp;
1318
1319 // None exists, synthesize one.
1320 std::string Name = A->getName() + "_then_" + B->getName();
1321 Comp = createSubRegIndex(Name, A->getNamespace());
1322 A->addComposite(B, Comp);
1323 return Comp;
1324}
1325
// Find or synthesize the sub-register index representing the concatenation of
// Parts (each part must already have an empty ConcatenationOf, i.e. be fully
// transitively closed).
1326 CodeGenSubRegIndex *CodeGenRegBank::
1327 getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts) {
// NOTE: the following lines are the macro-expanded residue of a single
// assert that at least two parts were supplied.
1328 assert(Parts.size() > 1 && "Need two parts to concatenate")(static_cast <bool> (Parts.size() > 1 && "Need two parts to concatenate"
) ? void (0) : __assert_fail ("Parts.size() > 1 && \"Need two parts to concatenate\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1328, __extension__
__PRETTY_FUNCTION__))
;
1329#ifndef NDEBUG
1330 for (CodeGenSubRegIndex *Idx : Parts) {
// Macro-expanded residue of: assert(Idx->ConcatenationOf.empty()).
1331 assert(Idx->ConcatenationOf.empty() && "No transitive closure?")(static_cast <bool> (Idx->ConcatenationOf.empty() &&
"No transitive closure?") ? void (0) : __assert_fail ("Idx->ConcatenationOf.empty() && \"No transitive closure?\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1331, __extension__
__PRETTY_FUNCTION__))
;
1332 }
1333#endif
1334
1335 // Look for an existing entry.
// Idx is a reference into ConcatIdx; createSubRegIndex below only appends to
// SubRegIndices and never touches ConcatIdx, so the reference stays valid.
1336 CodeGenSubRegIndex *&Idx = ConcatIdx[Parts];
1337 if (Idx)
1338 return Idx;
1339
1340 // None exists, synthesize one.
// The new index is named "part0_part1_...", its Size is the sum of the part
// sizes, and its Offset is meaningful only when the parts are contiguous.
1341 std::string Name = Parts.front()->getName();
1342 // Determine whether all parts are contiguous.
1343 bool isContinuous = true;
1344 unsigned Size = Parts.front()->Size;
1345 unsigned LastOffset = Parts.front()->Offset;
1346 unsigned LastSize = Parts.front()->Size;
1347 for (unsigned i = 1, e = Parts.size(); i != e; ++i) {
1348 Name += '_';
1349 Name += Parts[i]->getName();
1350 Size += Parts[i]->Size;
1351 if (Parts[i]->Offset != (LastOffset + LastSize))
1352 isContinuous = false;
1353 LastOffset = Parts[i]->Offset;
1354 LastSize = Parts[i]->Size;
1355 }
1356 Idx = createSubRegIndex(Name, Parts.front()->getNamespace());
1357 Idx->Size = Size;
1358 Idx->Offset = isContinuous ? Parts.front()->Offset : -1;
1359 Idx->ConcatenationOf.assign(Parts.begin(), Parts.end());
1360 return Idx;
1361 }
1362
// Compute all composite sub-register indices: for every chain
// Reg1:Idx1 == Reg2 and Reg2:Idx2 == Reg3, record Idx1 composed with Idx2 as
// the index mapping Reg1 to Reg3, warning when two chains imply different
// (ambiguous) compositions.
1363 void CodeGenRegBank::computeComposites() {
1364 using RegMap = std::map<const CodeGenRegister*, const CodeGenRegister*>;
1365
1366 // Subreg -> { Reg->Reg }, where the right-hand side is the mapping from
1367 // register to (sub)register associated with the action of the left-hand
1368 // side subregister.
1369 std::map<const CodeGenSubRegIndex*, RegMap> SubRegAction;
1370 for (const CodeGenRegister &R : Registers) {
1371 const CodeGenRegister::SubRegMap &SM = R.getSubRegs();
1372 for (std::pair<const CodeGenSubRegIndex*, const CodeGenRegister*> P : SM)
1373 SubRegAction[P.first].insert({&R, P.second});
1374 }
1375
1376 // Calculate the composition of two subregisters as compositions of their
1377 // associated actions.
1378 auto compose = [&SubRegAction] (const CodeGenSubRegIndex *Sub1,
1379 const CodeGenSubRegIndex *Sub2) {
1380 RegMap C;
1381 const RegMap &Img1 = SubRegAction.at(Sub1);
1382 const RegMap &Img2 = SubRegAction.at(Sub2);
1383 for (std::pair<const CodeGenRegister*, const CodeGenRegister*> P : Img1) {
1384 auto F = Img2.find(P.second);
1385 if (F != Img2.end())
1386 C.insert({P.first, F->second});
1387 }
1388 return C;
1389 };
1390
1391 // Check if the two maps agree on the intersection of their domains.
1392 auto agree = [] (const RegMap &Map1, const RegMap &Map2) {
1393 // Technically speaking, an empty map agrees with any other map, but
1394 // this could flag false positives. We're interested in non-vacuous
1395 // agreements.
1396 if (Map1.empty() || Map2.empty())
1397 return false;
1398 for (std::pair<const CodeGenRegister*, const CodeGenRegister*> P : Map1) {
1399 auto F = Map2.find(P.first);
1400 if (F == Map2.end() || P.second != F->second)
1401 return false;
1402 }
1403 return true;
1404 };
1405
// Record which (outer, inner) composition pairs came from the .td files so
// warnings can be filtered below.
1406 using CompositePair = std::pair<const CodeGenSubRegIndex*,
1407 const CodeGenSubRegIndex*>;
1408 SmallSet<CompositePair,4> UserDefined;
1409 for (const CodeGenSubRegIndex &Idx : SubRegIndices)
1410 for (auto P : Idx.getComposites())
1411 UserDefined.insert(std::make_pair(&Idx, P.first));
1412
1413 // Keep track of TopoSigs visited. We only need to visit each TopoSig once,
1414 // and many registers will share TopoSigs on regular architectures.
1415 BitVector TopoSigs(getNumTopoSigs());
1416
1417 for (const auto &Reg1 : Registers) {
1418 // Skip identical subreg structures already processed.
1419 if (TopoSigs.test(Reg1.getTopoSig()))
1420 continue;
1421 TopoSigs.set(Reg1.getTopoSig());
1422
1423 const CodeGenRegister::SubRegMap &SRM1 = Reg1.getSubRegs();
1424 for (auto I1 : SRM1) {
1425 CodeGenSubRegIndex *Idx1 = I1.first;
1426 CodeGenRegister *Reg2 = I1.second;
1427 // Ignore identity compositions.
1428 if (&Reg1 == Reg2)
1429 continue;
1430 const CodeGenRegister::SubRegMap &SRM2 = Reg2->getSubRegs();
1431 // Try composing Idx1 with another SubRegIndex.
1432 for (auto I2 : SRM2) {
1433 CodeGenSubRegIndex *Idx2 = I2.first;
1434 CodeGenRegister *Reg3 = I2.second;
1435 // Ignore identity compositions.
1436 if (Reg2 == Reg3)
1437 continue;
1438 // OK Reg1:IdxPair == Reg3. Find the index with Reg:Idx == Reg3.
1439 CodeGenSubRegIndex *Idx3 = Reg1.getSubRegIndex(Reg3);
// Macro-expanded residue of: assert(Idx3 && "Sub-register doesn't have an
// index").
1440 assert(Idx3 && "Sub-register doesn't have an index")(static_cast <bool> (Idx3 && "Sub-register doesn't have an index"
) ? void (0) : __assert_fail ("Idx3 && \"Sub-register doesn't have an index\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1440, __extension__
__PRETTY_FUNCTION__))
;
1441
1442 // Conflicting composition? Emit a warning but allow it.
// addComposite returns the previously recorded composite when it differs
// from Idx3.
1443 if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, Idx3)) {
1444 // If the composition was not user-defined, always emit a warning.
1445 if (!UserDefined.count({Idx1, Idx2}) ||
1446 agree(compose(Idx1, Idx2), SubRegAction.at(Idx3)))
1447 PrintWarning(Twine("SubRegIndex ") + Idx1->getQualifiedName() +
1448 " and " + Idx2->getQualifiedName() +
1449 " compose ambiguously as " + Prev->getQualifiedName() +
1450 " or " + Idx3->getQualifiedName());
1451 }
1452 }
1453 }
1454 }
1455 }
1456
1457// Compute lane masks. This is similar to register units, but at the
1458// sub-register index level. Each bit in the lane mask is like a register unit
1459// class, and two lane masks will have a bit in common if two sub-register
1460// indices overlap in some register.
1461//
1462// Conservatively share a lane mask bit if two sub-register indices overlap in
1463// some registers, but not in others. That shouldn't happen a lot.
1464void CodeGenRegBank::computeSubRegLaneMasks() {
1465 // First assign individual bits to all the leaf indices.
1466 unsigned Bit = 0;
1467 // Determine mask of lanes that cover their registers.
1468 CoveringLanes = LaneBitmask::getAll();
1469 for (auto &Idx : SubRegIndices) {
1470 if (Idx.getComposites().empty()) {
1471 if (Bit > LaneBitmask::BitWidth) {
1472 PrintFatalError(
1473 Twine("Ran out of lanemask bits to represent subregister ")
1474 + Idx.getName());
1475 }
1476 Idx.LaneMask = LaneBitmask::getLane(Bit);
1477 ++Bit;
1478 } else {
1479 Idx.LaneMask = LaneBitmask::getNone();
1480 }
1481 }
1482
1483 // Compute transformation sequences for composeSubRegIndexLaneMask. The idea
1484 // here is that for each possible target subregister we look at the leafs
1485 // in the subregister graph that compose for this target and create
1486 // transformation sequences for the lanemasks. Each step in the sequence
1487 // consists of a bitmask and a bitrotate operation. As the rotation amounts
1488 // are usually the same for many subregisters we can easily combine the steps
1489 // by combining the masks.
1490 for (const auto &Idx : SubRegIndices) {
1491 const auto &Composites = Idx.getComposites();
1492 auto &LaneTransforms = Idx.CompositionLaneMaskTransform;
1493
1494 if (Composites.empty()) {
2
Assuming the condition is true
3
Taking true branch
1495 // Moving from a class with no subregisters we just had a single lane:
1496 // The subregister must be a leaf subregister and only occupies 1 bit.
1497 // Move the bit from the class without subregisters into that position.
1498 unsigned DstBit = Idx.LaneMask.getHighestLane();
4
Calling 'LaneBitmask::getHighestLane'
9
Returning from 'LaneBitmask::getHighestLane'
10
'DstBit' initialized to 4294967295
1499 assert(Idx.LaneMask == LaneBitmask::getLane(DstBit) &&(static_cast <bool> (Idx.LaneMask == LaneBitmask::getLane
(DstBit) && "Must be a leaf subregister") ? void (0) :
__assert_fail ("Idx.LaneMask == LaneBitmask::getLane(DstBit) && \"Must be a leaf subregister\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1500, __extension__
__PRETTY_FUNCTION__))
11
Passing the value 4294967295 via 1st parameter 'Lane'
12
Calling 'LaneBitmask::getLane'
1500 "Must be a leaf subregister")(static_cast <bool> (Idx.LaneMask == LaneBitmask::getLane
(DstBit) && "Must be a leaf subregister") ? void (0) :
__assert_fail ("Idx.LaneMask == LaneBitmask::getLane(DstBit) && \"Must be a leaf subregister\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1500, __extension__
__PRETTY_FUNCTION__))
;
1501 MaskRolPair MaskRol = { LaneBitmask::getLane(0), (uint8_t)DstBit };
1502 LaneTransforms.push_back(MaskRol);
1503 } else {
1504 // Go through all leaf subregisters and find the ones that compose with
1505 // Idx. These make out all possible valid bits in the lane mask we want to
1506 // transform. Looking only at the leafs ensure that only a single bit in
1507 // the mask is set.
1508 unsigned NextBit = 0;
1509 for (auto &Idx2 : SubRegIndices) {
1510 // Skip non-leaf subregisters.
1511 if (!Idx2.getComposites().empty())
1512 continue;
1513 // Replicate the behaviour from the lane mask generation loop above.
1514 unsigned SrcBit = NextBit;
1515 LaneBitmask SrcMask = LaneBitmask::getLane(SrcBit);
1516 if (NextBit < LaneBitmask::BitWidth-1)
1517 ++NextBit;
1518 assert(Idx2.LaneMask == SrcMask)(static_cast <bool> (Idx2.LaneMask == SrcMask) ? void (
0) : __assert_fail ("Idx2.LaneMask == SrcMask", "llvm/utils/TableGen/CodeGenRegisters.cpp"
, 1518, __extension__ __PRETTY_FUNCTION__))
;
1519
1520 // Get the composed subregister if there is any.
1521 auto C = Composites.find(&Idx2);
1522 if (C == Composites.end())
1523 continue;
1524 const CodeGenSubRegIndex *Composite = C->second;
1525 // The Composed subreg should be a leaf subreg too
1526 assert(Composite->getComposites().empty())(static_cast <bool> (Composite->getComposites().empty
()) ? void (0) : __assert_fail ("Composite->getComposites().empty()"
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1526, __extension__
__PRETTY_FUNCTION__))
;
1527
1528 // Create Mask+Rotate operation and merge with existing ops if possible.
1529 unsigned DstBit = Composite->LaneMask.getHighestLane();
1530 int Shift = DstBit - SrcBit;
1531 uint8_t RotateLeft = Shift >= 0 ? (uint8_t)Shift
1532 : LaneBitmask::BitWidth + Shift;
1533 for (auto &I : LaneTransforms) {
1534 if (I.RotateLeft == RotateLeft) {
1535 I.Mask |= SrcMask;
1536 SrcMask = LaneBitmask::getNone();
1537 }
1538 }
1539 if (SrcMask.any()) {
1540 MaskRolPair MaskRol = { SrcMask, RotateLeft };
1541 LaneTransforms.push_back(MaskRol);
1542 }
1543 }
1544 }
1545
1546 // Optimize if the transformation consists of one step only: Set mask to
1547 // 0xffffffff (including some irrelevant invalid bits) so that it should
1548 // merge with more entries later while compressing the table.
1549 if (LaneTransforms.size() == 1)
1550 LaneTransforms[0].Mask = LaneBitmask::getAll();
1551
1552 // Further compression optimization: For invalid compositions resulting
1553 // in a sequence with 0 entries we can just pick any other. Choose
1554 // Mask 0xffffffff with Rotation 0.
1555 if (LaneTransforms.size() == 0) {
1556 MaskRolPair P = { LaneBitmask::getAll(), 0 };
1557 LaneTransforms.push_back(P);
1558 }
1559 }
1560
1561 // FIXME: What if ad-hoc aliasing introduces overlaps that aren't represented
1562 // by the sub-register graph? This doesn't occur in any known targets.
1563
1564 // Inherit lanes from composites.
1565 for (const auto &Idx : SubRegIndices) {
1566 LaneBitmask Mask = Idx.computeLaneMask();
1567 // If some super-registers without CoveredBySubRegs use this index, we can
1568 // no longer assume that the lanes are covering their registers.
1569 if (!Idx.AllSuperRegsCovered)
1570 CoveringLanes &= ~Mask;
1571 }
1572
1573 // Compute lane mask combinations for register classes.
1574 for (auto &RegClass : RegClasses) {
1575 LaneBitmask LaneMask;
1576 for (const auto &SubRegIndex : SubRegIndices) {
1577 if (RegClass.getSubClassWithSubReg(&SubRegIndex) == nullptr)
1578 continue;
1579 LaneMask |= SubRegIndex.LaneMask;
1580 }
1581
1582 // For classes without any subregisters set LaneMask to 1 instead of 0.
1583 // This makes it easier for client code to handle classes uniformly.
1584 if (LaneMask.none())
1585 LaneMask = LaneBitmask::getLane(0);
1586
1587 RegClass.LaneMask = LaneMask;
1588 }
1589}
1590
namespace {

// UberRegSet is a helper class for computeRegUnitWeights. Each UberRegSet is
// the transitive closure of the union of overlapping register
// classes. Together, the UberRegSets form a partition of the registers. If we
// consider overlapping register classes to be connected, then each UberRegSet
// is a set of connected components.
//
// An UberRegSet will likely be a horizontal slice of register names of
// the same width. Nontrivial subregisters should then be in a separate
// UberRegSet. But this property isn't required for valid computation of
// register unit weights.
//
// A Weight field caches the max per-register unit weight in each UberRegSet.
//
// A set of SingularDeterminants flags single units of some register in this set
// for which the unit weight equals the set weight. These units should not have
// their weight increased.
struct UberRegSet {
  // Sorted, unique list of registers belonging to this set.
  CodeGenRegister::Vec Regs;
  // Cached maximum per-register weight across Regs (0 until computed).
  unsigned Weight = 0;
  // Units that singularly determine this set's weight; normalizeWeight must
  // not adjust them.
  CodeGenRegister::RegUnitList SingularDeterminants;

  UberRegSet() = default;
};

} // end anonymous namespace
1618
1619// Partition registers into UberRegSets, where each set is the transitive
1620// closure of the union of overlapping register classes.
1621//
1622// UberRegSets[0] is a special non-allocatable set.
1623static void computeUberSets(std::vector<UberRegSet> &UberSets,
1624 std::vector<UberRegSet*> &RegSets,
1625 CodeGenRegBank &RegBank) {
1626 const auto &Registers = RegBank.getRegisters();
1627
1628 // The Register EnumValue is one greater than its index into Registers.
1629 assert(Registers.size() == Registers.back().EnumValue &&(static_cast <bool> (Registers.size() == Registers.back
().EnumValue && "register enum value mismatch") ? void
(0) : __assert_fail ("Registers.size() == Registers.back().EnumValue && \"register enum value mismatch\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1630, __extension__
__PRETTY_FUNCTION__))
1630 "register enum value mismatch")(static_cast <bool> (Registers.size() == Registers.back
().EnumValue && "register enum value mismatch") ? void
(0) : __assert_fail ("Registers.size() == Registers.back().EnumValue && \"register enum value mismatch\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1630, __extension__
__PRETTY_FUNCTION__))
;
1631
1632 // For simplicitly make the SetID the same as EnumValue.
1633 IntEqClasses UberSetIDs(Registers.size()+1);
1634 std::set<unsigned> AllocatableRegs;
1635 for (auto &RegClass : RegBank.getRegClasses()) {
1636 if (!RegClass.Allocatable)
1637 continue;
1638
1639 const CodeGenRegister::Vec &Regs = RegClass.getMembers();
1640 if (Regs.empty())
1641 continue;
1642
1643 unsigned USetID = UberSetIDs.findLeader((*Regs.begin())->EnumValue);
1644 assert(USetID && "register number 0 is invalid")(static_cast <bool> (USetID && "register number 0 is invalid"
) ? void (0) : __assert_fail ("USetID && \"register number 0 is invalid\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1644, __extension__
__PRETTY_FUNCTION__))
;
1645
1646 AllocatableRegs.insert((*Regs.begin())->EnumValue);
1647 for (const CodeGenRegister *CGR : llvm::drop_begin(Regs)) {
1648 AllocatableRegs.insert(CGR->EnumValue);
1649 UberSetIDs.join(USetID, CGR->EnumValue);
1650 }
1651 }
1652 // Combine non-allocatable regs.
1653 for (const auto &Reg : Registers) {
1654 unsigned RegNum = Reg.EnumValue;
1655 if (AllocatableRegs.count(RegNum))
1656 continue;
1657
1658 UberSetIDs.join(0, RegNum);
1659 }
1660 UberSetIDs.compress();
1661
1662 // Make the first UberSet a special unallocatable set.
1663 unsigned ZeroID = UberSetIDs[0];
1664
1665 // Insert Registers into the UberSets formed by union-find.
1666 // Do not resize after this.
1667 UberSets.resize(UberSetIDs.getNumClasses());
1668 unsigned i = 0;
1669 for (const CodeGenRegister &Reg : Registers) {
1670 unsigned USetID = UberSetIDs[Reg.EnumValue];
1671 if (!USetID)
1672 USetID = ZeroID;
1673 else if (USetID == ZeroID)
1674 USetID = 0;
1675
1676 UberRegSet *USet = &UberSets[USetID];
1677 USet->Regs.push_back(&Reg);
1678 sortAndUniqueRegisters(USet->Regs);
1679 RegSets[i++] = USet;
1680 }
1681}
1682
// Recompute each UberSet weight after changing unit weights.
//
// A set's weight is the maximum, over its registers, of the sum of the
// weights of the register's (non-artificial) units. Units with weight 0 are
// promoted to weight 1 as a side effect.
static void computeUberWeights(std::vector<UberRegSet> &UberSets,
                               CodeGenRegBank &RegBank) {
  // Skip the first unallocatable set.
  for (std::vector<UberRegSet>::iterator I = std::next(UberSets.begin()),
       E = UberSets.end(); I != E; ++I) {

    // Initialize all unit weights in this set, and remember the max units/reg.
    // RegUnitIterator visits units grouped by register, so a change of
    // UnitI.getReg() marks the boundary between one register's units and the
    // next's; Weight accumulates the current register's total.
    const CodeGenRegister *Reg = nullptr;
    unsigned MaxWeight = 0, Weight = 0;
    for (RegUnitIterator UnitI(I->Regs); UnitI.isValid(); ++UnitI) {
      if (Reg != UnitI.getReg()) {
        if (Weight > MaxWeight)
          MaxWeight = Weight;
        Reg = UnitI.getReg();
        Weight = 0;
      }
      if (!RegBank.getRegUnit(*UnitI).Artificial) {
        unsigned UWeight = RegBank.getRegUnit(*UnitI).Weight;
        if (!UWeight) {
          // Give zero-weight units a minimal weight of 1 so they count.
          UWeight = 1;
          RegBank.increaseRegUnitWeight(*UnitI, UWeight);
        }
        Weight += UWeight;
      }
    }
    // Flush the accumulated weight of the final register.
    if (Weight > MaxWeight)
      MaxWeight = Weight;
    if (I->Weight != MaxWeight) {
      LLVM_DEBUG(dbgs() << "UberSet " << I - UberSets.begin() << " Weight "
                        << MaxWeight;
                 for (auto &Unit : I->Regs)
                   dbgs() << " " << Unit->getName();
                 dbgs() << "\n");
      // Update the set weight.
      I->Weight = MaxWeight;
    }

    // Find singular determinants: single-unit registers whose weight already
    // equals the set weight. Those units must not be inflated later.
    for (const auto R : I->Regs) {
      if (R->getRegUnits().count() == 1 && R->getWeight(RegBank) == I->Weight) {
        I->SingularDeterminants |= R->getRegUnits();
      }
    }
  }
}
1730
// normalizeWeight is a computeRegUnitWeights helper that adjusts the weight of
// a register and its subregisters so that they have the same weight as their
// UberSet. Self-recursion processes the subregister tree in postorder so
// subregisters are normalized first.
//
// Side effects:
// - creates new adopted register units
// - causes superregisters to inherit adopted units
// - increases the weight of "singular" units
// - induces recomputation of UberWeights.
//
// Returns true if any unit weight or unit set was changed.
static bool normalizeWeight(CodeGenRegister *Reg,
                            std::vector<UberRegSet> &UberSets,
                            std::vector<UberRegSet*> &RegSets,
                            BitVector &NormalRegs,
                            CodeGenRegister::RegUnitList &NormalUnits,
                            CodeGenRegBank &RegBank) {
  // Visit each register at most once per top-level invocation.
  NormalRegs.resize(std::max(Reg->EnumValue + 1, NormalRegs.size()));
  if (NormalRegs.test(Reg->EnumValue))
    return false;
  NormalRegs.set(Reg->EnumValue);

  bool Changed = false;
  const CodeGenRegister::SubRegMap &SRM = Reg->getSubRegs();
  for (auto SRI : SRM) {
    if (SRI.second == Reg)
      continue; // self-cycles happen

    Changed |= normalizeWeight(SRI.second, UberSets, RegSets, NormalRegs,
                               NormalUnits, RegBank);
  }
  // Postorder register normalization.

  // Inherit register units newly adopted by subregisters.
  if (Reg->inheritRegUnits(RegBank))
    computeUberWeights(UberSets, RegBank);

  // Check if this register is too skinny for its UberRegSet.
  UberRegSet *UberSet = RegSets[RegBank.getRegIndex(Reg)];

  unsigned RegWeight = Reg->getWeight(RegBank);
  if (UberSet->Weight > RegWeight) {
    // A register unit's weight can be adjusted only if it is the singular unit
    // for this register, has not been used to normalize a subregister's set,
    // and has not already been used to singularly determine this UberRegSet.
    unsigned AdjustUnit = *Reg->getRegUnits().begin();
    if (Reg->getRegUnits().count() != 1
        || hasRegUnit(NormalUnits, AdjustUnit)
        || hasRegUnit(UberSet->SingularDeterminants, AdjustUnit)) {
      // We don't have an adjustable unit, so adopt a new one.
      AdjustUnit = RegBank.newRegUnit(UberSet->Weight - RegWeight);
      Reg->adoptRegUnit(AdjustUnit);
      // Adopting a unit does not immediately require recomputing set weights.
    }
    else {
      // Adjust the existing single unit.
      if (!RegBank.getRegUnit(AdjustUnit).Artificial)
        RegBank.increaseRegUnitWeight(AdjustUnit, UberSet->Weight - RegWeight);
      // The unit may be shared among sets and registers within this set.
      computeUberWeights(UberSets, RegBank);
    }
    Changed = true;
  }

  // Mark these units normalized so superregisters can't change their weights.
  NormalUnits |= Reg->getRegUnits();

  return Changed;
}
1799
1800// Compute a weight for each register unit created during getSubRegs.
1801//
1802// The goal is that two registers in the same class will have the same weight,
1803// where each register's weight is defined as sum of its units' weights.
1804void CodeGenRegBank::computeRegUnitWeights() {
1805 std::vector<UberRegSet> UberSets;
1806 std::vector<UberRegSet*> RegSets(Registers.size());
1807 computeUberSets(UberSets, RegSets, *this);
1808 // UberSets and RegSets are now immutable.
1809
1810 computeUberWeights(UberSets, *this);
1811
1812 // Iterate over each Register, normalizing the unit weights until reaching
1813 // a fix point.
1814 unsigned NumIters = 0;
1815 for (bool Changed = true; Changed; ++NumIters) {
1816 assert(NumIters <= NumNativeRegUnits && "Runaway register unit weights")(static_cast <bool> (NumIters <= NumNativeRegUnits &&
"Runaway register unit weights") ? void (0) : __assert_fail (
"NumIters <= NumNativeRegUnits && \"Runaway register unit weights\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1816, __extension__
__PRETTY_FUNCTION__))
;
1817 (void) NumIters;
1818 Changed = false;
1819 for (auto &Reg : Registers) {
1820 CodeGenRegister::RegUnitList NormalUnits;
1821 BitVector NormalRegs;
1822 Changed |= normalizeWeight(&Reg, UberSets, RegSets, NormalRegs,
1823 NormalUnits, *this);
1824 }
1825 }
1826}
1827
1828// Find a set in UniqueSets with the same elements as Set.
1829// Return an iterator into UniqueSets.
1830static std::vector<RegUnitSet>::const_iterator
1831findRegUnitSet(const std::vector<RegUnitSet> &UniqueSets,
1832 const RegUnitSet &Set) {
1833 std::vector<RegUnitSet>::const_iterator
1834 I = UniqueSets.begin(), E = UniqueSets.end();
1835 for(;I != E; ++I) {
1836 if (I->Units == Set.Units)
1837 break;
1838 }
1839 return I;
1840}
1841
// Return true if the RUSubSet is a subset of RUSuperSet.
// Both unit lists are sorted, so containment is decided with a single
// linear merge-style scan (equivalent to std::includes).
static bool isRegUnitSubSet(const std::vector<unsigned> &RUSubSet,
                            const std::vector<unsigned> &RUSuperSet) {
  auto SubI = RUSubSet.begin();
  const auto SubE = RUSubSet.end();
  auto SupI = RUSuperSet.begin();
  const auto SupE = RUSuperSet.end();
  for (; SubI != SubE; ++SubI) {
    // Advance the superset cursor past units smaller than the wanted one.
    while (SupI != SupE && *SupI < *SubI)
      ++SupI;
    // The wanted unit must be present.
    if (SupI == SupE || *SubI < *SupI)
      return false;
    ++SupI;
  }
  return true;
}
1848
/// Iteratively prune unit sets. Prune subsets that are close to the superset,
/// but with one or two registers removed. We occasionally have registers like
/// APSR and PC thrown in with the general registers. We also see many
/// special-purpose register subsets, such as tail-call and Thumb
/// encodings. Generating all possible overlapping sets is combinatorial and
/// overkill for modeling pressure. Ideally we could fix this statically in
/// tablegen by (1) having the target define register classes that only include
/// the allocatable registers and marking other classes as non-allocatable and
/// (2) having a way to mark special purpose classes as "don't-care" classes for
/// the purpose of pressure. However, we make an attempt to handle targets that
/// are not nicely defined by merging nearly identical register unit sets
/// statically. This generates smaller tables. Then, dynamically, we adjust the
/// set limit by filtering the reserved registers.
///
/// Merge sets only if the units have the same weight. For example, on ARM,
/// Q-tuples with ssub index 0 include all S regs but also include D16+. We
/// should not expand the S set to include D regs.
void CodeGenRegBank::pruneUnitSets() {
  assert(RegClassUnitSets.empty() && "this invalidates RegClassUnitSets");

  // Form an equivalence class of UnitSets with no significant difference.
  // SuperSetIDs collects the indices of sets that are NOT subsumed by any
  // other set; only those survive the pruning below.
  std::vector<unsigned> SuperSetIDs;
  for (unsigned SubIdx = 0, EndIdx = RegUnitSets.size();
       SubIdx != EndIdx; ++SubIdx) {
    const RegUnitSet &SubSet = RegUnitSets[SubIdx];
    unsigned SuperIdx = 0;
    for (; SuperIdx != EndIdx; ++SuperIdx) {
      if (SuperIdx == SubIdx)
        continue;

      // Merge only when SubSet is contained in SuperSet, is at most two
      // units smaller, and the unit weights agree (checked via the first
      // and last unit of the sorted SuperSet).
      unsigned UnitWeight = RegUnits[SubSet.Units[0]].Weight;
      const RegUnitSet &SuperSet = RegUnitSets[SuperIdx];
      if (isRegUnitSubSet(SubSet.Units, SuperSet.Units)
          && (SubSet.Units.size() + 3 > SuperSet.Units.size())
          && UnitWeight == RegUnits[SuperSet.Units[0]].Weight
          && UnitWeight == RegUnits[SuperSet.Units.back()].Weight) {
        LLVM_DEBUG(dbgs() << "UnitSet " << SubIdx << " subsumed by " << SuperIdx
                          << "\n");
        // We can pick any of the set names for the merged set. Go for the
        // shortest one to avoid picking the name of one of the classes that are
        // artificially created by tablegen. So "FPR128_lo" instead of
        // "QQQQ_with_qsub3_in_FPR128_lo".
        if (RegUnitSets[SubIdx].Name.size() < RegUnitSets[SuperIdx].Name.size())
          RegUnitSets[SuperIdx].Name = RegUnitSets[SubIdx].Name;
        break;
      }
    }
    // No superset found: keep this set.
    if (SuperIdx == EndIdx)
      SuperSetIDs.push_back(SubIdx);
  }
  // Populate PrunedUnitSets with each equivalence class's superset.
  std::vector<RegUnitSet> PrunedUnitSets(SuperSetIDs.size());
  for (unsigned i = 0, e = SuperSetIDs.size(); i != e; ++i) {
    unsigned SuperIdx = SuperSetIDs[i];
    PrunedUnitSets[i].Name = RegUnitSets[SuperIdx].Name;
    // swap avoids copying the (potentially large) unit vectors.
    PrunedUnitSets[i].Units.swap(RegUnitSets[SuperIdx].Units);
  }
  RegUnitSets.swap(PrunedUnitSets);
}
1908
1909// Create a RegUnitSet for each RegClass that contains all units in the class
1910// including adopted units that are necessary to model register pressure. Then
1911// iteratively compute RegUnitSets such that the union of any two overlapping
1912// RegUnitSets is repreresented.
1913//
1914// RegisterInfoEmitter will map each RegClass to its RegUnitClass and any
1915// RegUnitSet that is a superset of that RegUnitClass.
1916void CodeGenRegBank::computeRegUnitSets() {
1917 assert(RegUnitSets.empty() && "dirty RegUnitSets")(static_cast <bool> (RegUnitSets.empty() && "dirty RegUnitSets"
) ? void (0) : __assert_fail ("RegUnitSets.empty() && \"dirty RegUnitSets\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1917, __extension__
__PRETTY_FUNCTION__))
;
1918
1919 // Compute a unique RegUnitSet for each RegClass.
1920 auto &RegClasses = getRegClasses();
1921 for (auto &RC : RegClasses) {
1922 if (!RC.Allocatable || RC.Artificial || !RC.GeneratePressureSet)
1923 continue;
1924
1925 // Speculatively grow the RegUnitSets to hold the new set.
1926 RegUnitSets.resize(RegUnitSets.size() + 1);
1927 RegUnitSets.back().Name = RC.getName();
1928
1929 // Compute a sorted list of units in this class.
1930 RC.buildRegUnitSet(*this, RegUnitSets.back().Units);
1931
1932 // Find an existing RegUnitSet.
1933 std::vector<RegUnitSet>::const_iterator SetI =
1934 findRegUnitSet(RegUnitSets, RegUnitSets.back());
1935 if (SetI != std::prev(RegUnitSets.end()))
1936 RegUnitSets.pop_back();
1937 }
1938
1939 if (RegUnitSets.empty())
1940 PrintFatalError("RegUnitSets cannot be empty!");
1941
1942 LLVM_DEBUG(dbgs() << "\nBefore pruning:\n"; for (unsigned USIdx = 0,do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1943 USEnd = RegUnitSets.size();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1944 USIdx < USEnd; ++USIdx) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1945 dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1946 for (auto &U : RegUnitSets[USIdx].Units)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1947 printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1948 dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
1949 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore pruning:\n"
; for (unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx <
USEnd; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; }; } } while (false)
;
1950
1951 // Iteratively prune unit sets.
1952 pruneUnitSets();
1953
1954 LLVM_DEBUG(dbgs() << "\nBefore union:\n"; for (unsigned USIdx = 0,do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1955 USEnd = RegUnitSets.size();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1956 USIdx < USEnd; ++USIdx) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1957 dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1958 for (auto &U : RegUnitSets[USIdx].Units)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1959 printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1960 dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
1961 } dbgs() << "\nUnion sets:\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\nBefore union:\n"; for
(unsigned USIdx = 0, USEnd = RegUnitSets.size(); USIdx < USEnd
; ++USIdx) { dbgs() << "UnitSet " << USIdx <<
" " << RegUnitSets[USIdx].Name << ":"; for (auto
&U : RegUnitSets[USIdx].Units) printRegUnitName(U); dbgs
() << "\n"; } dbgs() << "\nUnion sets:\n"; } } while
(false)
;
1962
1963 // Iterate over all unit sets, including new ones added by this loop.
1964 unsigned NumRegUnitSubSets = RegUnitSets.size();
1965 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
1966 // In theory, this is combinatorial. In practice, it needs to be bounded
1967 // by a small number of sets for regpressure to be efficient.
1968 // If the assert is hit, we need to implement pruning.
1969 assert(Idx < (2*NumRegUnitSubSets) && "runaway unit set inference")(static_cast <bool> (Idx < (2*NumRegUnitSubSets) &&
"runaway unit set inference") ? void (0) : __assert_fail ("Idx < (2*NumRegUnitSubSets) && \"runaway unit set inference\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 1969, __extension__
__PRETTY_FUNCTION__))
;
1970
1971 // Compare new sets with all original classes.
1972 for (unsigned SearchIdx = (Idx >= NumRegUnitSubSets) ? 0 : Idx+1;
1973 SearchIdx != EndIdx; ++SearchIdx) {
1974 std::set<unsigned> Intersection;
1975 std::set_intersection(RegUnitSets[Idx].Units.begin(),
1976 RegUnitSets[Idx].Units.end(),
1977 RegUnitSets[SearchIdx].Units.begin(),
1978 RegUnitSets[SearchIdx].Units.end(),
1979 std::inserter(Intersection, Intersection.begin()));
1980 if (Intersection.empty())
1981 continue;
1982
1983 // Speculatively grow the RegUnitSets to hold the new set.
1984 RegUnitSets.resize(RegUnitSets.size() + 1);
1985 RegUnitSets.back().Name =
1986 RegUnitSets[Idx].Name + "_with_" + RegUnitSets[SearchIdx].Name;
1987
1988 std::set_union(RegUnitSets[Idx].Units.begin(),
1989 RegUnitSets[Idx].Units.end(),
1990 RegUnitSets[SearchIdx].Units.begin(),
1991 RegUnitSets[SearchIdx].Units.end(),
1992 std::inserter(RegUnitSets.back().Units,
1993 RegUnitSets.back().Units.begin()));
1994
1995 // Find an existing RegUnitSet, or add the union to the unique sets.
1996 std::vector<RegUnitSet>::const_iterator SetI =
1997 findRegUnitSet(RegUnitSets, RegUnitSets.back());
1998 if (SetI != std::prev(RegUnitSets.end()))
1999 RegUnitSets.pop_back();
2000 else {
2001 LLVM_DEBUG(dbgs() << "UnitSet " << RegUnitSets.size() - 1 << " "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
2002 << RegUnitSets.back().Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
2003 for (auto &Udo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
2004 : RegUnitSets.back().Units) printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
2005 dbgs() << "\n";)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "UnitSet " << RegUnitSets
.size() - 1 << " " << RegUnitSets.back().Name <<
":"; for (auto &U : RegUnitSets.back().Units) printRegUnitName
(U); dbgs() << "\n";; } } while (false)
;
2006 }
2007 }
2008 }
2009
2010 // Iteratively prune unit sets after inferring supersets.
2011 pruneUnitSets();
2012
2013 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
2014 dbgs() << "\n"; for (unsigned USIdx = 0, USEnd = RegUnitSets.size();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
2015 USIdx < USEnd; ++USIdx) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
2016 dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
2017 for (auto &U : RegUnitSets[USIdx].Units)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
2018 printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
2019 dbgs() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
2020 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; for (unsigned USIdx
= 0, USEnd = RegUnitSets.size(); USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " <<
RegUnitSets[USIdx].Name << ":"; for (auto &U : RegUnitSets
[USIdx].Units) printRegUnitName(U); dbgs() << "\n"; }; }
} while (false)
;
2021
2022 // For each register class, list the UnitSets that are supersets.
2023 RegClassUnitSets.resize(RegClasses.size());
2024 int RCIdx = -1;
2025 for (auto &RC : RegClasses) {
2026 ++RCIdx;
2027 if (!RC.Allocatable)
2028 continue;
2029
2030 // Recompute the sorted list of units in this class.
2031 std::vector<unsigned> RCRegUnits;
2032 RC.buildRegUnitSet(*this, RCRegUnits);
2033
2034 // Don't increase pressure for unallocatable regclasses.
2035 if (RCRegUnits.empty())
2036 continue;
2037
2038 LLVM_DEBUG(dbgs() << "RC " << RC.getName() << " Units:\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units:\n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
2039 for (auto Udo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units:\n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
2040 : RCRegUnits) printRegUnitName(U);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units:\n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
2041 dbgs() << "\n UnitSetIDs:")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "RC " << RC.getName
() << " Units:\n"; for (auto U : RCRegUnits) printRegUnitName
(U); dbgs() << "\n UnitSetIDs:"; } } while (false)
;
2042
2043 // Find all supersets.
2044 for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
2045 USIdx != USEnd; ++USIdx) {
2046 if (isRegUnitSubSet(RCRegUnits, RegUnitSets[USIdx].Units)) {
2047 LLVM_DEBUG(dbgs() << " " << USIdx)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << " " << USIdx; }
} while (false)
;
2048 RegClassUnitSets[RCIdx].push_back(USIdx);
2049 }
2050 }
2051 LLVM_DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("regalloc-emitter")) { dbgs() << "\n"; } } while (false
)
;
2052 assert((!RegClassUnitSets[RCIdx].empty() || !RC.GeneratePressureSet) &&(static_cast <bool> ((!RegClassUnitSets[RCIdx].empty() ||
!RC.GeneratePressureSet) && "missing unit set for regclass"
) ? void (0) : __assert_fail ("(!RegClassUnitSets[RCIdx].empty() || !RC.GeneratePressureSet) && \"missing unit set for regclass\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 2053, __extension__
__PRETTY_FUNCTION__))
2053 "missing unit set for regclass")(static_cast <bool> ((!RegClassUnitSets[RCIdx].empty() ||
!RC.GeneratePressureSet) && "missing unit set for regclass"
) ? void (0) : __assert_fail ("(!RegClassUnitSets[RCIdx].empty() || !RC.GeneratePressureSet) && \"missing unit set for regclass\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 2053, __extension__
__PRETTY_FUNCTION__))
;
2054 }
2055
2056 // For each register unit, ensure that we have the list of UnitSets that
2057 // contain the unit. Normally, this matches an existing list of UnitSets for a
2058 // register class. If not, we create a new entry in RegClassUnitSets as a
2059 // "fake" register class.
2060 for (unsigned UnitIdx = 0, UnitEnd = NumNativeRegUnits;
2061 UnitIdx < UnitEnd; ++UnitIdx) {
2062 std::vector<unsigned> RUSets;
2063 for (unsigned i = 0, e = RegUnitSets.size(); i != e; ++i) {
2064 RegUnitSet &RUSet = RegUnitSets[i];
2065 if (!is_contained(RUSet.Units, UnitIdx))
2066 continue;
2067 RUSets.push_back(i);
2068 }
2069 unsigned RCUnitSetsIdx = 0;
2070 for (unsigned e = RegClassUnitSets.size();
2071 RCUnitSetsIdx != e; ++RCUnitSetsIdx) {
2072 if (RegClassUnitSets[RCUnitSetsIdx] == RUSets) {
2073 break;
2074 }
2075 }
2076 RegUnits[UnitIdx].RegClassUnitSetsIdx = RCUnitSetsIdx;
2077 if (RCUnitSetsIdx == RegClassUnitSets.size()) {
2078 // Create a new list of UnitSets as a "fake" register class.
2079 RegClassUnitSets.resize(RCUnitSetsIdx + 1);
2080 RegClassUnitSets[RCUnitSetsIdx].swap(RUSets);
2081 }
2082 }
2083}
2084
2085void CodeGenRegBank::computeRegUnitLaneMasks() {
2086 for (auto &Register : Registers) {
2087 // Create an initial lane mask for all register units.
2088 const auto &RegUnits = Register.getRegUnits();
2089 CodeGenRegister::RegUnitLaneMaskList
2090 RegUnitLaneMasks(RegUnits.count(), LaneBitmask::getNone());
2091 // Iterate through SubRegisters.
2092 typedef CodeGenRegister::SubRegMap SubRegMap;
2093 const SubRegMap &SubRegs = Register.getSubRegs();
2094 for (auto S : SubRegs) {
2095 CodeGenRegister *SubReg = S.second;
2096 // Ignore non-leaf subregisters, their lane masks are fully covered by
2097 // the leaf subregisters anyway.
2098 if (!SubReg->getSubRegs().empty())
2099 continue;
2100 CodeGenSubRegIndex *SubRegIndex = S.first;
2101 const CodeGenRegister *SubRegister = S.second;
2102 LaneBitmask LaneMask = SubRegIndex->LaneMask;
2103 // Distribute LaneMask to Register Units touched.
2104 for (unsigned SUI : SubRegister->getRegUnits()) {
2105 bool Found = false;
2106 unsigned u = 0;
2107 for (unsigned RU : RegUnits) {
2108 if (SUI == RU) {
2109 RegUnitLaneMasks[u] |= LaneMask;
2110 assert(!Found)(static_cast <bool> (!Found) ? void (0) : __assert_fail
("!Found", "llvm/utils/TableGen/CodeGenRegisters.cpp", 2110,
__extension__ __PRETTY_FUNCTION__))
;
2111 Found = true;
2112 }
2113 ++u;
2114 }
2115 (void)Found;
2116 assert(Found)(static_cast <bool> (Found) ? void (0) : __assert_fail (
"Found", "llvm/utils/TableGen/CodeGenRegisters.cpp", 2116, __extension__
__PRETTY_FUNCTION__))
;
2117 }
2118 }
2119 Register.setRegUnitLaneMasks(RegUnitLaneMasks);
2120 }
2121}
2122
2123void CodeGenRegBank::computeDerivedInfo() {
2124 computeComposites();
2125 computeSubRegLaneMasks();
1
Calling 'CodeGenRegBank::computeSubRegLaneMasks'
2126
2127 // Compute a weight for each register unit created during getSubRegs.
2128 // This may create adopted register units (with unit # >= NumNativeRegUnits).
2129 computeRegUnitWeights();
2130
2131 // Compute a unique set of RegUnitSets. One for each RegClass and inferred
2132 // supersets for the union of overlapping sets.
2133 computeRegUnitSets();
2134
2135 computeRegUnitLaneMasks();
2136
2137 // Compute register class HasDisjunctSubRegs/CoveredBySubRegs flag.
2138 for (CodeGenRegisterClass &RC : RegClasses) {
2139 RC.HasDisjunctSubRegs = false;
2140 RC.CoveredBySubRegs = true;
2141 for (const CodeGenRegister *Reg : RC.getMembers()) {
2142 RC.HasDisjunctSubRegs |= Reg->HasDisjunctSubRegs;
2143 RC.CoveredBySubRegs &= Reg->CoveredBySubRegs;
2144 }
2145 }
2146
2147 // Get the weight of each set.
2148 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
2149 RegUnitSets[Idx].Weight = getRegUnitSetWeight(RegUnitSets[Idx].Units);
2150
2151 // Find the order of each set.
2152 RegUnitSetOrder.reserve(RegUnitSets.size());
2153 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
2154 RegUnitSetOrder.push_back(Idx);
2155
2156 llvm::stable_sort(RegUnitSetOrder, [this](unsigned ID1, unsigned ID2) {
2157 return getRegPressureSet(ID1).Units.size() <
2158 getRegPressureSet(ID2).Units.size();
2159 });
2160 for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
2161 RegUnitSets[RegUnitSetOrder[Idx]].Order = Idx;
2162 }
2163}
2164
2165//
2166// Synthesize missing register class intersections.
2167//
2168// Make sure that sub-classes of RC exists such that getCommonSubClass(RC, X)
2169// returns a maximal register class for all X.
2170//
2171void CodeGenRegBank::inferCommonSubClass(CodeGenRegisterClass *RC) {
2172 assert(!RegClasses.empty())(static_cast <bool> (!RegClasses.empty()) ? void (0) : __assert_fail
("!RegClasses.empty()", "llvm/utils/TableGen/CodeGenRegisters.cpp"
, 2172, __extension__ __PRETTY_FUNCTION__))
;
2173 // Stash the iterator to the last element so that this loop doesn't visit
2174 // elements added by the getOrCreateSubClass call within it.
2175 for (auto I = RegClasses.begin(), E = std::prev(RegClasses.end());
2176 I != std::next(E); ++I) {
2177 CodeGenRegisterClass *RC1 = RC;
2178 CodeGenRegisterClass *RC2 = &*I;
2179 if (RC1 == RC2)
2180 continue;
2181
2182 // Compute the set intersection of RC1 and RC2.
2183 const CodeGenRegister::Vec &Memb1 = RC1->getMembers();
2184 const CodeGenRegister::Vec &Memb2 = RC2->getMembers();
2185 CodeGenRegister::Vec Intersection;
2186 std::set_intersection(Memb1.begin(), Memb1.end(), Memb2.begin(),
2187 Memb2.end(),
2188 std::inserter(Intersection, Intersection.begin()),
2189 deref<std::less<>>());
2190
2191 // Skip disjoint class pairs.
2192 if (Intersection.empty())
2193 continue;
2194
2195 // If RC1 and RC2 have different spill sizes or alignments, use the
2196 // stricter one for sub-classing. If they are equal, prefer RC1.
2197 if (RC2->RSI.hasStricterSpillThan(RC1->RSI))
2198 std::swap(RC1, RC2);
2199
2200 getOrCreateSubClass(RC1, &Intersection,
2201 RC1->getName() + "_and_" + RC2->getName());
2202 }
2203}
2204
2205//
2206// Synthesize missing sub-classes for getSubClassWithSubReg().
2207//
2208// Make sure that the set of registers in RC with a given SubIdx sub-register
2209// form a register class. Update RC->SubClassWithSubReg.
2210//
2211void CodeGenRegBank::inferSubClassWithSubReg(CodeGenRegisterClass *RC) {
2212 // Map SubRegIndex to set of registers in RC supporting that SubRegIndex.
2213 typedef std::map<const CodeGenSubRegIndex *, CodeGenRegister::Vec,
2214 deref<std::less<>>>
2215 SubReg2SetMap;
2216
2217 // Compute the set of registers supporting each SubRegIndex.
2218 SubReg2SetMap SRSets;
2219 for (const auto R : RC->getMembers()) {
2220 if (R->Artificial)
2221 continue;
2222 const CodeGenRegister::SubRegMap &SRM = R->getSubRegs();
2223 for (auto I : SRM) {
2224 if (!I.first->Artificial)
2225 SRSets[I.first].push_back(R);
2226 }
2227 }
2228
2229 for (auto I : SRSets)
2230 sortAndUniqueRegisters(I.second);
2231
2232 // Find matching classes for all SRSets entries. Iterate in SubRegIndex
2233 // numerical order to visit synthetic indices last.
2234 for (const auto &SubIdx : SubRegIndices) {
2235 if (SubIdx.Artificial)
2236 continue;
2237 SubReg2SetMap::const_iterator I = SRSets.find(&SubIdx);
2238 // Unsupported SubRegIndex. Skip it.
2239 if (I == SRSets.end())
2240 continue;
2241 // In most cases, all RC registers support the SubRegIndex.
2242 if (I->second.size() == RC->getMembers().size()) {
2243 RC->setSubClassWithSubReg(&SubIdx, RC);
2244 continue;
2245 }
2246 // This is a real subset. See if we have a matching class.
2247 CodeGenRegisterClass *SubRC =
2248 getOrCreateSubClass(RC, &I->second,
2249 RC->getName() + "_with_" + I->first->getName());
2250 RC->setSubClassWithSubReg(&SubIdx, SubRC);
2251 }
2252}
2253
2254//
2255// Synthesize missing sub-classes of RC for getMatchingSuperRegClass().
2256//
2257// Create sub-classes of RC such that getMatchingSuperRegClass(RC, SubIdx, X)
2258// has a maximal result for any SubIdx and any X >= FirstSubRegRC.
2259//
2260
2261void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
2262 std::list<CodeGenRegisterClass>::iterator FirstSubRegRC) {
2263 SmallVector<std::pair<const CodeGenRegister*,
2264 const CodeGenRegister*>, 16> SSPairs;
2265 BitVector TopoSigs(getNumTopoSigs());
2266
2267 // Iterate in SubRegIndex numerical order to visit synthetic indices last.
2268 for (auto &SubIdx : SubRegIndices) {
2269 // Skip indexes that aren't fully supported by RC's registers. This was
2270 // computed by inferSubClassWithSubReg() above which should have been
2271 // called first.
2272 if (RC->getSubClassWithSubReg(&SubIdx) != RC)
2273 continue;
2274
2275 // Build list of (Super, Sub) pairs for this SubIdx.
2276 SSPairs.clear();
2277 TopoSigs.reset();
2278 for (const auto Super : RC->getMembers()) {
2279 const CodeGenRegister *Sub = Super->getSubRegs().find(&SubIdx)->second;
2280 assert(Sub && "Missing sub-register")(static_cast <bool> (Sub && "Missing sub-register"
) ? void (0) : __assert_fail ("Sub && \"Missing sub-register\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 2280, __extension__
__PRETTY_FUNCTION__))
;
2281 SSPairs.push_back(std::make_pair(Super, Sub));
2282 TopoSigs.set(Sub->getTopoSig());
2283 }
2284
2285 // Iterate over sub-register class candidates. Ignore classes created by
2286 // this loop. They will never be useful.
2287 // Store an iterator to the last element (not end) so that this loop doesn't
2288 // visit newly inserted elements.
2289 assert(!RegClasses.empty())(static_cast <bool> (!RegClasses.empty()) ? void (0) : __assert_fail
("!RegClasses.empty()", "llvm/utils/TableGen/CodeGenRegisters.cpp"
, 2289, __extension__ __PRETTY_FUNCTION__))
;
2290 for (auto I = FirstSubRegRC, E = std::prev(RegClasses.end());
2291 I != std::next(E); ++I) {
2292 CodeGenRegisterClass &SubRC = *I;
2293 if (SubRC.Artificial)
2294 continue;
2295 // Topological shortcut: SubRC members have the wrong shape.
2296 if (!TopoSigs.anyCommon(SubRC.getTopoSigs()))
2297 continue;
2298 // Compute the subset of RC that maps into SubRC.
2299 CodeGenRegister::Vec SubSetVec;
2300 for (unsigned i = 0, e = SSPairs.size(); i != e; ++i)
2301 if (SubRC.contains(SSPairs[i].second))
2302 SubSetVec.push_back(SSPairs[i].first);
2303
2304 if (SubSetVec.empty())
2305 continue;
2306
2307 // RC injects completely into SubRC.
2308 sortAndUniqueRegisters(SubSetVec);
2309 if (SubSetVec.size() == SSPairs.size()) {
2310 SubRC.addSuperRegClass(&SubIdx, RC);
2311 continue;
2312 }
2313
2314 // Only a subset of RC maps into SubRC. Make sure it is represented by a
2315 // class.
2316 getOrCreateSubClass(RC, &SubSetVec, RC->getName() + "_with_" +
2317 SubIdx.getName() + "_in_" +
2318 SubRC.getName());
2319 }
2320 }
2321}
2322
2323//
2324// Infer missing register classes.
2325//
2326void CodeGenRegBank::computeInferredRegisterClasses() {
2327 assert(!RegClasses.empty())(static_cast <bool> (!RegClasses.empty()) ? void (0) : __assert_fail
("!RegClasses.empty()", "llvm/utils/TableGen/CodeGenRegisters.cpp"
, 2327, __extension__ __PRETTY_FUNCTION__))
;
2328 // When this function is called, the register classes have not been sorted
2329 // and assigned EnumValues yet. That means getSubClasses(),
2330 // getSuperClasses(), and hasSubClass() functions are defunct.
2331
2332 // Use one-before-the-end so it doesn't move forward when new elements are
2333 // added.
2334 auto FirstNewRC = std::prev(RegClasses.end());
2335
2336 // Visit all register classes, including the ones being added by the loop.
2337 // Watch out for iterator invalidation here.
2338 for (auto I = RegClasses.begin(), E = RegClasses.end(); I != E; ++I) {
2339 CodeGenRegisterClass *RC = &*I;
2340 if (RC->Artificial)
2341 continue;
2342
2343 // Synthesize answers for getSubClassWithSubReg().
2344 inferSubClassWithSubReg(RC);
2345
2346 // Synthesize answers for getCommonSubClass().
2347 inferCommonSubClass(RC);
2348
2349 // Synthesize answers for getMatchingSuperRegClass().
2350 inferMatchingSuperRegClass(RC);
2351
2352 // New register classes are created while this loop is running, and we need
2353 // to visit all of them. I particular, inferMatchingSuperRegClass needs
2354 // to match old super-register classes with sub-register classes created
2355 // after inferMatchingSuperRegClass was called. At this point,
2356 // inferMatchingSuperRegClass has checked SuperRC = [0..rci] with SubRC =
2357 // [0..FirstNewRC). We need to cover SubRC = [FirstNewRC..rci].
2358 if (I == FirstNewRC) {
2359 auto NextNewRC = std::prev(RegClasses.end());
2360 for (auto I2 = RegClasses.begin(), E2 = std::next(FirstNewRC); I2 != E2;
2361 ++I2)
2362 inferMatchingSuperRegClass(&*I2, E2);
2363 FirstNewRC = NextNewRC;
2364 }
2365 }
2366}
2367
2368/// getRegisterClassForRegister - Find the register class that contains the
2369/// specified physical register. If the register is not in a register class,
2370/// return null. If the register is in multiple classes, and the classes have a
2371/// superset-subset relationship and the same set of types, return the
2372/// superclass. Otherwise return null.
2373const CodeGenRegisterClass*
2374CodeGenRegBank::getRegClassForRegister(Record *R) {
2375 const CodeGenRegister *Reg = getReg(R);
2376 const CodeGenRegisterClass *FoundRC = nullptr;
2377 for (const auto &RC : getRegClasses()) {
2378 if (!RC.contains(Reg))
2379 continue;
2380
2381 // If this is the first class that contains the register,
2382 // make a note of it and go on to the next class.
2383 if (!FoundRC) {
2384 FoundRC = &RC;
2385 continue;
2386 }
2387
2388 // If a register's classes have different types, return null.
2389 if (RC.getValueTypes() != FoundRC->getValueTypes())
2390 return nullptr;
2391
2392 // Check to see if the previously found class that contains
2393 // the register is a subclass of the current class. If so,
2394 // prefer the superclass.
2395 if (RC.hasSubClass(FoundRC)) {
2396 FoundRC = &RC;
2397 continue;
2398 }
2399
2400 // Check to see if the previously found class that contains
2401 // the register is a superclass of the current class. If so,
2402 // prefer the superclass.
2403 if (FoundRC->hasSubClass(&RC))
2404 continue;
2405
2406 // Multiple classes, and neither is a superclass of the other.
2407 // Return null.
2408 return nullptr;
2409 }
2410 return FoundRC;
2411}
2412
2413const CodeGenRegisterClass *
2414CodeGenRegBank::getMinimalPhysRegClass(Record *RegRecord,
2415 ValueTypeByHwMode *VT) {
2416 const CodeGenRegister *Reg = getReg(RegRecord);
2417 const CodeGenRegisterClass *BestRC = nullptr;
2418 for (const auto &RC : getRegClasses()) {
2419 if ((!VT || RC.hasType(*VT)) &&
2420 RC.contains(Reg) && (!BestRC || BestRC->hasSubClass(&RC)))
2421 BestRC = &RC;
2422 }
2423
2424 assert(BestRC && "Couldn't find the register class")(static_cast <bool> (BestRC && "Couldn't find the register class"
) ? void (0) : __assert_fail ("BestRC && \"Couldn't find the register class\""
, "llvm/utils/TableGen/CodeGenRegisters.cpp", 2424, __extension__
__PRETTY_FUNCTION__))
;
2425 return BestRC;
2426}
2427
2428BitVector CodeGenRegBank::computeCoveredRegisters(ArrayRef<Record*> Regs) {
2429 SetVector<const CodeGenRegister*> Set;
2430
2431 // First add Regs with all sub-registers.
2432 for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
2433 CodeGenRegister *Reg = getReg(Regs[i]);
2434 if (Set.insert(Reg))
2435 // Reg is new, add all sub-registers.
2436 // The pre-ordering is not important here.
2437 Reg->addSubRegsPreOrder(Set, *this);
2438 }
2439
2440 // Second, find all super-registers that are completely covered by the set.
2441 for (unsigned i = 0; i != Set.size(); ++i) {
2442 const CodeGenRegister::SuperRegList &SR = Set[i]->getSuperRegs();
2443 for (unsigned j = 0, e = SR.size(); j != e; ++j) {
2444 const CodeGenRegister *Super = SR[j];
2445 if (!Super->CoveredBySubRegs || Set.count(Super))
2446 continue;
2447 // This new super-register is covered by its sub-registers.
2448 bool AllSubsInSet = true;
2449 const CodeGenRegister::SubRegMap &SRM = Super->getSubRegs();
2450 for (auto I : SRM)
2451 if (!Set.count(I.second)) {
2452 AllSubsInSet = false;
2453 break;
2454 }
2455 // All sub-registers in Set, add Super as well.
2456 // We will visit Super later to recheck its super-registers.
2457 if (AllSubsInSet)
2458 Set.insert(Super);
2459 }
2460 }
2461
2462 // Convert to BitVector.
2463 BitVector BV(Registers.size() + 1);
2464 for (unsigned i = 0, e = Set.size(); i != e; ++i)
2465 BV.set(Set[i]->EnumValue);
2466 return BV;
2467}
2468
2469void CodeGenRegBank::printRegUnitName(unsigned Unit) const {
2470 if (Unit < NumNativeRegUnits)
2471 dbgs() << ' ' << RegUnits[Unit].Roots[0]->getName();
2472 else
2473 dbgs() << " #" << Unit;
2474}

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/MC/LaneBitmask.h

1//===- llvm/MC/LaneBitmask.h ------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// A common definition of LaneBitmask for use in TableGen and CodeGen.
11///
12/// A lane mask is a bitmask representing the covering of a register with
13/// sub-registers.
14///
15/// This is typically used to track liveness at sub-register granularity.
16/// Lane masks for sub-register indices are similar to register units for
17/// physical registers. The individual bits in a lane mask can't be assigned
18/// any specific meaning. They can be used to check if two sub-register
19/// indices overlap.
20///
21/// Iff the target has a register such that:
22///
23/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
24///
25/// then:
26///
27/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
28
29#ifndef LLVM_MC_LANEBITMASK_H
30#define LLVM_MC_LANEBITMASK_H
31
32#include "llvm/Support/Compiler.h"
33#include "llvm/Support/Format.h"
34#include "llvm/Support/MathExtras.h"
35#include "llvm/Support/Printable.h"
36#include "llvm/Support/raw_ostream.h"
37
38namespace llvm {
39
40 struct LaneBitmask {
41 // When changing the underlying type, change the format string as well.
42 using Type = uint64_t;
43 enum : unsigned { BitWidth = 8*sizeof(Type) };
44 constexpr static const char *const FormatStr = "%016llX";
45
46 constexpr LaneBitmask() = default;
47 explicit constexpr LaneBitmask(Type V) : Mask(V) {}
48
49 constexpr bool operator== (LaneBitmask M) const { return Mask == M.Mask; }
50 constexpr bool operator!= (LaneBitmask M) const { return Mask != M.Mask; }
51 constexpr bool operator< (LaneBitmask M) const { return Mask < M.Mask; }
52 constexpr bool none() const { return Mask == 0; }
53 constexpr bool any() const { return Mask != 0; }
54 constexpr bool all() const { return ~Mask == 0; }
55
56 constexpr LaneBitmask operator~() const {
57 return LaneBitmask(~Mask);
58 }
59 constexpr LaneBitmask operator|(LaneBitmask M) const {
60 return LaneBitmask(Mask | M.Mask);
61 }
62 constexpr LaneBitmask operator&(LaneBitmask M) const {
63 return LaneBitmask(Mask & M.Mask);
64 }
65 LaneBitmask &operator|=(LaneBitmask M) {
66 Mask |= M.Mask;
67 return *this;
68 }
69 LaneBitmask &operator&=(LaneBitmask M) {
70 Mask &= M.Mask;
71 return *this;
72 }
73
74 constexpr Type getAsInteger() const { return Mask; }
75
76 unsigned getNumLanes() const {
77 return countPopulation(Mask);
78 }
79 unsigned getHighestLane() const {
80 return Log2_64(Mask);
5
Calling 'Log2_64'
7
Returning from 'Log2_64'
8
Returning the value 4294967295
81 }
82
83 static constexpr LaneBitmask getNone() { return LaneBitmask(0); }
84 static constexpr LaneBitmask getAll() { return ~LaneBitmask(0); }
85 static constexpr LaneBitmask getLane(unsigned Lane) {
86 return LaneBitmask(Type(1) << Lane);
13
The result of the left shift is undefined due to shifting by '4294967295', which is greater or equal to the width of type 'llvm::LaneBitmask::Type'
87 }
88
89 private:
90 Type Mask = 0;
91 };
92
93 /// Create Printable object to print LaneBitmasks on a \ref raw_ostream.
94 inline Printable PrintLaneMask(LaneBitmask LaneMask) {
95 return Printable([LaneMask](raw_ostream &OS) {
96 OS << format(LaneBitmask::FormatStr, LaneMask.getAsInteger());
97 });
98 }
99
100} // end namespace llvm
101
102#endif // LLVM_MC_LANEBITMASK_H

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some functions that are useful for math stuff.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_SUPPORT_MATHEXTRAS_H
14#define LLVM_SUPPORT_MATHEXTRAS_H
15
16#include "llvm/Support/Compiler.h"
17#include <cassert>
18#include <climits>
19#include <cmath>
20#include <cstdint>
21#include <cstring>
22#include <limits>
23#include <type_traits>
24
25#ifdef __ANDROID_NDK__
26#include <android/api-level.h>
27#endif
28
29#ifdef _MSC_VER
30// Declare these intrinsics manually rather including intrin.h. It's very
31// expensive, and MathExtras.h is popular.
32// #include <intrin.h>
33extern "C" {
34unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
35unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
36unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
37unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
38}
39#endif
40
41namespace llvm {
42
43/// The behavior an operation has on an input of 0.
44enum ZeroBehavior {
45 /// The returned value is undefined.
46 ZB_Undefined,
47 /// The returned value is numeric_limits<T>::max()
48 ZB_Max,
49 /// The returned value is numeric_limits<T>::digits
50 ZB_Width
51};
52
/// Mathematical constants.
namespace numbers {
// TODO: Track C++20 std::numbers.
// TODO: Favor using the hexadecimal FP constants (requires C++17).
constexpr double e          = 2.7182818284590452354,  // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
                 egamma     = .57721566490153286061,  // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
                 ln2        = .69314718055994530942,  // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
                 ln10       = 2.3025850929940456840,  // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392
                 log2e      = 1.4426950408889634074,  // (0x1.71547652b82feP+0)
                 log10e     = .43429448190325182765,  // (0x1.bcb7b1526e50eP-2)
                 pi         = 3.1415926535897932385,  // (0x1.921fb54442d18P+1) https://oeis.org/A000796
                 inv_pi     = .31830988618379067154,  // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
                 sqrtpi     = 1.7724538509055160273,  // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
                 inv_sqrtpi = .56418958354775628695,  // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
                 sqrt2      = 1.4142135623730950488,  // (0x1.6a09e667f3bcdP+0) https://oeis.org/A00219
                 inv_sqrt2  = .70710678118654752440,  // (0x1.6a09e667f3bcdP-1)
                 sqrt3      = 1.7320508075688772935,  // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
                 inv_sqrt3  = .57735026918962576451,  // (0x1.279a74590331cP-1)
                 phi        = 1.6180339887498948482;  // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
constexpr float ef          = 2.71828183F,  // (0x1.5bf0a8P+1) https://oeis.org/A001113
                egammaf     = .577215665F,  // (0x1.2788d0P-1) https://oeis.org/A001620
                ln2f        = .693147181F,  // (0x1.62e430P-1) https://oeis.org/A002162
                ln10f       = 2.30258509F,  // (0x1.26bb1cP+1) https://oeis.org/A002392
                log2ef      = 1.44269504F,  // (0x1.715476P+0)
                log10ef     = .434294482F,  // (0x1.bcb7b2P-2)
                pif         = 3.14159265F,  // (0x1.921fb6P+1) https://oeis.org/A000796
                inv_pif     = .318309886F,  // (0x1.45f306P-2) https://oeis.org/A049541
                sqrtpif     = 1.77245385F,  // (0x1.c5bf8aP+0) https://oeis.org/A002161
                inv_sqrtpif = .564189584F,  // (0x1.20dd76P-1) https://oeis.org/A087197
                sqrt2f      = 1.41421356F,  // (0x1.6a09e6P+0) https://oeis.org/A002193
                inv_sqrt2f  = .707106781F,  // (0x1.6a09e6P-1)
                sqrt3f      = 1.73205081F,  // (0x1.bb67aeP+0) https://oeis.org/A002194
                inv_sqrt3f  = .577350269F,  // (0x1.279a74P-1)
                phif        = 1.61803399F;  // (0x1.9e377aP+0) https://oeis.org/A001622
} // namespace numbers
88
89namespace detail {
90template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
91 static unsigned count(T Val, ZeroBehavior) {
92 if (!Val)
93 return std::numeric_limits<T>::digits;
94 if (Val & 0x1)
95 return 0;
96
97 // Bisection method.
98 unsigned ZeroBits = 0;
99 T Shift = std::numeric_limits<T>::digits >> 1;
100 T Mask = std::numeric_limits<T>::max() >> Shift;
101 while (Shift) {
102 if ((Val & Mask) == 0) {
103 Val >>= Shift;
104 ZeroBits |= Shift;
105 }
106 Shift >>= 1;
107 Mask >>= Shift;
108 }
109 return ZeroBits;
110 }
111};
112
113#if defined(__GNUC__4) || defined(_MSC_VER)
114template <typename T> struct TrailingZerosCounter<T, 4> {
115 static unsigned count(T Val, ZeroBehavior ZB) {
116 if (ZB != ZB_Undefined && Val == 0)
117 return 32;
118
119#if __has_builtin(__builtin_ctz)1 || defined(__GNUC__4)
120 return __builtin_ctz(Val);
121#elif defined(_MSC_VER)
122 unsigned long Index;
123 _BitScanForward(&Index, Val);
124 return Index;
125#endif
126 }
127};
128
129#if !defined(_MSC_VER) || defined(_M_X64)
130template <typename T> struct TrailingZerosCounter<T, 8> {
131 static unsigned count(T Val, ZeroBehavior ZB) {
132 if (ZB != ZB_Undefined && Val == 0)
133 return 64;
134
135#if __has_builtin(__builtin_ctzll)1 || defined(__GNUC__4)
136 return __builtin_ctzll(Val);
137#elif defined(_MSC_VER)
138 unsigned long Index;
139 _BitScanForward64(&Index, Val);
140 return Index;
141#endif
142 }
143};
144#endif
145#endif
146} // namespace detail
147
148/// Count number of 0's from the least significant bit to the most
149/// stopping at the first 1.
150///
151/// Only unsigned integral types are allowed.
152///
153/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
154/// valid arguments.
155template <typename T>
156unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
157 static_assert(std::numeric_limits<T>::is_integer &&
158 !std::numeric_limits<T>::is_signed,
159 "Only unsigned integral types are allowed.");
160 return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
161}
162
163namespace detail {
164template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
165 static unsigned count(T Val, ZeroBehavior) {
166 if (!Val)
167 return std::numeric_limits<T>::digits;
168
169 // Bisection method.
170 unsigned ZeroBits = 0;
171 for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
172 T Tmp = Val >> Shift;
173 if (Tmp)
174 Val = Tmp;
175 else
176 ZeroBits |= Shift;
177 }
178 return ZeroBits;
179 }
180};
181
182#if defined(__GNUC__4) || defined(_MSC_VER)
183template <typename T> struct LeadingZerosCounter<T, 4> {
184 static unsigned count(T Val, ZeroBehavior ZB) {
185 if (ZB != ZB_Undefined && Val == 0)
186 return 32;
187
188#if __has_builtin(__builtin_clz)1 || defined(__GNUC__4)
189 return __builtin_clz(Val);
190#elif defined(_MSC_VER)
191 unsigned long Index;
192 _BitScanReverse(&Index, Val);
193 return Index ^ 31;
194#endif
195 }
196};
197
198#if !defined(_MSC_VER) || defined(_M_X64)
199template <typename T> struct LeadingZerosCounter<T, 8> {
200 static unsigned count(T Val, ZeroBehavior ZB) {
201 if (ZB != ZB_Undefined && Val == 0)
202 return 64;
203
204#if __has_builtin(__builtin_clzll)1 || defined(__GNUC__4)
205 return __builtin_clzll(Val);
206#elif defined(_MSC_VER)
207 unsigned long Index;
208 _BitScanReverse64(&Index, Val);
209 return Index ^ 63;
210#endif
211 }
212};
213#endif
214#endif
215} // namespace detail
216
217/// Count number of 0's from the most significant bit to the least
218/// stopping at the first 1.
219///
220/// Only unsigned integral types are allowed.
221///
222/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
223/// valid arguments.
224template <typename T>
225unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
226 static_assert(std::numeric_limits<T>::is_integer &&
227 !std::numeric_limits<T>::is_signed,
228 "Only unsigned integral types are allowed.");
229 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
230}
231
232/// Get the index of the first set bit starting from the least
233/// significant bit.
234///
235/// Only unsigned integral types are allowed.
236///
237/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
238/// valid arguments.
239template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
240 if (ZB == ZB_Max && Val == 0)
241 return std::numeric_limits<T>::max();
242
243 return countTrailingZeros(Val, ZB_Undefined);
244}
245
/// Create a bitmask with the N right-most bits set to 1, and all other
/// bits set to 0. Only unsigned types are allowed.
template <typename T> T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned<T>::value, "Invalid type!");
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  // N == 0 is handled separately: T(-1) >> Bits would be undefined behavior.
  return N == 0 ? 0 : (T(-1) >> (Bits - N));
}

/// Create a bitmask with the N left-most bits set to 1, and all other
/// bits set to 0. Only unsigned types are allowed.
template <typename T> T maskLeadingOnes(unsigned N) {
  return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N right-most bits set to 0, and all other
/// bits set to 1. Only unsigned types are allowed.
template <typename T> T maskTrailingZeros(unsigned N) {
  return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N left-most bits set to 0, and all other
/// bits set to 1. Only unsigned types are allowed.
template <typename T> T maskLeadingZeros(unsigned N) {
  return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
272
273/// Get the index of the last set bit starting from the least
274/// significant bit.
275///
276/// Only unsigned integral types are allowed.
277///
278/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
279/// valid arguments.
280template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
281 if (ZB == ZB_Max && Val == 0)
282 return std::numeric_limits<T>::max();
283
284 // Use ^ instead of - because both gcc and llvm can remove the associated ^
285 // in the __builtin_clz intrinsic on x86.
286 return countLeadingZeros(Val, ZB_Undefined) ^
287 (std::numeric_limits<T>::digits - 1);
288}
289
/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
    R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};

/// Reverse the bits in \p Val.
///
/// Works byte-wise: the bytes are reversed via the copy loop and the bits
/// within each byte are reversed through the lookup table.
template <typename T>
T reverseBits(T Val) {
  unsigned char in[sizeof(Val)];
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}
314
// Prefer single-instruction bit-reversal builtins over the table-based
// generic implementation when the compiler provides them.
#if __has_builtin(__builtin_bitreverse8)
template<>
inline uint8_t reverseBits<uint8_t>(uint8_t Val) {
  return __builtin_bitreverse8(Val);
}
#endif

#if __has_builtin(__builtin_bitreverse16)
template<>
inline uint16_t reverseBits<uint16_t>(uint16_t Val) {
  return __builtin_bitreverse16(Val);
}
#endif

#if __has_builtin(__builtin_bitreverse32)
template<>
inline uint32_t reverseBits<uint32_t>(uint32_t Val) {
  return __builtin_bitreverse32(Val);
}
#endif

#if __has_builtin(__builtin_bitreverse64)
template<>
inline uint64_t reverseBits<uint64_t>(uint64_t Val) {
  return __builtin_bitreverse64(Val);
}
#endif
342
// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  return static_cast<uint32_t>(Value >> 32);
}

/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  // Truncating cast discards the upper 32 bits.
  return static_cast<uint32_t>(Value);
}

/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  return ((uint64_t)High << 32) | (uint64_t)Low;
}
361
/// Checks if an integer fits into the given bit width.
template <unsigned N> constexpr inline bool isInt(int64_t x) {
  // N >= 64 is checked first so the shifts below never exceed the type width.
  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
// Template specializations to get better code for common cases.
template <> constexpr inline bool isInt<8>(int64_t x) {
  return static_cast<int8_t>(x) == x;
}
template <> constexpr inline bool isInt<16>(int64_t x) {
  return static_cast<int16_t>(x) == x;
}
template <> constexpr inline bool isInt<32>(int64_t x) {
  return static_cast<int32_t>(x) == x;
}

/// Checks if a signed integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedInt(int64_t x) {
  static_assert(
      N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
  static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
  return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
385
/// Checks if an unsigned integer fits into the given bit width.
///
/// This is written as two functions rather than as simply
///
///   return N >= 64 || X < (UINT64_C(1) << N);
///
/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
/// left too many places.
template <unsigned N>
constexpr inline std::enable_if_t<(N < 64), bool> isUInt(uint64_t X) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  return X < (UINT64_C(1) << (N));
}
template <unsigned N>
constexpr inline std::enable_if_t<N >= 64, bool> isUInt(uint64_t) {
  // Every uint64_t fits in 64 or more bits.
  return true;
}

// Template specializations to get better code for common cases.
template <> constexpr inline bool isUInt<8>(uint64_t x) {
  return static_cast<uint8_t>(x) == x;
}
template <> constexpr inline bool isUInt<16>(uint64_t x) {
  return static_cast<uint16_t>(x) == x;
}
template <> constexpr inline bool isUInt<32>(uint64_t x) {
  return static_cast<uint32_t>(x) == x;
}

/// Checks if a unsigned integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedUInt(uint64_t x) {
  static_assert(
      N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
  static_assert(N + S <= 64,
                "isShiftedUInt<N, S> with N + S > 64 is too wide.");
  // Per the two static_asserts above, S must be strictly less than 64. So
  // 1 << S is not undefined behavior.
  return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
426
/// Gets the maximum value for a N-bit unsigned integer.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64.  But this works and doesn't have a
  // branch.
  return UINT64_MAX >> (64 - N);
}

/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // "1 + ~x" is -x without triggering MSVC's unary-minus-on-unsigned warning.
  return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
}

/// Gets the maximum value for a N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // This relies on two's complement wraparound when N == 64, so we convert to
  // int64_t only at the very end to avoid UB.
  return (UINT64_C(1) << (N - 1)) - 1;
}

/// Checks if an unsigned integer fits into the given (dynamic) bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
  return N >= 64 || x <= maxUIntN(N);
}

/// Checks if an signed integer fits into the given (dynamic) bit width.
inline bool isIntN(unsigned N, int64_t x) {
  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
}
463
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  // For a mask 2^k - 1, adding 1 gives 2^k, which shares no bits with it.
  return Value && ((Value + 1) & Value) == 0;
}

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  // (Value - 1) | Value fills the trailing zeros, reducing to the mask test.
  return Value && isMask_32((Value - 1) | Value);
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  return Value && isMask_64((Value - 1) | Value);
}
488
/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  // A power of two has exactly one set bit; clearing the lowest set bit
  // (Value & (Value - 1)) must therefore yield zero.
  return Value && !(Value & (Value - 1));
}

/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value && !(Value & (Value - 1));
}
499
500/// Count the number of ones from the most significant bit to the first
501/// zero bit.
502///
503/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
504/// Only unsigned integral types are allowed.
505///
506/// \param ZB the behavior on an input of all ones. Only ZB_Width and
507/// ZB_Undefined are valid arguments.
508template <typename T>
509unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
510 static_assert(std::numeric_limits<T>::is_integer &&
511 !std::numeric_limits<T>::is_signed,
512 "Only unsigned integral types are allowed.");
513 return countLeadingZeros<T>(~Value, ZB);
514}
515
516/// Count the number of ones from the least significant bit to the first
517/// zero bit.
518///
519/// Ex. countTrailingOnes(0x00FF00FF) == 8.
520/// Only unsigned integral types are allowed.
521///
522/// \param ZB the behavior on an input of all ones. Only ZB_Width and
523/// ZB_Undefined are valid arguments.
524template <typename T>
525unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
526 static_assert(std::numeric_limits<T>::is_integer &&
527 !std::numeric_limits<T>::is_signed,
528 "Only unsigned integral types are allowed.");
529 return countTrailingZeros<T>(~Value, ZB);
530}
531
namespace detail {
/// Generic popcount for types of at most 4 bytes; the bit-twiddling fallback
/// is the classic SWAR (SIMD-within-a-register) algorithm.
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if defined(__GNUC__)
    return __builtin_popcount(Value);
#else
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};

/// 8-byte specialization using the 64-bit SWAR algorithm or a builtin.
template <typename T> struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if defined(__GNUC__)
    return __builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
573
574/// Return true if the argument contains a non-empty sequence of ones with the
575/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
576/// If true, \p MaskIdx will specify the index of the lowest set bit and \p
577/// MaskLen is updated to specify the length of the mask, else neither are
578/// updated.
579inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx,
580 unsigned &MaskLen) {
581 if (!isShiftedMask_32(Value))
582 return false;
583 MaskIdx = countTrailingZeros(Value);
584 MaskLen = countPopulation(Value);
585 return true;
586}
587
588/// Return true if the argument contains a non-empty sequence of ones with the
589/// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index
590/// of the lowest set bit and \p MaskLen is updated to specify the length of the
591/// mask, else neither are updated.
592inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx,
593 unsigned &MaskLen) {
594 if (!isShiftedMask_64(Value))
595 return false;
596 MaskIdx = countTrailingZeros(Value);
597 MaskLen = countPopulation(Value);
598 return true;
599}
600
601/// Compile time Log2.
602/// Valid only for positive powers of two.
603template <size_t kValue> constexpr inline size_t CTLog2() {
604 static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
605 "Value is not a valid power of 2");
606 return 1 + CTLog2<kValue / 2>();
607}
608
609template <> constexpr inline size_t CTLog2<1>() { return 0; }
610
/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android NDKs lack log2(); derive it from natural logarithms.
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}
619
620/// Return the floor log base 2 of the specified value, -1 if the value is zero.
621/// (32 bit edition.)
622/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
623inline unsigned Log2_32(uint32_t Value) {
624 return 31 - countLeadingZeros(Value);
625}
626
627/// Return the floor log base 2 of the specified value, -1 if the value is zero.
628/// (64 bit edition.)
629inline unsigned Log2_64(uint64_t Value) {
630 return 63 - countLeadingZeros(Value);
6
Returning the value 4294967295
631}
632
633/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
634/// (32 bit edition).
635/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
636inline unsigned Log2_32_Ceil(uint32_t Value) {
637 return 32 - countLeadingZeros(Value - 1);
638}
639
640/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
641/// (64 bit edition.)
642inline unsigned Log2_64_Ceil(uint64_t Value) {
643 return 64 - countLeadingZeros(Value - 1);
644}
645
/// Return the greatest common divisor of the values using Euclid's algorithm.
template <typename T>
inline T greatestCommonDivisor(T A, T B) {
  while (B) {
    T Tmp = B;
    B = A % B;
    A = Tmp;
  }
  return A;
}

/// Convenience 64-bit wrapper, avoiding template-argument deduction at call
/// sites that mix integer types.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  return greatestCommonDivisor<uint64_t>(A, B);
}
660
/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  // memcpy is the portable way to type-pun; it avoids strict-aliasing UB.
  double D;
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  memcpy(&D, &Bits, sizeof(Bits));
  return D;
}

/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  float F;
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  memcpy(&F, &Bits, sizeof(Bits));
  return F;
}

/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  uint64_t Bits;
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  memcpy(&Bits, &Double, sizeof(Double));
  return Bits;
}

/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  uint32_t Bits;
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  memcpy(&Bits, &Float, sizeof(Float));
  return Bits;
}
696
/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // The largest power of 2 that divides both A and B.
  //
  // Replace "-Value" by "1+~Value" in the following commented code to avoid
  // MSVC warning C4146
  //   return (A | B) & -(A | B);
  return (A | B) & (1 + ~(A | B));
}
707
/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
constexpr inline uint64_t NextPowerOf2(uint64_t A) {
  // Smear the highest set bit into every lower position, producing
  // 2^k - 1 >= A; adding one then yields the next power of two.
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}
719
720/// Returns the power of two which is less than or equal to the given value.
721/// Essentially, it is a floor operation across the domain of powers of two.
722inline uint64_t PowerOf2Floor(uint64_t A) {
723 if (!A) return 0;
724 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
725}
726
727/// Returns the power of two which is greater than or equal to the given value.
728/// Essentially, it is a ceil operation across the domain of powers of two.
729inline uint64_t PowerOf2Ceil(uint64_t A) {
730 if (!A)
731 return 0;
732 return NextPowerOf2(A - 1);
733}
734
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  return (Value + Align - 1) / Align * Align;
}

/// Returns the integer ceil(Numerator / Denominator).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  return alignTo(Numerator, Denominator) / Denominator;
}

/// Returns the integer nearest(Numerator / Denominator).
inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
  return (Numerator + (Denominator / 2)) / Denominator;
}
777
/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value - Skew) / Align * Align + Skew;
}
785
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  // Shift the B-bit field up to the sign bit, then arithmetic-shift back.
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}
801
/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  // Shift the B-bit field up to the sign bit, then arithmetic-shift back.
  return int64_t(x << (64 - B)) >> (64 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  return int64_t(X << (64 - B)) >> (64 - B);
}
817
/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) {
  // Subtract the smaller from the larger so the unsigned result never wraps.
  return X > Y ? (X - Y) : (Y - X);
}
824
/// Add two unsigned integers, X and Y, of type T.  Clamp the result to the
/// maximum representable value of T on overflow.  ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  // Hacker's Delight, p. 29: unsigned overflow iff the sum wrapped below
  // either operand.
  T Z = X + Y;
  Overflowed = (Z < X || Z < Y);
  if (Overflowed)
    return std::numeric_limits<T>::max();
  else
    return Z;
}
841
/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  // Route the overflow flag to the caller's bool if one was supplied,
  // otherwise to a local that is simply discarded.
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  // True log2 of X*Y is at most Log2Z + 1, which is still <= Log2Max here,
  // so the product provably fits in T.
  if (Log2Z < Log2Max) {
    return X * Y;
  }
  // True log2 of X*Y is at least Log2Z > Log2Max: provably overflows.
  if (Log2Z > Log2Max) {
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  // If any bit above the top position of Max>>1 is set, doubling Z below
  // would lose it: saturate now.
  if (Z & ~(Max >> 1)) {
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  // Restore the contribution of X's dropped low bit (X was halved above);
  // that final add can itself saturate, so delegate to SaturatingAdd.
  if (X & 1)
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}
886
/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  // Overflow status goes to the caller's bool if supplied, else a scratch
  // local.
  bool Scratch;
  bool &OverflowFlag = ResultOverflowed ? *ResultOverflowed : Scratch;

  // Saturate on the multiply first; once it has clamped to T's maximum,
  // adding A cannot change the saturated answer, so return immediately.
  const T Product = SaturatingMultiply(X, Y, &OverflowFlag);
  if (OverflowFlag)
    return Product;

  // Fold in the addend, which may itself saturate.
  return SaturatingAdd(A, Product, &OverflowFlag);
}
903
904/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
905extern const float huge_valf;
906
907
/// Add two signed integers, computing the two's complement truncated result,
/// returning true if overflow occurred.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, T> AddOverflow(T X, T Y, T &Result) {
#if __has_builtin(__builtin_add_overflow)
  // Compiler intrinsic: computes the wrapped sum into Result and reports
  // whether signed overflow happened.
  return __builtin_add_overflow(X, Y, &Result);
#else
  // Perform the addition in the unsigned domain, where wraparound is
  // well-defined (a signed overflow would be undefined behavior).
  using U = std::make_unsigned_t<T>;
  const U UX = static_cast<U>(X);
  const U UY = static_cast<U>(Y);
  const U UResult = UX + UY;

  // Convert to signed (two's complement truncation).
  Result = static_cast<T>(UResult);

  // Adding two positive numbers should result in a positive number.
  if (X > 0 && Y > 0)
    return Result <= 0;
  // Adding two negatives should result in a negative number.
  if (X < 0 && Y < 0)
    return Result >= 0;
  // Mixed signs can never overflow.
  return false;
#endif
}
933
/// Subtract two signed integers, computing the two's complement truncated
/// result, returning true if an overflow occurred.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, T> SubOverflow(T X, T Y, T &Result) {
#if __has_builtin(__builtin_sub_overflow)
  // Compiler intrinsic: computes the wrapped difference into Result and
  // reports whether signed overflow happened.
  return __builtin_sub_overflow(X, Y, &Result);
#else
  // Perform the subtraction in the unsigned domain, where wraparound is
  // well-defined (a signed overflow would be undefined behavior).
  using U = std::make_unsigned_t<T>;
  const U UX = static_cast<U>(X);
  const U UY = static_cast<U>(Y);
  const U UResult = UX - UY;

  // Convert to signed (two's complement truncation).
  Result = static_cast<T>(UResult);

  // Subtracting a positive number from a non-positive one should yield a
  // negative (or smaller) value; a non-negative result means we wrapped.
  if (X <= 0 && Y > 0)
    return Result >= 0;
  // Subtracting a negative number from a non-negative one should yield a
  // positive (or larger) value; a non-positive result means we wrapped.
  if (X >= 0 && Y < 0)
    return Result <= 0;
  // Same signs can never overflow on subtraction.
  return false;
#endif
}
959
/// Multiply two signed integers, computing the two's complement truncated
/// result, returning true if an overflow occurred.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, T> MulOverflow(T X, T Y, T &Result) {
#if __has_builtin(__builtin_mul_overflow)
  // Compiler intrinsic, for consistency with AddOverflow/SubOverflow:
  // computes the wrapped product into Result and reports signed overflow.
  return __builtin_mul_overflow(X, Y, &Result);
#else
  // Perform the unsigned multiplication on absolute values; unsigned
  // wraparound is well-defined, and negating via 0 - U avoids UB on
  // the most-negative value.
  using U = std::make_unsigned_t<T>;
  const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
  const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
  const U UResult = UX * UY;

  // Convert to signed, reapplying the sign of the true product.
  const bool IsNegative = (X < 0) ^ (Y < 0);
  Result = IsNegative ? (0 - UResult) : UResult;

  // If any of the args was 0, result is 0 and no overflow occurs.
  if (UX == 0 || UY == 0)
    return false;

  // UX and UY are in [1, 2^n], where n is the number of digits.
  // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
  // positive) divided by an argument compares to the other.
  if (IsNegative)
    return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
  else
    return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
#endif
}
986
987} // End llvm namespace
988
989#endif