Bug Summary

File: lib/CodeGen/AggressiveAntiDepBreaker.cpp
Warning: line 794, column 22
Called C++ object pointer is null
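
The warning refers to the dereference CriticalPathSU->getInstr() on line 794 of BreakAntiDependencies (see the numbered path notes in the listing below). As a rough illustration only, not the LLVM code itself, the shape the analyzer objects to is a pointer that starts out null, a selection loop that is assumed to run zero times, and an unconditional dereference afterwards; all names in this sketch are made up:

#include <vector>

struct Node {
  int value() const { return 42; }
};

// Sketch of the flagged pattern: Best stays null if the loop body never runs,
// and the final dereference is then a call through a null pointer.
int pickLargest(const std::vector<Node> &Nodes) {
  const Node *Best = nullptr;            // starts out null (cf. line 782)
  for (const Node &N : Nodes)            // assumed to execute zero times (cf. line 785)
    if (!Best || N.value() > Best->value())
      Best = &N;
  return Best->value();                  // dereference without a null check (cf. line 794)
}

int main() {
  std::vector<Node> Empty;
  return pickLargest(Empty);             // an empty container reproduces the path at runtime
}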

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AggressiveAntiDepBreaker.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn372306/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn372306/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn372306/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn372306/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn372306/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn372306=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-09-19-172240-30738-1 -x c++ /build/llvm-toolchain-snapshot-10~svn372306/lib/CodeGen/AggressiveAntiDepBreaker.cpp

/build/llvm-toolchain-snapshot-10~svn372306/lib/CodeGen/AggressiveAntiDepBreaker.cpp

1//===- AggressiveAntiDepBreaker.cpp - Anti-dep breaker --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the AggressiveAntiDepBreaker class, which
10// implements register anti-dependence breaking during post-RA
11// scheduling. It attempts to break all anti-dependencies within a
12// block.
13//
14//===----------------------------------------------------------------------===//
15
16#include "AggressiveAntiDepBreaker.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/SmallSet.h"
20#include "llvm/ADT/iterator_range.h"
21#include "llvm/CodeGen/MachineBasicBlock.h"
22#include "llvm/CodeGen/MachineFrameInfo.h"
23#include "llvm/CodeGen/MachineFunction.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineOperand.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/RegisterClassInfo.h"
28#include "llvm/CodeGen/ScheduleDAG.h"
29#include "llvm/CodeGen/TargetInstrInfo.h"
30#include "llvm/CodeGen/TargetRegisterInfo.h"
31#include "llvm/CodeGen/TargetSubtargetInfo.h"
32#include "llvm/MC/MCInstrDesc.h"
33#include "llvm/MC/MCRegisterInfo.h"
34#include "llvm/Support/CommandLine.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/MachineValueType.h"
37#include "llvm/Support/raw_ostream.h"
38#include <cassert>
39#include <map>
40#include <set>
41#include <utility>
42#include <vector>
43
44using namespace llvm;
45
46#define DEBUG_TYPE "post-RA-sched"
47
48// If DebugDiv > 0 then only break antidep with (ID % DebugDiv) == DebugMod
49static cl::opt<int>
50DebugDiv("agg-antidep-debugdiv",
51 cl::desc("Debug control for aggressive anti-dep breaker"),
52 cl::init(0), cl::Hidden);
53
54static cl::opt<int>
55DebugMod("agg-antidep-debugmod",
56 cl::desc("Debug control for aggressive anti-dep breaker"),
57 cl::init(0), cl::Hidden);
58
59AggressiveAntiDepState::AggressiveAntiDepState(const unsigned TargetRegs,
60 MachineBasicBlock *BB)
61 : NumTargetRegs(TargetRegs), GroupNodes(TargetRegs, 0),
62 GroupNodeIndices(TargetRegs, 0), KillIndices(TargetRegs, 0),
63 DefIndices(TargetRegs, 0) {
64 const unsigned BBSize = BB->size();
65 for (unsigned i = 0; i < NumTargetRegs; ++i) {
66 // Initialize all registers to be in their own group. Initially we
67 // assign the register to the same-indexed GroupNode.
68 GroupNodeIndices[i] = i;
69 // Initialize the indices to indicate that no registers are live.
70 KillIndices[i] = ~0u;
71 DefIndices[i] = BBSize;
72 }
73}
74
75unsigned AggressiveAntiDepState::GetGroup(unsigned Reg) {
76 unsigned Node = GroupNodeIndices[Reg];
77 while (GroupNodes[Node] != Node)
78 Node = GroupNodes[Node];
79
80 return Node;
81}
82
83void AggressiveAntiDepState::GetGroupRegs(
84 unsigned Group,
85 std::vector<unsigned> &Regs,
86 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference> *RegRefs)
87{
88 for (unsigned Reg = 0; Reg != NumTargetRegs; ++Reg) {
89 if ((GetGroup(Reg) == Group) && (RegRefs->count(Reg) > 0))
90 Regs.push_back(Reg);
91 }
92}
93
94unsigned AggressiveAntiDepState::UnionGroups(unsigned Reg1, unsigned Reg2) {
95 assert(GroupNodes[0] == 0 && "GroupNode 0 not parent!");
96 assert(GroupNodeIndices[0] == 0 && "Reg 0 not in Group 0!");
97
98 // find group for each register
99 unsigned Group1 = GetGroup(Reg1);
100 unsigned Group2 = GetGroup(Reg2);
101
102 // if either group is 0, then that must become the parent
103 unsigned Parent = (Group1 == 0) ? Group1 : Group2;
104 unsigned Other = (Parent == Group1) ? Group2 : Group1;
105 GroupNodes.at(Other) = Parent;
106 return Parent;
107}
108
109unsigned AggressiveAntiDepState::LeaveGroup(unsigned Reg) {
110 // Create a new GroupNode for Reg. Reg's existing GroupNode must
111 // stay as is because there could be other GroupNodes referring to
112 // it.
113 unsigned idx = GroupNodes.size();
114 GroupNodes.push_back(idx);
115 GroupNodeIndices[Reg] = idx;
116 return idx;
117}
118
119bool AggressiveAntiDepState::IsLive(unsigned Reg) {
120 // KillIndex must be defined and DefIndex not defined for a register
121 // to be live.
122 return((KillIndices[Reg] != ~0u) && (DefIndices[Reg] == ~0u));
123}
124
125AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
126 MachineFunction &MFi, const RegisterClassInfo &RCI,
127 TargetSubtargetInfo::RegClassVector &CriticalPathRCs)
128 : AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
129 TII(MF.getSubtarget().getInstrInfo()),
130 TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI) {
131 /* Collect a bitset of all registers that are only broken if they
132 are on the critical path. */
133 for (unsigned i = 0, e = CriticalPathRCs.size(); i < e; ++i) {
134 BitVector CPSet = TRI->getAllocatableSet(MF, CriticalPathRCs[i]);
135 if (CriticalPathSet.none())
136 CriticalPathSet = CPSet;
137 else
138 CriticalPathSet |= CPSet;
139 }
140
141 LLVM_DEBUG(dbgs() << "AntiDep Critical-Path Registers:");
142 LLVM_DEBUG(for (unsigned r
143 : CriticalPathSet.set_bits()) dbgs()
144 << " " << printReg(r, TRI));
145 LLVM_DEBUG(dbgs() << '\n');
146}
147
148AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() {
149 delete State;
150}
151
152void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
153 assert(!State);
154 State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
155
156 bool IsReturnBlock = BB->isReturnBlock();
157 std::vector<unsigned> &KillIndices = State->GetKillIndices();
158 std::vector<unsigned> &DefIndices = State->GetDefIndices();
159
160 // Examine the live-in regs of all successors.
161 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
162 SE = BB->succ_end(); SI != SE; ++SI)
163 for (const auto &LI : (*SI)->liveins()) {
164 for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI) {
165 unsigned Reg = *AI;
166 State->UnionGroups(Reg, 0);
167 KillIndices[Reg] = BB->size();
168 DefIndices[Reg] = ~0u;
169 }
170 }
171
172 // Mark live-out callee-saved registers. In a return block this is
173 // all callee-saved registers. In non-return this is any
174 // callee-saved register that is not saved in the prolog.
175 const MachineFrameInfo &MFI = MF.getFrameInfo();
176 BitVector Pristine = MFI.getPristineRegs(MF);
177 for (const MCPhysReg *I = MF.getRegInfo().getCalleeSavedRegs(); *I;
178 ++I) {
179 unsigned Reg = *I;
180 if (!IsReturnBlock && !Pristine.test(Reg))
181 continue;
182 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
183 unsigned AliasReg = *AI;
184 State->UnionGroups(AliasReg, 0);
185 KillIndices[AliasReg] = BB->size();
186 DefIndices[AliasReg] = ~0u;
187 }
188 }
189}
190
191void AggressiveAntiDepBreaker::FinishBlock() {
192 delete State;
193 State = nullptr;
194}
195
196void AggressiveAntiDepBreaker::Observe(MachineInstr &MI, unsigned Count,
197 unsigned InsertPosIndex) {
198 assert(Count < InsertPosIndex && "Instruction index out of expected range!");
199
200 std::set<unsigned> PassthruRegs;
201 GetPassthruRegs(MI, PassthruRegs);
202 PrescanInstruction(MI, Count, PassthruRegs);
203 ScanInstruction(MI, Count);
204
205 LLVM_DEBUG(dbgs() << "Observe: ");
206 LLVM_DEBUG(MI.dump());
207 LLVM_DEBUG(dbgs() << "\tRegs:");
208
209 std::vector<unsigned> &DefIndices = State->GetDefIndices();
210 for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
211 // If Reg is currently live, then mark that it can't be renamed as
212 // we don't know the extent of its live-range anymore (now that it
213 // has been scheduled). If it is not live but was defined in the
214 // previous schedule region, then set its def index to the most
215 // conservative location (i.e. the beginning of the previous
216 // schedule region).
217 if (State->IsLive(Reg)) {
218 LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs()
219 << " " << printReg(Reg, TRI) << "=g" << State->GetGroup(Reg)
220 << "->g0(region live-out)");
221 State->UnionGroups(Reg, 0);
222 } else if ((DefIndices[Reg] < InsertPosIndex)
223 && (DefIndices[Reg] >= Count)) {
224 DefIndices[Reg] = Count;
225 }
226 }
227 LLVM_DEBUG(dbgs() << '\n');
228}
229
230bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr &MI,
231 MachineOperand &MO) {
232 if (!MO.isReg() || !MO.isImplicit())
233 return false;
234
235 Register Reg = MO.getReg();
236 if (Reg == 0)
237 return false;
238
239 MachineOperand *Op = nullptr;
240 if (MO.isDef())
241 Op = MI.findRegisterUseOperand(Reg, true);
242 else
243 Op = MI.findRegisterDefOperand(Reg);
244
245 return(Op && Op->isImplicit());
246}
247
248void AggressiveAntiDepBreaker::GetPassthruRegs(
249 MachineInstr &MI, std::set<unsigned> &PassthruRegs) {
250 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
251 MachineOperand &MO = MI.getOperand(i);
252 if (!MO.isReg()) continue;
253 if ((MO.isDef() && MI.isRegTiedToUseOperand(i)) ||
254 IsImplicitDefUse(MI, MO)) {
255 const Register Reg = MO.getReg();
256 for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
257 SubRegs.isValid(); ++SubRegs)
258 PassthruRegs.insert(*SubRegs);
259 }
260 }
261}
262
263/// AntiDepEdges - Return in Edges the anti- and output- dependencies
264/// in SU that we want to consider for breaking.
265static void AntiDepEdges(const SUnit *SU, std::vector<const SDep *> &Edges) {
266 SmallSet<unsigned, 4> RegSet;
267 for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
268 P != PE; ++P) {
269 if ((P->getKind() == SDep::Anti) || (P->getKind() == SDep::Output)) {
270 if (RegSet.insert(P->getReg()).second)
271 Edges.push_back(&*P);
272 }
273 }
274}
275
276/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
277/// critical path.
278static const SUnit *CriticalPathStep(const SUnit *SU) {
279 const SDep *Next = nullptr;
280 unsigned NextDepth = 0;
281 // Find the predecessor edge with the greatest depth.
282 if (SU) {
283 for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
284 P != PE; ++P) {
285 const SUnit *PredSU = P->getSUnit();
286 unsigned PredLatency = P->getLatency();
287 unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
288 // In the case of a latency tie, prefer an anti-dependency edge over
289 // other types of edges.
290 if (NextDepth < PredTotalLatency ||
291 (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
292 NextDepth = PredTotalLatency;
293 Next = &*P;
294 }
295 }
296 }
297
298 return (Next) ? Next->getSUnit() : nullptr;
299}
300
301void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
302 const char *tag,
303 const char *header,
304 const char *footer) {
305 std::vector<unsigned> &KillIndices = State->GetKillIndices();
306 std::vector<unsigned> &DefIndices = State->GetDefIndices();
307 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
308 RegRefs = State->GetRegRefs();
309
310 // FIXME: We must leave subregisters of live super registers as live, so that
311 // we don't clear out the register tracking information for subregisters of
312 // super registers we're still tracking (and with which we're unioning
313 // subregister definitions).
314 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
315 if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI)) {
316 LLVM_DEBUG(if (!header && footer) dbgs() << footer);
317 return;
318 }
319
320 if (!State->IsLive(Reg)) {
321 KillIndices[Reg] = KillIdx;
322 DefIndices[Reg] = ~0u;
323 RegRefs.erase(Reg);
324 State->LeaveGroup(Reg);
325 LLVM_DEBUG(if (header) {
326 dbgs() << header << printReg(Reg, TRI);
327 header = nullptr;
328 });
329 LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
330 // Repeat for subregisters. Note that we only do this if the superregister
331 // was not live because otherwise, regardless whether we have an explicit
332 // use of the subregister, the subregister's contents are needed for the
333 // uses of the superregister.
334 for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
335 unsigned SubregReg = *SubRegs;
336 if (!State->IsLive(SubregReg)) {
337 KillIndices[SubregReg] = KillIdx;
338 DefIndices[SubregReg] = ~0u;
339 RegRefs.erase(SubregReg);
340 State->LeaveGroup(SubregReg);
341 LLVM_DEBUG(if (header) {
342 dbgs() << header << printReg(Reg, TRI);
343 header = nullptr;
344 });
345 LLVM_DEBUG(dbgs() << " " << printReg(SubregReg, TRI) << "->g"
346 << State->GetGroup(SubregReg) << tag);
347 }
348 }
349 }
350
351 LLVM_DEBUG(if (!header && footer) dbgs() << footer);
352}
353
354void AggressiveAntiDepBreaker::PrescanInstruction(
355 MachineInstr &MI, unsigned Count, std::set<unsigned> &PassthruRegs) {
356 std::vector<unsigned> &DefIndices = State->GetDefIndices();
357 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
358 RegRefs = State->GetRegRefs();
359
360 // Handle dead defs by simulating a last-use of the register just
361 // after the def. A dead def can occur because the def is truly
362 // dead, or because only a subregister is live at the def. If we
363 // don't do this the dead def will be incorrectly merged into the
364 // previous def.
365 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
366 MachineOperand &MO = MI.getOperand(i);
367 if (!MO.isReg() || !MO.isDef()) continue;
368 Register Reg = MO.getReg();
369 if (Reg == 0) continue;
370
371 HandleLastUse(Reg, Count + 1, "", "\tDead Def: ", "\n");
372 }
373
374 LLVM_DEBUG(dbgs() << "\tDef Groups:");
375 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
376 MachineOperand &MO = MI.getOperand(i);
377 if (!MO.isReg() || !MO.isDef()) continue;
378 Register Reg = MO.getReg();
379 if (Reg == 0) continue;
380
381 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"
382 << State->GetGroup(Reg));
383
384 // If MI's defs have a special allocation requirement, don't allow
385 // any def registers to be changed. Also assume all registers
386 // defined in a call must not be changed (ABI). Inline assembly may
387 // reference either system calls or the register directly. Skip it until we
388 // can tell user specified registers from compiler-specified.
389 if (MI.isCall() || MI.hasExtraDefRegAllocReq() || TII->isPredicated(MI) ||
390 MI.isInlineAsm()) {
391 LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
392 State->UnionGroups(Reg, 0);
393 }
394
395 // Any aliases that are live at this point are completely or
396 // partially defined here, so group those aliases with Reg.
397 for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
398 unsigned AliasReg = *AI;
399 if (State->IsLive(AliasReg)) {
400 State->UnionGroups(Reg, AliasReg);
401 LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via "
402 << printReg(AliasReg, TRI) << ")");
403 }
404 }
405
406 // Note register reference...
407 const TargetRegisterClass *RC = nullptr;
408 if (i < MI.getDesc().getNumOperands())
409 RC = TII->getRegClass(MI.getDesc(), i, TRI, MF);
410 AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
411 RegRefs.insert(std::make_pair(Reg, RR));
412 }
413
414 LLVM_DEBUG(dbgs() << '\n');
415
416 // Scan the register defs for this instruction and update
417 // live-ranges.
418 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
419 MachineOperand &MO = MI.getOperand(i);
420 if (!MO.isReg() || !MO.isDef()) continue;
421 Register Reg = MO.getReg();
422 if (Reg == 0) continue;
423 // Ignore KILLs and passthru registers for liveness...
424 if (MI.isKill() || (PassthruRegs.count(Reg) != 0))
425 continue;
426
427 // Update def for Reg and aliases.
428 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
429 // We need to be careful here not to define already-live super registers.
430 // If the super register is already live, then this definition is not
431 // a definition of the whole super register (just a partial insertion
432 // into it). Earlier subregister definitions (which we've not yet visited
433 // because we're iterating bottom-up) need to be linked to the same group
434 // as this definition.
435 if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI))
436 continue;
437
438 DefIndices[*AI] = Count;
439 }
440 }
441}
442
443void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
444 unsigned Count) {
445 LLVM_DEBUG(dbgs() << "\tUse Groups:");
446 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
447 RegRefs = State->GetRegRefs();
448
449 // If MI's uses have special allocation requirement, don't allow
450 // any use registers to be changed. Also assume all registers
451 // used in a call must not be changed (ABI).
452 // Inline Assembly register uses also cannot be safely changed.
453 // FIXME: The issue with predicated instructions is more complex. We are being
454 // conservative here because the kill markers cannot be trusted after
455 // if-conversion:
456 // %r6 = LDR %sp, %reg0, 92, 14, %reg0; mem:LD4[FixedStack14]
457 // ...
458 // STR %r0, killed %r6, %reg0, 0, 0, %cpsr; mem:ST4[%395]
459 // %r6 = LDR %sp, %reg0, 100, 0, %cpsr; mem:LD4[FixedStack12]
460 // STR %r0, killed %r6, %reg0, 0, 14, %reg0; mem:ST4[%396](align=8)
461 //
462 // The first R6 kill is not really a kill since it's killed by a predicated
463 // instruction which may not be executed. The second R6 def may or may not
464 // re-define R6 so it's not safe to change it since the last R6 use cannot be
465 // changed.
466 bool Special = MI.isCall() || MI.hasExtraSrcRegAllocReq() ||
467 TII->isPredicated(MI) || MI.isInlineAsm();
468
469 // Scan the register uses for this instruction and update
470 // live-ranges, groups and RegRefs.
471 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
472 MachineOperand &MO = MI.getOperand(i);
473 if (!MO.isReg() || !MO.isUse()) continue;
474 Register Reg = MO.getReg();
475 if (Reg == 0) continue;
476
477 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"
478 << State->GetGroup(Reg));
479
480 // It wasn't previously live but now it is, this is a kill. Forget
481 // the previous live-range information and start a new live-range
482 // for the register.
483 HandleLastUse(Reg, Count, "(last-use)");
484
485 if (Special) {
486 LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
487 State->UnionGroups(Reg, 0);
488 }
489
490 // Note register reference...
491 const TargetRegisterClass *RC = nullptr;
492 if (i < MI.getDesc().getNumOperands())
493 RC = TII->getRegClass(MI.getDesc(), i, TRI, MF);
494 AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
495 RegRefs.insert(std::make_pair(Reg, RR));
496 }
497
498 LLVM_DEBUG(dbgs() << '\n');
499
500 // Form a group of all defs and uses of a KILL instruction to ensure
501 // that all registers are renamed as a group.
502 if (MI.isKill()) {
503 LLVM_DEBUG(dbgs() << "\tKill Group:");
504
505 unsigned FirstReg = 0;
506 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
507 MachineOperand &MO = MI.getOperand(i);
508 if (!MO.isReg()) continue;
509 Register Reg = MO.getReg();
510 if (Reg == 0) continue;
511
512 if (FirstReg != 0) {
513 LLVM_DEBUG(dbgs() << "=" << printReg(Reg, TRI));
514 State->UnionGroups(FirstReg, Reg);
515 } else {
516 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI));
517 FirstReg = Reg;
518 }
519 }
520
521 LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(FirstReg) << '\n');
522 }
523}
524
525BitVector AggressiveAntiDepBreaker::GetRenameRegisters(unsigned Reg) {
526 BitVector BV(TRI->getNumRegs(), false);
527 bool first = true;
528
529 // Check all references that need rewriting for Reg. For each, use
530 // the corresponding register class to narrow the set of registers
531 // that are appropriate for renaming.
532 for (const auto &Q : make_range(State->GetRegRefs().equal_range(Reg))) {
533 const TargetRegisterClass *RC = Q.second.RC;
534 if (!RC) continue;
535
536 BitVector RCBV = TRI->getAllocatableSet(MF, RC);
537 if (first) {
538 BV |= RCBV;
539 first = false;
540 } else {
541 BV &= RCBV;
542 }
543
544 LLVM_DEBUG(dbgs() << " " << TRI->getRegClassName(RC));
545 }
546
547 return BV;
548}
549
550bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
551 unsigned AntiDepGroupIndex,
552 RenameOrderType& RenameOrder,
553 std::map<unsigned, unsigned> &RenameMap) {
554 std::vector<unsigned> &KillIndices = State->GetKillIndices();
555 std::vector<unsigned> &DefIndices = State->GetDefIndices();
556 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
557 RegRefs = State->GetRegRefs();
558
559 // Collect all referenced registers in the same group as
560 // AntiDepReg. These all need to be renamed together if we are to
561 // break the anti-dependence.
562 std::vector<unsigned> Regs;
563 State->GetGroupRegs(AntiDepGroupIndex, Regs, &RegRefs);
564 assert(!Regs.empty() && "Empty register group!");
565 if (Regs.empty())
566 return false;
567
568 // Find the "superest" register in the group. At the same time,
569 // collect the BitVector of registers that can be used to rename
570 // each register.
571 LLVM_DEBUG(dbgs() << "\tRename Candidates for Group g" << AntiDepGroupIndex
572 << ":\n");
573 std::map<unsigned, BitVector> RenameRegisterMap;
574 unsigned SuperReg = 0;
575 for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
576 unsigned Reg = Regs[i];
577 if ((SuperReg == 0) || TRI->isSuperRegister(SuperReg, Reg))
578 SuperReg = Reg;
579
580 // If Reg has any references, then collect possible rename regs
581 if (RegRefs.count(Reg) > 0) {
582 LLVM_DEBUG(dbgs() << "\t\t" << printReg(Reg, TRI) << ":");
583
584 BitVector &BV = RenameRegisterMap[Reg];
585 assert(BV.empty());
586 BV = GetRenameRegisters(Reg);
587
588 LLVM_DEBUG({
589 dbgs() << " ::";
590 for (unsigned r : BV.set_bits())
591 dbgs() << " " << printReg(r, TRI);
592 dbgs() << "\n";
593 });
594 }
595 }
596
597 // All group registers should be a subreg of SuperReg.
598 for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
599 unsigned Reg = Regs[i];
600 if (Reg == SuperReg) continue;
601 bool IsSub = TRI->isSubRegister(SuperReg, Reg);
602 // FIXME: remove this once PR18663 has been properly fixed. For now,
603 // return a conservative answer:
604 // assert(IsSub && "Expecting group subregister");
605 if (!IsSub)
606 return false;
607 }
608
609#ifndef NDEBUG
610 // If DebugDiv > 0 then only rename (renamecnt % DebugDiv) == DebugMod
611 if (DebugDiv > 0) {
612 static int renamecnt = 0;
613 if (renamecnt++ % DebugDiv != DebugMod)
614 return false;
615
616 dbgs() << "*** Performing rename " << printReg(SuperReg, TRI)
617 << " for debug ***\n";
618 }
619#endif
620
621 // Check each possible rename register for SuperReg in round-robin
622 // order. If that register is available, and the corresponding
623 // registers are available for the other group subregisters, then we
624 // can use those registers to rename.
625
626 // FIXME: Using getMinimalPhysRegClass is very conservative. We should
627 // check every use of the register and find the largest register class
628 // that can be used in all of them.
629 const TargetRegisterClass *SuperRC =
630 TRI->getMinimalPhysRegClass(SuperReg, MVT::Other);
631
632 ArrayRef<MCPhysReg> Order = RegClassInfo.getOrder(SuperRC);
633 if (Order.empty()) {
634 LLVM_DEBUG(dbgs() << "\tEmpty Super Regclass!!\n");
635 return false;
636 }
637
638 LLVM_DEBUG(dbgs() << "\tFind Registers:");
639
640 RenameOrder.insert(RenameOrderType::value_type(SuperRC, Order.size()));
641
642 unsigned OrigR = RenameOrder[SuperRC];
643 unsigned EndR = ((OrigR == Order.size()) ? 0 : OrigR);
644 unsigned R = OrigR;
645 do {
646 if (R == 0) R = Order.size();
647 --R;
648 const unsigned NewSuperReg = Order[R];
649 // Don't consider non-allocatable registers
650 if (!MRI.isAllocatable(NewSuperReg)) continue;
651 // Don't replace a register with itself.
652 if (NewSuperReg == SuperReg) continue;
653
654 LLVM_DEBUG(dbgs() << " [" << printReg(NewSuperReg, TRI) << ':');
655 RenameMap.clear();
656
657 // For each referenced group register (which must be a SuperReg or
658 // a subregister of SuperReg), find the corresponding subregister
659 // of NewSuperReg and make sure it is free to be renamed.
660 for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
661 unsigned Reg = Regs[i];
662 unsigned NewReg = 0;
663 if (Reg == SuperReg) {
664 NewReg = NewSuperReg;
665 } else {
666 unsigned NewSubRegIdx = TRI->getSubRegIndex(SuperReg, Reg);
667 if (NewSubRegIdx != 0)
668 NewReg = TRI->getSubReg(NewSuperReg, NewSubRegIdx);
669 }
670
671 LLVM_DEBUG(dbgs() << " " << printReg(NewReg, TRI));
672
673 // Check if Reg can be renamed to NewReg.
674 if (!RenameRegisterMap[Reg].test(NewReg)) {
675 LLVM_DEBUG(dbgs() << "(no rename)");
676 goto next_super_reg;
677 }
678
679 // If NewReg is dead and NewReg's most recent def is not before
680 // Regs's kill, it's safe to replace Reg with NewReg. We
681 // must also check all aliases of NewReg, because we can't define a
682 // register when any sub or super is already live.
683 if (State->IsLive(NewReg) || (KillIndices[Reg] > DefIndices[NewReg])) {
684 LLVM_DEBUG(dbgs() << "(live)");
685 goto next_super_reg;
686 } else {
687 bool found = false;
688 for (MCRegAliasIterator AI(NewReg, TRI, false); AI.isValid(); ++AI) {
689 unsigned AliasReg = *AI;
690 if (State->IsLive(AliasReg) ||
691 (KillIndices[Reg] > DefIndices[AliasReg])) {
692 LLVM_DEBUG(dbgs()
693 << "(alias " << printReg(AliasReg, TRI) << " live)");
694 found = true;
695 break;
696 }
697 }
698 if (found)
699 goto next_super_reg;
700 }
701
702 // We cannot rename 'Reg' to 'NewReg' if one of the uses of 'Reg' also
703 // defines 'NewReg' via an early-clobber operand.
704 for (const auto &Q : make_range(RegRefs.equal_range(Reg))) {
705 MachineInstr *UseMI = Q.second.Operand->getParent();
706 int Idx = UseMI->findRegisterDefOperandIdx(NewReg, false, true, TRI);
707 if (Idx == -1)
708 continue;
709
710 if (UseMI->getOperand(Idx).isEarlyClobber()) {
711 LLVM_DEBUG(dbgs() << "(ec)");
712 goto next_super_reg;
713 }
714 }
715
716 // Also, we cannot rename 'Reg' to 'NewReg' if the instruction defining
717 // 'Reg' is an early-clobber define and that instruction also uses
718 // 'NewReg'.
719 for (const auto &Q : make_range(RegRefs.equal_range(Reg))) {
720 if (!Q.second.Operand->isDef() || !Q.second.Operand->isEarlyClobber())
721 continue;
722
723 MachineInstr *DefMI = Q.second.Operand->getParent();
724 if (DefMI->readsRegister(NewReg, TRI)) {
725 LLVM_DEBUG(dbgs() << "(ec)");
726 goto next_super_reg;
727 }
728 }
729
730 // Record that 'Reg' can be renamed to 'NewReg'.
731 RenameMap.insert(std::pair<unsigned, unsigned>(Reg, NewReg));
732 }
733
734 // If we fall-out here, then every register in the group can be
735 // renamed, as recorded in RenameMap.
736 RenameOrder.erase(SuperRC);
737 RenameOrder.insert(RenameOrderType::value_type(SuperRC, R));
738 LLVM_DEBUG(dbgs() << "]\n");
739 return true;
740
741 next_super_reg:
742 LLVM_DEBUG(dbgs() << ']');
743 } while (R != EndR);
744
745 LLVM_DEBUG(dbgs() << '\n');
746
747 // No registers are free and available!
748 return false;
749}
750
751/// BreakAntiDependencies - Identify anti-dependencies within the
752/// ScheduleDAG and break them by renaming registers.
753unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
754 const std::vector<SUnit> &SUnits,
755 MachineBasicBlock::iterator Begin,
756 MachineBasicBlock::iterator End,
757 unsigned InsertPosIndex,
758 DbgValueVector &DbgValues) {
759 std::vector<unsigned> &KillIndices = State->GetKillIndices();
760 std::vector<unsigned> &DefIndices = State->GetDefIndices();
761 std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
762 RegRefs = State->GetRegRefs();
763
764 // The code below assumes that there is at least one instruction,
765 // so just duck out immediately if the block is empty.
766 if (SUnits.empty()) return 0;
1. Assuming the condition is false
2. Taking false branch
767
768 // For each regclass the next register to use for renaming.
769 RenameOrderType RenameOrder;
770
771 // ...need a map from MI to SUnit.
772 std::map<MachineInstr *, const SUnit *> MISUnitMap;
773 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
3. Assuming 'i' is equal to 'e'
4. Loop condition is false. Execution continues on line 782
774 const SUnit *SU = &SUnits[i];
775 MISUnitMap.insert(std::pair<MachineInstr *, const SUnit *>(SU->getInstr(),
776 SU));
777 }
778
779 // Track progress along the critical path through the SUnit graph as
780 // we walk the instructions. This is needed for regclasses that only
781 // break critical-path anti-dependencies.
782 const SUnit *CriticalPathSU = nullptr;
5. 'CriticalPathSU' initialized to a null pointer value
783 MachineInstr *CriticalPathMI = nullptr;
784 if (CriticalPathSet.any()) {
6. Calling 'BitVector::any'
12. Returning from 'BitVector::any'
13. Taking true branch
785 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
14. Assuming 'i' is equal to 'e'
15. Loop condition is false. Execution continues on line 794
786 const SUnit *SU = &SUnits[i];
787 if (!CriticalPathSU ||
788 ((SU->getDepth() + SU->Latency) >
789 (CriticalPathSU->getDepth() + CriticalPathSU->Latency))) {
790 CriticalPathSU = SU;
791 }
792 }
793
794 CriticalPathMI = CriticalPathSU->getInstr();
16. Called C++ object pointer is null
795 }
796
797#ifndef NDEBUG
798 LLVM_DEBUG(dbgs() << "\n===== Aggressive anti-dependency breaking\n");
799 LLVM_DEBUG(dbgs() << "Available regs:");
800 for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
801 if (!State->IsLive(Reg))
802 LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI));
803 }
804 LLVM_DEBUG(dbgs() << '\n');
805#endif
806
807 BitVector RegAliases(TRI->getNumRegs());
808
809 // Attempt to break anti-dependence edges. Walk the instructions
810 // from the bottom up, tracking information about liveness as we go
811 // to help determine which registers are available.
812 unsigned Broken = 0;
813 unsigned Count = InsertPosIndex - 1;
814 for (MachineBasicBlock::iterator I = End, E = Begin;
815 I != E; --Count) {
816 MachineInstr &MI = *--I;
817
818 if (MI.isDebugInstr())
819 continue;
820
821 LLVM_DEBUG(dbgs() << "Anti: ");
822 LLVM_DEBUG(MI.dump());
823
824 std::set<unsigned> PassthruRegs;
825 GetPassthruRegs(MI, PassthruRegs);
826
827 // Process the defs in MI...
828 PrescanInstruction(MI, Count, PassthruRegs);
829
830 // The dependence edges that represent anti- and output-
831 // dependencies that are candidates for breaking.
832 std::vector<const SDep *> Edges;
833 const SUnit *PathSU = MISUnitMap[&MI];
834 AntiDepEdges(PathSU, Edges);
835
836 // If MI is not on the critical path, then we don't rename
837 // registers in the CriticalPathSet.
838 BitVector *ExcludeRegs = nullptr;
839 if (&MI == CriticalPathMI) {
840 CriticalPathSU = CriticalPathStep(CriticalPathSU);
841 CriticalPathMI = (CriticalPathSU) ? CriticalPathSU->getInstr() : nullptr;
842 } else if (CriticalPathSet.any()) {
843 ExcludeRegs = &CriticalPathSet;
844 }
845
846 // Ignore KILL instructions (they form a group in ScanInstruction
847 // but don't cause any anti-dependence breaking themselves)
848 if (!MI.isKill()) {
849 // Attempt to break each anti-dependency...
850 for (unsigned i = 0, e = Edges.size(); i != e; ++i) {
851 const SDep *Edge = Edges[i];
852 SUnit *NextSU = Edge->getSUnit();
853
854 if ((Edge->getKind() != SDep::Anti) &&
855 (Edge->getKind() != SDep::Output)) continue;
856
857 unsigned AntiDepReg = Edge->getReg();
858 LLVM_DEBUG(dbgs() << "\tAntidep reg: " << printReg(AntiDepReg, TRI));
859 assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
860
861 if (!MRI.isAllocatable(AntiDepReg)) {
862 // Don't break anti-dependencies on non-allocatable registers.
863 LLVM_DEBUG(dbgs() << " (non-allocatable)\n");
864 continue;
865 } else if (ExcludeRegs && ExcludeRegs->test(AntiDepReg)) {
866 // Don't break anti-dependencies for critical path registers
867 // if not on the critical path
868 LLVM_DEBUG(dbgs() << " (not critical-path)\n");
869 continue;
870 } else if (PassthruRegs.count(AntiDepReg) != 0) {
871 // If the anti-dep register liveness "passes-thru", then
872 // don't try to change it. It will be changed along with
873 // the use if required to break an earlier antidep.
874 LLVM_DEBUG(dbgs() << " (passthru)\n");
875 continue;
876 } else {
877 // No anti-dep breaking for implicit deps
878 MachineOperand *AntiDepOp = MI.findRegisterDefOperand(AntiDepReg);
879 assert(AntiDepOp && "Can't find index for defined register operand");
880 if (!AntiDepOp || AntiDepOp->isImplicit()) {
881 LLVM_DEBUG(dbgs() << " (implicit)\n");
882 continue;
883 }
884
885 // If the SUnit has other dependencies on the SUnit that
886 // it anti-depends on, don't bother breaking the
887 // anti-dependency since those edges would prevent such
888 // units from being scheduled past each other
889 // regardless.
890 //
891 // Also, if there are dependencies on other SUnits with the
892 // same register as the anti-dependency, don't attempt to
893 // break it.
894 for (SUnit::const_pred_iterator P = PathSU->Preds.begin(),
895 PE = PathSU->Preds.end(); P != PE; ++P) {
896 if (P->getSUnit() == NextSU ?
897 (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
898 (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
899 AntiDepReg = 0;
900 break;
901 }
902 }
903 for (SUnit::const_pred_iterator P = PathSU->Preds.begin(),
904 PE = PathSU->Preds.end(); P != PE; ++P) {
905 if ((P->getSUnit() == NextSU) && (P->getKind() != SDep::Anti) &&
906 (P->getKind() != SDep::Output)) {
907 LLVM_DEBUG(dbgs() << " (real dependency)\n");
908 AntiDepReg = 0;
909 break;
910 } else if ((P->getSUnit() != NextSU) &&
911 (P->getKind() == SDep::Data) &&
912 (P->getReg() == AntiDepReg)) {
913 LLVM_DEBUG(dbgs() << " (other dependency)\n");
914 AntiDepReg = 0;
915 break;
916 }
917 }
918
919 if (AntiDepReg == 0) continue;
920
921 // If the definition of the anti-dependency register does not start
922 // a new live range, bail out. This can happen if the anti-dep
923 // register is a sub-register of another register whose live range
924 // spans over PathSU. In such case, PathSU defines only a part of
925 // the larger register.
926 RegAliases.reset();
927 for (MCRegAliasIterator AI(AntiDepReg, TRI, true); AI.isValid(); ++AI)
928 RegAliases.set(*AI);
929 for (SDep S : PathSU->Succs) {
930 SDep::Kind K = S.getKind();
931 if (K != SDep::Data && K != SDep::Output && K != SDep::Anti)
932 continue;
933 unsigned R = S.getReg();
934 if (!RegAliases[R])
935 continue;
936 if (R == AntiDepReg || TRI->isSubRegister(AntiDepReg, R))
937 continue;
938 AntiDepReg = 0;
939 break;
940 }
941
942 if (AntiDepReg == 0) continue;
943 }
944
945 assert(AntiDepReg != 0);
946 if (AntiDepReg == 0) continue;
947
948 // Determine AntiDepReg's register group.
949 const unsigned GroupIndex = State->GetGroup(AntiDepReg);
950 if (GroupIndex == 0) {
951 LLVM_DEBUG(dbgs() << " (zero group)\n");
952 continue;
953 }
954
955 LLVM_DEBUG(dbgs() << '\n');
956
957 // Look for a suitable register to use to break the anti-dependence.
958 std::map<unsigned, unsigned> RenameMap;
959 if (FindSuitableFreeRegisters(GroupIndex, RenameOrder, RenameMap)) {
960 LLVM_DEBUG(dbgs() << "\tBreaking anti-dependence edge on "
961 << printReg(AntiDepReg, TRI) << ":");
962
963 // Handle each group register...
964 for (std::map<unsigned, unsigned>::iterator
965 S = RenameMap.begin(), E = RenameMap.end(); S != E; ++S) {
966 unsigned CurrReg = S->first;
967 unsigned NewReg = S->second;
968
969 LLVM_DEBUG(dbgs() << " " << printReg(CurrReg, TRI) << "->"
970 << printReg(NewReg, TRI) << "("
971 << RegRefs.count(CurrReg) << " refs)");
972
973 // Update the references to the old register CurrReg to
974 // refer to the new register NewReg.
975 for (const auto &Q : make_range(RegRefs.equal_range(CurrReg))) {
976 Q.second.Operand->setReg(NewReg);
977 // If the SU for the instruction being updated has debug
978 // information related to the anti-dependency register, make
979 // sure to update that as well.
980 const SUnit *SU = MISUnitMap[Q.second.Operand->getParent()];
981 if (!SU) continue;
982 UpdateDbgValues(DbgValues, Q.second.Operand->getParent(),
983 AntiDepReg, NewReg);
984 }
985
986 // We just went back in time and modified history; the
987 // liveness information for CurrReg is now inconsistent. Set
988 // the state as if it were dead.
989 State->UnionGroups(NewReg, 0);
990 RegRefs.erase(NewReg);
991 DefIndices[NewReg] = DefIndices[CurrReg];
992 KillIndices[NewReg] = KillIndices[CurrReg];
993
994 State->UnionGroups(CurrReg, 0);
995 RegRefs.erase(CurrReg);
996 DefIndices[CurrReg] = KillIndices[CurrReg];
997 KillIndices[CurrReg] = ~0u;
998 assert(((KillIndices[CurrReg] == ~0u) !=
999 (DefIndices[CurrReg] == ~0u)) &&
1000 "Kill and Def maps aren't consistent for AntiDepReg!");
1001 }
1002
1003 ++Broken;
1004 LLVM_DEBUG(dbgs() << '\n');
1005 }
1006 }
1007 }
1008
1009 ScanInstruction(MI, Count);
1010 }
1011
1012 return Broken;
1013}
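
For reference, the path above reaches line 794 with a null CriticalPathSU only because the analyzer assumes the loop on line 785 executes zero times, even though the SUnits.empty() early-out on line 766 has already excluded that case. Below is a self-contained sketch (made-up names, not a proposed LLVM change) of an equivalent selection loop in which the non-null invariant is made explicit before the dereference:

#include <cassert>
#include <vector>

struct SU {
  int Depth = 0;
  int instr() const { return Depth; }   // stand-in for SUnit::getInstr()
};

// Selection loop with the invariant spelled out: for a non-empty container the
// loop must assign Critical, and the assert documents that before dereferencing.
int criticalPathInstr(const std::vector<SU> &Units) {
  if (Units.empty())
    return -1;                          // mirrors the early-out on line 766
  const SU *Critical = nullptr;
  for (const SU &U : Units)
    if (!Critical || U.Depth > Critical->Depth)
      Critical = &U;
  assert(Critical && "non-empty Units must select a critical SU");
  return Critical->instr();             // dereference happens only after the check
}

int main() {
  return criticalPathInstr({{1}, {3}, {2}});   // returns 3
}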

/build/llvm-toolchain-snapshot-10~svn372306/include/llvm/ADT/BitVector.h

1//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the BitVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_BITVECTOR_H
14#define LLVM_ADT_BITVECTOR_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/iterator_range.h"
18#include "llvm/Support/MathExtras.h"
19#include <algorithm>
20#include <cassert>
21#include <climits>
22#include <cstdint>
23#include <cstdlib>
24#include <cstring>
25#include <utility>
26
27namespace llvm {
28
29/// ForwardIterator for the bits that are set.
30/// Iterators get invalidated when resize / reserve is called.
31template <typename BitVectorT> class const_set_bits_iterator_impl {
32 const BitVectorT &Parent;
33 int Current = 0;
34
35 void advance() {
36 assert(Current != -1 && "Trying to advance past end.");
37 Current = Parent.find_next(Current);
38 }
39
40public:
41 const_set_bits_iterator_impl(const BitVectorT &Parent, int Current)
42 : Parent(Parent), Current(Current) {}
43 explicit const_set_bits_iterator_impl(const BitVectorT &Parent)
44 : const_set_bits_iterator_impl(Parent, Parent.find_first()) {}
45 const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default;
46
47 const_set_bits_iterator_impl operator++(int) {
48 auto Prev = *this;
49 advance();
50 return Prev;
51 }
52
53 const_set_bits_iterator_impl &operator++() {
54 advance();
55 return *this;
56 }
57
58 unsigned operator*() const { return Current; }
59
60 bool operator==(const const_set_bits_iterator_impl &Other) const {
61 assert(&Parent == &Other.Parent &&((&Parent == &Other.Parent && "Comparing iterators from different BitVectors"
) ? static_cast<void> (0) : __assert_fail ("&Parent == &Other.Parent && \"Comparing iterators from different BitVectors\""
, "/build/llvm-toolchain-snapshot-10~svn372306/include/llvm/ADT/BitVector.h"
, 62, __PRETTY_FUNCTION__))
62 "Comparing iterators from different BitVectors")((&Parent == &Other.Parent && "Comparing iterators from different BitVectors"
) ? static_cast<void> (0) : __assert_fail ("&Parent == &Other.Parent && \"Comparing iterators from different BitVectors\""
, "/build/llvm-toolchain-snapshot-10~svn372306/include/llvm/ADT/BitVector.h"
, 62, __PRETTY_FUNCTION__))
;
63 return Current == Other.Current;
64 }
65
66 bool operator!=(const const_set_bits_iterator_impl &Other) const {
67 assert(&Parent == &Other.Parent &&((&Parent == &Other.Parent && "Comparing iterators from different BitVectors"
) ? static_cast<void> (0) : __assert_fail ("&Parent == &Other.Parent && \"Comparing iterators from different BitVectors\""
, "/build/llvm-toolchain-snapshot-10~svn372306/include/llvm/ADT/BitVector.h"
, 68, __PRETTY_FUNCTION__))
68 "Comparing iterators from different BitVectors")((&Parent == &Other.Parent && "Comparing iterators from different BitVectors"
) ? static_cast<void> (0) : __assert_fail ("&Parent == &Other.Parent && \"Comparing iterators from different BitVectors\""
, "/build/llvm-toolchain-snapshot-10~svn372306/include/llvm/ADT/BitVector.h"
, 68, __PRETTY_FUNCTION__))
;
69 return Current != Other.Current;
70 }
71};
72
73class BitVector {
74 typedef unsigned long BitWord;
75
76 enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
77
78 static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
79 "Unsupported word size");
80
81 MutableArrayRef<BitWord> Bits; // Actual bits.
82 unsigned Size; // Size of bitvector in bits.
83
84public:
85 typedef unsigned size_type;
86 // Encapsulation of a single bit.
87 class reference {
88 friend class BitVector;
89
90 BitWord *WordRef;
91 unsigned BitPos;
92
93 public:
94 reference(BitVector &b, unsigned Idx) {
95 WordRef = &b.Bits[Idx / BITWORD_SIZE];
96 BitPos = Idx % BITWORD_SIZE;
97 }
98
99 reference() = delete;
100 reference(const reference&) = default;
101
102 reference &operator=(reference t) {
103 *this = bool(t);
104 return *this;
105 }
106
107 reference& operator=(bool t) {
108 if (t)
109 *WordRef |= BitWord(1) << BitPos;
110 else
111 *WordRef &= ~(BitWord(1) << BitPos);
112 return *this;
113 }
114
115 operator bool() const {
116 return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
117 }
118 };
119
120 typedef const_set_bits_iterator_impl<BitVector> const_set_bits_iterator;
121 typedef const_set_bits_iterator set_iterator;
122
123 const_set_bits_iterator set_bits_begin() const {
124 return const_set_bits_iterator(*this);
125 }
126 const_set_bits_iterator set_bits_end() const {
127 return const_set_bits_iterator(*this, -1);
128 }
129 iterator_range<const_set_bits_iterator> set_bits() const {
130 return make_range(set_bits_begin(), set_bits_end());
131 }
132
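// Usage sketch (not part of BitVector.h): set_bits() above yields the indices
// of the set bits, so it can drive a range-based for loop. The values below
// are illustrative only.
//
//   llvm::BitVector Live(128);
//   Live.set(3);
//   Live.set(64);
//   for (unsigned Idx : Live.set_bits())   // visits 3, then 64
//     llvm::errs() << "set bit: " << Idx << '\n';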
133 /// BitVector default ctor - Creates an empty bitvector.
134 BitVector() : Size(0) {}
135
136 /// BitVector ctor - Creates a bitvector of specified number of bits. All
137 /// bits are initialized to the specified value.
138 explicit BitVector(unsigned s, bool t = false) : Size(s) {
139 size_t Capacity = NumBitWords(s);
140 Bits = allocate(Capacity);
141 init_words(Bits, t);
142 if (t)
143 clear_unused_bits();
144 }
145
146 /// BitVector copy ctor.
147 BitVector(const BitVector &RHS) : Size(RHS.size()) {
148 if (Size == 0) {
149 Bits = MutableArrayRef<BitWord>();
150 return;
151 }
152
153 size_t Capacity = NumBitWords(RHS.size());
154 Bits = allocate(Capacity);
155 std::memcpy(Bits.data(), RHS.Bits.data(), Capacity * sizeof(BitWord));
156 }
157
158 BitVector(BitVector &&RHS) : Bits(RHS.Bits), Size(RHS.Size) {
159 RHS.Bits = MutableArrayRef<BitWord>();
160 RHS.Size = 0;
161 }
162
163 ~BitVector() { std::free(Bits.data()); }
164
165 /// empty - Tests whether there are no bits in this bitvector.
166 bool empty() const { return Size == 0; }
167
168 /// size - Returns the number of bits in this bitvector.
169 size_type size() const { return Size; }
170
171 /// count - Returns the number of bits which are set.
172 size_type count() const {
173 unsigned NumBits = 0;
174 for (unsigned i = 0; i < NumBitWords(size()); ++i)
175 NumBits += countPopulation(Bits[i]);
176 return NumBits;
177 }
178
179 /// any - Returns true if any bit is set.
180 bool any() const {
181 for (unsigned i = 0; i < NumBitWords(size()); ++i)
7. Assuming the condition is true
8. Loop condition is true. Entering loop body
182 if (Bits[i] != 0)
9. Assuming the condition is true
10. Taking true branch
183 return true;
11. Returning the value 1, which participates in a condition later
184 return false;
185 }
186
187 /// all - Returns true if all bits are set.
188 bool all() const {
189 for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
190 if (Bits[i] != ~0UL)
191 return false;
192
193 // If bits remain check that they are ones. The unused bits are always zero.
194 if (unsigned Remainder = Size % BITWORD_SIZE)
195 return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
196
197 return true;
198 }
199
200 /// none - Returns true if none of the bits are set.
201 bool none() const {
202 return !any();
203 }
204
205 /// find_first_in - Returns the index of the first set bit in the range
206 /// [Begin, End). Returns -1 if all bits in the range are unset.
207 int find_first_in(unsigned Begin, unsigned End) const {
208 assert(Begin <= End && End <= Size);
209 if (Begin == End)
210 return -1;
211
212 unsigned FirstWord = Begin / BITWORD_SIZE;
213 unsigned LastWord = (End - 1) / BITWORD_SIZE;
214
215 // Check subsequent words.
216 for (unsigned i = FirstWord; i <= LastWord; ++i) {
217 BitWord Copy = Bits[i];
218
219 if (i == FirstWord) {
220 unsigned FirstBit = Begin % BITWORD_SIZE;
221 Copy &= maskTrailingZeros<BitWord>(FirstBit);
222 }
223
224 if (i == LastWord) {
225 unsigned LastBit = (End - 1) % BITWORD_SIZE;
226 Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
227 }
228 if (Copy != 0)
229 return i * BITWORD_SIZE + countTrailingZeros(Copy);
230 }
231 return -1;
232 }
233
234 /// find_last_in - Returns the index of the last set bit in the range
235 /// [Begin, End). Returns -1 if all bits in the range are unset.
236 int find_last_in(unsigned Begin, unsigned End) const {
237 assert(Begin <= End && End <= Size);
238 if (Begin == End)
239 return -1;
240
241 unsigned LastWord = (End - 1) / BITWORD_SIZE;
242 unsigned FirstWord = Begin / BITWORD_SIZE;
243
244 for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
245 unsigned CurrentWord = i - 1;
246
247 BitWord Copy = Bits[CurrentWord];
248 if (CurrentWord == LastWord) {
249 unsigned LastBit = (End - 1) % BITWORD_SIZE;
250 Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
251 }
252
253 if (CurrentWord == FirstWord) {
254 unsigned FirstBit = Begin % BITWORD_SIZE;
255 Copy &= maskTrailingZeros<BitWord>(FirstBit);
256 }
257
258 if (Copy != 0)
259 return (CurrentWord + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
260 }
261
262 return -1;
263 }
264
265 /// find_first_unset_in - Returns the index of the first unset bit in the
266 /// range [Begin, End). Returns -1 if all bits in the range are set.
267 int find_first_unset_in(unsigned Begin, unsigned End) const {
268 assert(Begin <= End && End <= Size);
269 if (Begin == End)
270 return -1;
271
272 unsigned FirstWord = Begin / BITWORD_SIZE;
273 unsigned LastWord = (End - 1) / BITWORD_SIZE;
274
275 // Check subsequent words.
276 for (unsigned i = FirstWord; i <= LastWord; ++i) {
277 BitWord Copy = Bits[i];
278
279 if (i == FirstWord) {
280 unsigned FirstBit = Begin % BITWORD_SIZE;
281 Copy |= maskTrailingOnes<BitWord>(FirstBit);
282 }
283
284 if (i == LastWord) {
285 unsigned LastBit = (End - 1) % BITWORD_SIZE;
286 Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
287 }
288 if (Copy != ~0UL) {
289 unsigned Result = i * BITWORD_SIZE + countTrailingOnes(Copy);
290 return Result < size() ? Result : -1;
291 }
292 }
293 return -1;
294 }
295
296 /// find_last_unset_in - Returns the index of the last unset bit in the
297 /// range [Begin, End). Returns -1 if all bits in the range are set.
298 int find_last_unset_in(unsigned Begin, unsigned End) const {
299 assert(Begin <= End && End <= Size);
300 if (Begin == End)
301 return -1;
302
303 unsigned LastWord = (End - 1) / BITWORD_SIZE;
304 unsigned FirstWord = Begin / BITWORD_SIZE;
305
306 for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
307 unsigned CurrentWord = i - 1;
308
309 BitWord Copy = Bits[CurrentWord];
310 if (CurrentWord == LastWord) {
311 unsigned LastBit = (End - 1) % BITWORD_SIZE;
312 Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
313 }
314
315 if (CurrentWord == FirstWord) {
316 unsigned FirstBit = Begin % BITWORD_SIZE;
317 Copy |= maskTrailingOnes<BitWord>(FirstBit);
318 }
319
320 if (Copy != ~0UL) {
321 unsigned Result =
322 (CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1;
323 return Result < Size ? Result : -1;
324 }
325 }
326 return -1;
327 }
328
329 /// find_first - Returns the index of the first set bit, -1 if none
330 /// of the bits are set.
331 int find_first() const { return find_first_in(0, Size); }
332
333 /// find_last - Returns the index of the last set bit, -1 if none of the bits
334 /// are set.
335 int find_last() const { return find_last_in(0, Size); }
336
337 /// find_next - Returns the index of the next set bit following the
338 /// "Prev" bit. Returns -1 if the next set bit is not found.
339 int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); }
340
341 /// find_prev - Returns the index of the first set bit that precedes the
342 /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
343 int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); }
344
345 /// find_first_unset - Returns the index of the first unset bit, -1 if all
346 /// of the bits are set.
347 int find_first_unset() const { return find_first_unset_in(0, Size); }
348
349 /// find_next_unset - Returns the index of the next unset bit following the
350 /// "Prev" bit. Returns -1 if all remaining bits are set.
351 int find_next_unset(unsigned Prev) const {
352 return find_first_unset_in(Prev + 1, Size);
353 }
354
355 /// find_last_unset - Returns the index of the last unset bit, -1 if all of
356 /// the bits are set.
357 int find_last_unset() const { return find_last_unset_in(0, Size); }
358
359 /// find_prev_unset - Returns the index of the first unset bit that precedes
360 /// the bit at \p PriorTo. Returns -1 if all previous bits are set.
361 int find_prev_unset(unsigned PriorTo) {
362 return find_last_unset_in(0, PriorTo);
363 }
364
365 /// clear - Removes all bits from the bitvector. Does not change capacity.
366 void clear() {
367 Size = 0;
368 }
369
370 /// resize - Grow or shrink the bitvector.
371 void resize(unsigned N, bool t = false) {
372 if (N > getBitCapacity()) {
373 unsigned OldCapacity = Bits.size();
374 grow(N);
375 init_words(Bits.drop_front(OldCapacity), t);
376 }
377
378 // Set any old unused bits that are now included in the BitVector. This
379 // may set bits that are not included in the new vector, but we will clear
380 // them back out below.
381 if (N > Size)
382 set_unused_bits(t);
383
384 // Update the size, and clear out any bits that are now unused
385 unsigned OldSize = Size;
386 Size = N;
387 if (t || N < OldSize)
388 clear_unused_bits();
389 }
390
391 void reserve(unsigned N) {
392 if (N > getBitCapacity())
393 grow(N);
394 }
395
396 // Set, reset, flip
397 BitVector &set() {
398 init_words(Bits, true);
399 clear_unused_bits();
400 return *this;
401 }
402
403 BitVector &set(unsigned Idx) {
404 assert(Bits.data() && "Bits never allocated");
405 Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
406 return *this;
407 }
408
409 /// set - Efficiently set a range of bits in [I, E)
410 BitVector &set(unsigned I, unsigned E) {
411 assert(I <= E && "Attempted to set backwards range!");
412 assert(E <= size() && "Attempted to set out-of-bounds range!");
413
414 if (I == E) return *this;
415
416 if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
417 BitWord EMask = 1UL << (E % BITWORD_SIZE);
418 BitWord IMask = 1UL << (I % BITWORD_SIZE);
419 BitWord Mask = EMask - IMask;
420 Bits[I / BITWORD_SIZE] |= Mask;
421 return *this;
422 }
423
424 BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
425 Bits[I / BITWORD_SIZE] |= PrefixMask;
426 I = alignTo(I, BITWORD_SIZE);
427
428 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
429 Bits[I / BITWORD_SIZE] = ~0UL;
430
431 BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
432 if (I < E)
433 Bits[I / BITWORD_SIZE] |= PostfixMask;
434
435 return *this;
436 }
437
438 BitVector &reset() {
439 init_words(Bits, false);
440 return *this;
441 }
442
443 BitVector &reset(unsigned Idx) {
444 Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
445 return *this;
446 }
447
448 /// reset - Efficiently reset a range of bits in [I, E)
449 BitVector &reset(unsigned I, unsigned E) {
450 assert(I <= E && "Attempted to reset backwards range!");
451 assert(E <= size() && "Attempted to reset out-of-bounds range!");
452
453 if (I == E) return *this;
454
455 if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
456 BitWord EMask = 1UL << (E % BITWORD_SIZE);
457 BitWord IMask = 1UL << (I % BITWORD_SIZE);
458 BitWord Mask = EMask - IMask;
459 Bits[I / BITWORD_SIZE] &= ~Mask;
460 return *this;
461 }
462
463 BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
464 Bits[I / BITWORD_SIZE] &= ~PrefixMask;
465 I = alignTo(I, BITWORD_SIZE);
466
467 for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
468 Bits[I / BITWORD_SIZE] = 0UL;
469
470 BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
471 if (I < E)
472 Bits[I / BITWORD_SIZE] &= ~PostfixMask;
473
474 return *this;
475 }
476
477 BitVector &flip() {
478 for (unsigned i = 0; i < NumBitWords(size()); ++i)
479 Bits[i] = ~Bits[i];
480 clear_unused_bits();
481 return *this;
482 }
483
484 BitVector &flip(unsigned Idx) {
485 Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
486 return *this;
487 }
488
489 // Indexing.
490 reference operator[](unsigned Idx) {
491 assert(Idx < Size && "Out-of-bounds Bit access.");
492 return reference(*this, Idx);
493 }
494
495 bool operator[](unsigned Idx) const {
496 assert(Idx < Size && "Out-of-bounds Bit access.");
497 BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
498 return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
499 }
500
501 bool test(unsigned Idx) const {
502 return (*this)[Idx];
503 }
504
505 // Push single bit to end of vector.
506 void push_back(bool Val) {
507 unsigned OldSize = Size;
508 unsigned NewSize = Size + 1;
509
510 // Resize, which will insert zeros.
511 // If we already fit then the unused bits will be already zero.
512 if (NewSize > getBitCapacity())
513 resize(NewSize, false);
514 else
515 Size = NewSize;
516
517 // If true, set single bit.
518 if (Val)
519 set(OldSize);
520 }
521
522 /// Test if any common bits are set.
523 bool anyCommon(const BitVector &RHS) const {
524 unsigned ThisWords = NumBitWords(size());
525 unsigned RHSWords = NumBitWords(RHS.size());
526 for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
527 if (Bits[i] & RHS.Bits[i])
528 return true;
529 return false;
530 }
531
532 // Comparison operators.
533 bool operator==(const BitVector &RHS) const {
534 unsigned ThisWords = NumBitWords(size());
535 unsigned RHSWords = NumBitWords(RHS.size());
536 unsigned i;
537 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
538 if (Bits[i] != RHS.Bits[i])
539 return false;
540
541 // Verify that any extra words are all zeros.
542 if (i != ThisWords) {
543 for (; i != ThisWords; ++i)
544 if (Bits[i])
545 return false;
546 } else if (i != RHSWords) {
547 for (; i != RHSWords; ++i)
548 if (RHS.Bits[i])
549 return false;
550 }
551 return true;
552 }
553
554 bool operator!=(const BitVector &RHS) const {
555 return !(*this == RHS);
556 }
557
558 /// Intersection, union, disjoint union.
559 BitVector &operator&=(const BitVector &RHS) {
560 unsigned ThisWords = NumBitWords(size());
561 unsigned RHSWords = NumBitWords(RHS.size());
562 unsigned i;
563 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
564 Bits[i] &= RHS.Bits[i];
565
566 // Any bits that are just in this bitvector become zero, because they aren't
567 // in the RHS bit vector. Any words only in RHS are ignored because they
568 // are already zero in the LHS.
569 for (; i != ThisWords; ++i)
570 Bits[i] = 0;
571
572 return *this;
573 }
574
575 /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
576 BitVector &reset(const BitVector &RHS) {
577 unsigned ThisWords = NumBitWords(size());
578 unsigned RHSWords = NumBitWords(RHS.size());
579 unsigned i;
580 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
581 Bits[i] &= ~RHS.Bits[i];
582 return *this;
583 }
584
585 /// test - Check if (This - RHS) is zero.
586 /// This is the same as reset(RHS) and any().
587 bool test(const BitVector &RHS) const {
588 unsigned ThisWords = NumBitWords(size());
589 unsigned RHSWords = NumBitWords(RHS.size());
590 unsigned i;
591 for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
592 if ((Bits[i] & ~RHS.Bits[i]) != 0)
593 return true;
594
595 for (; i != ThisWords ; ++i)
596 if (Bits[i] != 0)
597 return true;
598
599 return false;
600 }
601
602 BitVector &operator|=(const BitVector &RHS) {
603 if (size() < RHS.size())
604 resize(RHS.size());
605 for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
606 Bits[i] |= RHS.Bits[i];
607 return *this;
608 }
609
610 BitVector &operator^=(const BitVector &RHS) {
611 if (size() < RHS.size())
612 resize(RHS.size());
613 for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
614 Bits[i] ^= RHS.Bits[i];
615 return *this;
616 }
617
618 BitVector &operator>>=(unsigned N) {
619 assert(N <= Size);
620 if (LLVM_UNLIKELY(empty() || N == 0))
621 return *this;
622
623 unsigned NumWords = NumBitWords(Size);
624 assert(NumWords >= 1);
625
626 wordShr(N / BITWORD_SIZE);
627
628 unsigned BitDistance = N % BITWORD_SIZE;
629 if (BitDistance == 0)
630 return *this;
631
632 // When the shift size is not a multiple of the word size, then we have
633 // a tricky situation where each word in succession needs to extract some
634 // of the bits from the next word and or them into this word while
635 // shifting this word to make room for the new bits. This has to be done
636 // for every word in the array.
637
638 // Since we're shifting each word right, some bits will fall off the end
639 // of each word to the right, and empty space will be created on the left.
640 // The final word in the array will lose bits permanently, so starting at
641 // the beginning, work forwards shifting each word to the right, and
642 // OR'ing in the bits from the end of the next word to the beginning of
643 // the current word.
644
645 // Example:
646 // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
647 // by 4 bits.
648 // Step 1: Word[0] >>= 4 ; 0x0ABBCCDD
649 // Step 2: Word[0] |= 0x10000000 ; 0x1ABBCCDD
650 // Step 3: Word[1] >>= 4 ; 0x0EEFF001
651 // Step 4: Word[1] |= 0x50000000 ; 0x5EEFF001
652 // Step 5: Word[2] >>= 4 ; 0x02334455
653 // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
654 const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
655 const unsigned LSH = BITWORD_SIZE - BitDistance;
656
657 for (unsigned I = 0; I < NumWords - 1; ++I) {
658 Bits[I] >>= BitDistance;
659 Bits[I] |= (Bits[I + 1] & Mask) << LSH;
660 }
661
662 Bits[NumWords - 1] >>= BitDistance;
663
664 return *this;
665 }
666
667 BitVector &operator<<=(unsigned N) {
668 assert(N <= Size);
669 if (LLVM_UNLIKELY(empty() || N == 0))
670 return *this;
671
672 unsigned NumWords = NumBitWords(Size);
673 assert(NumWords >= 1);
674
675 wordShl(N / BITWORD_SIZE);
676
677 unsigned BitDistance = N % BITWORD_SIZE;
678 if (BitDistance == 0)
679 return *this;
680
681 // When the shift size is not a multiple of the word size, then we have
682 // a tricky situation where each word in succession needs to extract some
683 // of the bits from the previous word and or them into this word while
684 // shifting this word to make room for the new bits. This has to be done
685 // for every word in the array. This is similar to the algorithm outlined
686 // in operator>>=, but backwards.
687
688 // Since we're shifting each word left, some bits will fall off the end
689 // of each word to the left, and empty space will be created on the right.
690 // The first word in the array will lose bits permanently, so starting at
691 // the end, work backwards shifting each word to the left, and OR'ing
692 // in the bits from the end of the next word to the beginning of the
693 // current word.
694
695 // Example:
696 // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
697 // by 4 bits.
698 // Step 1: Word[2] <<= 4 ; 0x23344550
699 // Step 2: Word[2] |= 0x0000000E ; 0x2334455E
700 // Step 3: Word[1] <<= 4 ; 0xEFF00110
701 // Step 4: Word[1] |= 0x0000000A ; 0xEFF0011A
702 // Step 5: Word[0] <<= 4 ; 0xABBCCDD0
703 // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
704 const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
705 const unsigned RSH = BITWORD_SIZE - BitDistance;
706
707 for (int I = NumWords - 1; I > 0; --I) {
708 Bits[I] <<= BitDistance;
709 Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
710 }
711 Bits[0] <<= BitDistance;
712 clear_unused_bits();
713
714 return *this;
715 }
716
717 // Assignment operator.
718 const BitVector &operator=(const BitVector &RHS) {
719 if (this == &RHS) return *this;
720
721 Size = RHS.size();
722 unsigned RHSWords = NumBitWords(Size);
723 if (Size <= getBitCapacity()) {
724 if (Size)
725 std::memcpy(Bits.data(), RHS.Bits.data(), RHSWords * sizeof(BitWord));
726 clear_unused_bits();
727 return *this;
728 }
729
730 // Grow the bitvector to have enough elements.
731 unsigned NewCapacity = RHSWords;
732 assert(NewCapacity > 0 && "negative capacity?");
733 auto NewBits = allocate(NewCapacity);
734 std::memcpy(NewBits.data(), RHS.Bits.data(), NewCapacity * sizeof(BitWord));
735
736 // Destroy the old bits.
737 std::free(Bits.data());
738 Bits = NewBits;
739
740 return *this;
741 }
742
743 const BitVector &operator=(BitVector &&RHS) {
744 if (this == &RHS) return *this;
745
746 std::free(Bits.data());
747 Bits = RHS.Bits;
748 Size = RHS.Size;
749
750 RHS.Bits = MutableArrayRef<BitWord>();
751 RHS.Size = 0;
752
753 return *this;
754 }
755
756 void swap(BitVector &RHS) {
757 std::swap(Bits, RHS.Bits);
758 std::swap(Size, RHS.Size);
759 }
760
761 //===--------------------------------------------------------------------===//
762 // Portable bit mask operations.
763 //===--------------------------------------------------------------------===//
764 //
765 // These methods all operate on arrays of uint32_t, each holding 32 bits. The
766 // fixed word size makes it easier to work with literal bit vector constants
767 // in portable code.
768 //
769 // The LSB in each word is the lowest numbered bit. The size of a portable
770 // bit mask is always a whole multiple of 32 bits. If no bit mask size is
771 // given, the bit mask is assumed to cover the entire BitVector.
772
773 /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
774 /// This computes "*this |= Mask".
775 void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
776 applyMask<true, false>(Mask, MaskWords);
777 }
778
779 /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
780 /// Don't resize. This computes "*this &= ~Mask".
781 void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
782 applyMask<false, false>(Mask, MaskWords);
783 }
784
785 /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
786 /// Don't resize. This computes "*this |= ~Mask".
787 void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
788 applyMask<true, true>(Mask, MaskWords);
789 }
790
791 /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
792 /// Don't resize. This computes "*this &= Mask".
793 void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
794 applyMask<false, true>(Mask, MaskWords);
795 }
796
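// Usage sketch (not part of BitVector.h): the portable mask methods above take
// arrays of uint32_t, 32 bits per word with the LSB lowest, independent of the
// native BitWord width. The values below are illustrative only.
//
//   llvm::BitVector BV(64, true);                      // 64 bits, all set
//   const uint32_t Mask[2] = {0x0000000Fu, 0x00000000u};
//   BV.clearBitsNotInMask(Mask, 2);                    // keep only bits 0-3
//   assert(BV.count() == 4 && BV.test(0) && !BV.test(4));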
797private:
798 /// Perform a logical left shift of \p Count words by moving everything
799 /// \p Count words to the right in memory.
800 ///
801 /// While confusing, words are stored from least significant at Bits[0] to
802 /// most significant at Bits[NumWords-1]. A logical shift left, however,
803 /// moves the current least significant bit to a higher logical index, and
804 /// fills the previous least significant bits with 0. Thus, we actually
805 /// need to move the bytes of the memory to the right, not to the left.
806 /// Example:
807 /// Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
808 /// represents a BitVector where 0xBBBBAAAA contain the least significant
809 /// bits. So if we want to shift the BitVector left by 2 words, we need to
810 /// turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
811 /// memmove which moves right, not left.
812 void wordShl(uint32_t Count) {
813 if (Count == 0)
814 return;
815
816 uint32_t NumWords = NumBitWords(Size);
817
818 auto Src = Bits.take_front(NumWords).drop_back(Count);
819 auto Dest = Bits.take_front(NumWords).drop_front(Count);
820
821 // Since we always move Word-sized chunks of data with src and dest both
822 // aligned to a word-boundary, we don't need to worry about endianness
823 // here.
824 std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
825 std::memset(Bits.data(), 0, Count * sizeof(BitWord));
826 clear_unused_bits();
827 }
828
829 /// Perform a logical right shift of \p Count words by moving those
830 /// words to the left in memory. See wordShl for more information.
831 ///
832 void wordShr(uint32_t Count) {
833 if (Count == 0)
834 return;
835
836 uint32_t NumWords = NumBitWords(Size);
837
838 auto Src = Bits.take_front(NumWords).drop_front(Count);
839 auto Dest = Bits.take_front(NumWords).drop_back(Count);
840 assert(Dest.size() == Src.size());
841
842 std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
843 std::memset(Dest.end(), 0, Count * sizeof(BitWord));
844 }
845
846 MutableArrayRef<BitWord> allocate(size_t NumWords) {
847 BitWord *RawBits = static_cast<BitWord *>(
848 safe_malloc(NumWords * sizeof(BitWord)));
849 return MutableArrayRef<BitWord>(RawBits, NumWords);
850 }
851
852 int next_unset_in_word(int WordIndex, BitWord Word) const {
853 unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
854 return Result < size() ? Result : -1;
855 }
856
857 unsigned NumBitWords(unsigned S) const {
858 return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
859 }
860
861 // Set the unused bits in the high words.
862 void set_unused_bits(bool t = true) {
863 // Set high words first.
864 unsigned UsedWords = NumBitWords(Size);
865 if (Bits.size() > UsedWords)
866 init_words(Bits.drop_front(UsedWords), t);
867
868 // Then set any stray high bits of the last used word.
869 unsigned ExtraBits = Size % BITWORD_SIZE;
870 if (ExtraBits) {
871 BitWord ExtraBitMask = ~0UL << ExtraBits;
872 if (t)
873 Bits[UsedWords-1] |= ExtraBitMask;
874 else
875 Bits[UsedWords-1] &= ~ExtraBitMask;
876 }
877 }
878
879 // Clear the unused bits in the high words.
880 void clear_unused_bits() {
881 set_unused_bits(false);
882 }
883
884 void grow(unsigned NewSize) {
885 size_t NewCapacity = std::max<size_t>(NumBitWords(NewSize), Bits.size() * 2);
886 assert(NewCapacity > 0 && "realloc-ing zero space");
887 BitWord *NewBits = static_cast<BitWord *>(
888 safe_realloc(Bits.data(), NewCapacity * sizeof(BitWord)));
889 Bits = MutableArrayRef<BitWord>(NewBits, NewCapacity);
890 clear_unused_bits();
891 }
892
893 void init_words(MutableArrayRef<BitWord> B, bool t) {
894 if (B.size() > 0)
895 memset(B.data(), 0 - (int)t, B.size() * sizeof(BitWord));
896 }
897
898 template<bool AddBits, bool InvertMask>
899 void applyMask(const uint32_t *Mask, unsigned MaskWords) {
900 static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
901 MaskWords = std::min(MaskWords, (size() + 31) / 32);
902 const unsigned Scale = BITWORD_SIZE / 32;
903 unsigned i;
904 for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
905 BitWord BW = Bits[i];
906 // This inner loop should unroll completely when BITWORD_SIZE > 32.
907 for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
908 uint32_t M = *Mask++;
909 if (InvertMask) M = ~M;
910 if (AddBits) BW |= BitWord(M) << b;
911 else BW &= ~(BitWord(M) << b);
912 }
913 Bits[i] = BW;
914 }
915 for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
916 uint32_t M = *Mask++;
917 if (InvertMask) M = ~M;
918 if (AddBits) Bits[i] |= BitWord(M) << b;
919 else Bits[i] &= ~(BitWord(M) << b);
920 }
921 if (AddBits)
922 clear_unused_bits();
923 }
924
925public:
926 /// Return the size (in bytes) of the bit vector.
927 size_t getMemorySize() const { return Bits.size() * sizeof(BitWord); }
928 size_t getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
929};
930
931inline size_t capacity_in_bytes(const BitVector &X) {
932 return X.getMemorySize();
933}
934
935} // end namespace llvm
936
937namespace std {
938 /// Implement std::swap in terms of BitVector swap.
939 inline void
940 swap(llvm::BitVector &LHS, llvm::BitVector &RHS) {
941 LHS.swap(RHS);
942 }
943} // end namespace std
944
945#endif // LLVM_ADT_BITVECTOR_H
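
The swap function defined in namespace std above simply delegates to BitVector::swap, so the two objects exchange their heap buffers without copying any bits. A minimal sketch, assuming an LLVM build where llvm/ADT/BitVector.h is on the include path:

#include "llvm/ADT/BitVector.h"
#include <utility>

int main() {
  llvm::BitVector A(8, true);   // 8 bits, all set
  llvm::BitVector B(16);        // 16 bits, all clear
  std::swap(A, B);              // picks the overload above; constant time
  return (A.size() == 16 && A.none() && B.size() == 8 && B.all()) ? 0 : 1;
}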