Bug Summary

File: lib/ProfileData/Coverage/CoverageMapping.cpp
Warning: line 583, column 1
Potential leak of memory pointed to by 'IsNotExpandedFile.X'
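
For orientation, the following is a minimal, hypothetical reduction of the path the report describes (the function and parameter names are illustrative and do not appear in the source). findMainViewFileID constructs a SmallBitVector that is large enough to switch to the heap-backed "large" mode, so the constructor allocates a BitVector with new; the analyzer then reports that allocation as potentially leaked when the function exits through the return None branch, even though SmallBitVector's destructor deletes the pointer.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"

// Hypothetical reduction of the reported path, assuming NumFiles exceeds
// SmallBitVector's inline capacity (SmallNumDataBits) so the constructor
// heap-allocates a BitVector (steps 2-6 of the report).
static llvm::Optional<unsigned>
firstUnexpandedFile(llvm::ArrayRef<unsigned> ExpandedIDs, unsigned NumFiles) {
  llvm::SmallBitVector IsNotExpanded(NumFiles, true); // may call new BitVector(...)
  for (unsigned ID : ExpandedIDs)
    IsNotExpanded[ID] = false;
  int I = IsNotExpanded.find_first();
  if (I == -1)         // step 7: true branch taken
    return llvm::None; // step 8: potential leak reported at function exit
  return I;
}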

Annotated Source Code


/build/llvm-toolchain-snapshot-6.0~svn321639/lib/ProfileData/Coverage/CoverageMapping.cpp

1//===- CoverageMapping.cpp - Code coverage mapping support ----------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains support for clang's and llvm's instrumentation based
11// code coverage.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/ProfileData/Coverage/CoverageMapping.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/SmallBitVector.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ProfileData/Coverage/CoverageMappingReader.h"
24#include "llvm/ProfileData/InstrProfReader.h"
25#include "llvm/Support/Debug.h"
26#include "llvm/Support/Errc.h"
27#include "llvm/Support/Error.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Support/ManagedStatic.h"
30#include "llvm/Support/MemoryBuffer.h"
31#include "llvm/Support/raw_ostream.h"
32#include <algorithm>
33#include <cassert>
34#include <cstdint>
35#include <iterator>
36#include <map>
37#include <memory>
38#include <string>
39#include <system_error>
40#include <utility>
41#include <vector>
42
43using namespace llvm;
44using namespace coverage;
45
46#define DEBUG_TYPE "coverage-mapping"
47
48Counter CounterExpressionBuilder::get(const CounterExpression &E) {
49 auto It = ExpressionIndices.find(E);
50 if (It != ExpressionIndices.end())
51 return Counter::getExpression(It->second);
52 unsigned I = Expressions.size();
53 Expressions.push_back(E);
54 ExpressionIndices[E] = I;
55 return Counter::getExpression(I);
56}
57
58void CounterExpressionBuilder::extractTerms(Counter C, int Factor,
59 SmallVectorImpl<Term> &Terms) {
60 switch (C.getKind()) {
61 case Counter::Zero:
62 break;
63 case Counter::CounterValueReference:
64 Terms.emplace_back(C.getCounterID(), Factor);
65 break;
66 case Counter::Expression:
67 const auto &E = Expressions[C.getExpressionID()];
68 extractTerms(E.LHS, Factor, Terms);
69 extractTerms(
70 E.RHS, E.Kind == CounterExpression::Subtract ? -Factor : Factor, Terms);
71 break;
72 }
73}
74
75Counter CounterExpressionBuilder::simplify(Counter ExpressionTree) {
76 // Gather constant terms.
77 SmallVector<Term, 32> Terms;
78 extractTerms(ExpressionTree, +1, Terms);
79
80 // If there are no terms, this is just a zero. The algorithm below assumes at
81 // least one term.
82 if (Terms.size() == 0)
83 return Counter::getZero();
84
85 // Group the terms by counter ID.
86 std::sort(Terms.begin(), Terms.end(), [](const Term &LHS, const Term &RHS) {
87 return LHS.CounterID < RHS.CounterID;
88 });
89
90 // Combine terms by counter ID to eliminate counters that sum to zero.
91 auto Prev = Terms.begin();
92 for (auto I = Prev + 1, E = Terms.end(); I != E; ++I) {
93 if (I->CounterID == Prev->CounterID) {
94 Prev->Factor += I->Factor;
95 continue;
96 }
97 ++Prev;
98 *Prev = *I;
99 }
100 Terms.erase(++Prev, Terms.end());
101
102 Counter C;
103 // Create additions. We do this before subtractions to avoid constructs like
104 // ((0 - X) + Y), as opposed to (Y - X).
105 for (auto T : Terms) {
106 if (T.Factor <= 0)
107 continue;
108 for (int I = 0; I < T.Factor; ++I)
109 if (C.isZero())
110 C = Counter::getCounter(T.CounterID);
111 else
112 C = get(CounterExpression(CounterExpression::Add, C,
113 Counter::getCounter(T.CounterID)));
114 }
115
116 // Create subtractions.
117 for (auto T : Terms) {
118 if (T.Factor >= 0)
119 continue;
120 for (int I = 0; I < -T.Factor; ++I)
121 C = get(CounterExpression(CounterExpression::Subtract, C,
122 Counter::getCounter(T.CounterID)));
123 }
124 return C;
125}
126
127Counter CounterExpressionBuilder::add(Counter LHS, Counter RHS) {
128 return simplify(get(CounterExpression(CounterExpression::Add, LHS, RHS)));
129}
130
131Counter CounterExpressionBuilder::subtract(Counter LHS, Counter RHS) {
132 return simplify(
133 get(CounterExpression(CounterExpression::Subtract, LHS, RHS)));
134}
135
136void CounterMappingContext::dump(const Counter &C, raw_ostream &OS) const {
137 switch (C.getKind()) {
138 case Counter::Zero:
139 OS << '0';
140 return;
141 case Counter::CounterValueReference:
142 OS << '#' << C.getCounterID();
143 break;
144 case Counter::Expression: {
145 if (C.getExpressionID() >= Expressions.size())
146 return;
147 const auto &E = Expressions[C.getExpressionID()];
148 OS << '(';
149 dump(E.LHS, OS);
150 OS << (E.Kind == CounterExpression::Subtract ? " - " : " + ");
151 dump(E.RHS, OS);
152 OS << ')';
153 break;
154 }
155 }
156 if (CounterValues.empty())
157 return;
158 Expected<int64_t> Value = evaluate(C);
159 if (auto E = Value.takeError()) {
160 consumeError(std::move(E));
161 return;
162 }
163 OS << '[' << *Value << ']';
164}
165
166Expected<int64_t> CounterMappingContext::evaluate(const Counter &C) const {
167 switch (C.getKind()) {
168 case Counter::Zero:
169 return 0;
170 case Counter::CounterValueReference:
171 if (C.getCounterID() >= CounterValues.size())
172 return errorCodeToError(errc::argument_out_of_domain);
173 return CounterValues[C.getCounterID()];
174 case Counter::Expression: {
175 if (C.getExpressionID() >= Expressions.size())
176 return errorCodeToError(errc::argument_out_of_domain);
177 const auto &E = Expressions[C.getExpressionID()];
178 Expected<int64_t> LHS = evaluate(E.LHS);
179 if (!LHS)
180 return LHS;
181 Expected<int64_t> RHS = evaluate(E.RHS);
182 if (!RHS)
183 return RHS;
184 return E.Kind == CounterExpression::Subtract ? *LHS - *RHS : *LHS + *RHS;
185 }
186 }
187 llvm_unreachable("Unhandled CounterKind");
188}
189
190void FunctionRecordIterator::skipOtherFiles() {
191 while (Current != Records.end() && !Filename.empty() &&
192 Filename != Current->Filenames[0])
193 ++Current;
194 if (Current == Records.end())
195 *this = FunctionRecordIterator();
196}
197
198Error CoverageMapping::loadFunctionRecord(
199 const CoverageMappingRecord &Record,
200 IndexedInstrProfReader &ProfileReader) {
201 StringRef OrigFuncName = Record.FunctionName;
202 if (OrigFuncName.empty())
203 return make_error<CoverageMapError>(coveragemap_error::malformed);
204
205 if (Record.Filenames.empty())
206 OrigFuncName = getFuncNameWithoutPrefix(OrigFuncName);
207 else
208 OrigFuncName = getFuncNameWithoutPrefix(OrigFuncName, Record.Filenames[0]);
209
210 // Don't load records for functions we've already seen.
211 if (!FunctionNames.insert(OrigFuncName).second)
212 return Error::success();
213
214 CounterMappingContext Ctx(Record.Expressions);
215
216 std::vector<uint64_t> Counts;
217 if (Error E = ProfileReader.getFunctionCounts(Record.FunctionName,
218 Record.FunctionHash, Counts)) {
219 instrprof_error IPE = InstrProfError::take(std::move(E));
220 if (IPE == instrprof_error::hash_mismatch) {
221 FuncHashMismatches.emplace_back(Record.FunctionName, Record.FunctionHash);
222 return Error::success();
223 } else if (IPE != instrprof_error::unknown_function)
224 return make_error<InstrProfError>(IPE);
225 Counts.assign(Record.MappingRegions.size(), 0);
226 }
227 Ctx.setCounts(Counts);
228
229 assert(!Record.MappingRegions.empty() && "Function has no regions");
230
231 FunctionRecord Function(OrigFuncName, Record.Filenames);
232 for (const auto &Region : Record.MappingRegions) {
233 Expected<int64_t> ExecutionCount = Ctx.evaluate(Region.Count);
234 if (auto E = ExecutionCount.takeError()) {
235 consumeError(std::move(E));
236 return Error::success();
237 }
238 Function.pushRegion(Region, *ExecutionCount);
239 }
240 if (Function.CountedRegions.size() != Record.MappingRegions.size()) {
241 FuncCounterMismatches.emplace_back(Record.FunctionName,
242 Function.CountedRegions.size());
243 return Error::success();
244 }
245
246 Functions.push_back(std::move(Function));
247 return Error::success();
248}
249
250Expected<std::unique_ptr<CoverageMapping>> CoverageMapping::load(
251 ArrayRef<std::unique_ptr<CoverageMappingReader>> CoverageReaders,
252 IndexedInstrProfReader &ProfileReader) {
253 auto Coverage = std::unique_ptr<CoverageMapping>(new CoverageMapping());
254
255 for (const auto &CoverageReader : CoverageReaders) {
256 for (auto RecordOrErr : *CoverageReader) {
257 if (Error E = RecordOrErr.takeError())
258 return std::move(E);
259 const auto &Record = *RecordOrErr;
260 if (Error E = Coverage->loadFunctionRecord(Record, ProfileReader))
261 return std::move(E);
262 }
263 }
264
265 return std::move(Coverage);
266}
267
268Expected<std::unique_ptr<CoverageMapping>>
269CoverageMapping::load(ArrayRef<StringRef> ObjectFilenames,
270 StringRef ProfileFilename, ArrayRef<StringRef> Arches) {
271 auto ProfileReaderOrErr = IndexedInstrProfReader::create(ProfileFilename);
272 if (Error E = ProfileReaderOrErr.takeError())
273 return std::move(E);
274 auto ProfileReader = std::move(ProfileReaderOrErr.get());
275
276 SmallVector<std::unique_ptr<CoverageMappingReader>, 4> Readers;
277 SmallVector<std::unique_ptr<MemoryBuffer>, 4> Buffers;
278 for (const auto &File : llvm::enumerate(ObjectFilenames)) {
279 auto CovMappingBufOrErr = MemoryBuffer::getFileOrSTDIN(File.value());
280 if (std::error_code EC = CovMappingBufOrErr.getError())
281 return errorCodeToError(EC);
282 StringRef Arch = Arches.empty() ? StringRef() : Arches[File.index()];
283 auto CoverageReaderOrErr =
284 BinaryCoverageReader::create(CovMappingBufOrErr.get(), Arch);
285 if (Error E = CoverageReaderOrErr.takeError())
286 return std::move(E);
287 Readers.push_back(std::move(CoverageReaderOrErr.get()));
288 Buffers.push_back(std::move(CovMappingBufOrErr.get()));
289 }
290 return load(Readers, *ProfileReader);
291}
292
293namespace {
294
295/// \brief Distributes functions into instantiation sets.
296///
297/// An instantiation set is a collection of functions that have the same source
298/// code, ie, template functions specializations.
299class FunctionInstantiationSetCollector {
300 using MapT = std::map<LineColPair, std::vector<const FunctionRecord *>>;
301 MapT InstantiatedFunctions;
302
303public:
304 void insert(const FunctionRecord &Function, unsigned FileID) {
305 auto I = Function.CountedRegions.begin(), E = Function.CountedRegions.end();
306 while (I != E && I->FileID != FileID)
307 ++I;
308 assert(I != E && "function does not cover the given file");
309 auto &Functions = InstantiatedFunctions[I->startLoc()];
310 Functions.push_back(&Function);
311 }
312
313 MapT::iterator begin() { return InstantiatedFunctions.begin(); }
314 MapT::iterator end() { return InstantiatedFunctions.end(); }
315};
316
317class SegmentBuilder {
318 std::vector<CoverageSegment> &Segments;
319 SmallVector<const CountedRegion *, 8> ActiveRegions;
320
321 SegmentBuilder(std::vector<CoverageSegment> &Segments) : Segments(Segments) {}
322
323 /// Emit a segment with the count from \p Region starting at \p StartLoc.
324 //
325 /// \p IsRegionEntry: The segment is at the start of a new non-gap region.
326 /// \p EmitSkippedRegion: The segment must be emitted as a skipped region.
327 void startSegment(const CountedRegion &Region, LineColPair StartLoc,
328 bool IsRegionEntry, bool EmitSkippedRegion = false) {
329 bool HasCount = !EmitSkippedRegion &&
330 (Region.Kind != CounterMappingRegion::SkippedRegion);
331
332 // If the new segment wouldn't affect coverage rendering, skip it.
333 if (!Segments.empty() && !IsRegionEntry && !EmitSkippedRegion) {
334 const auto &Last = Segments.back();
335 if (Last.HasCount == HasCount && Last.Count == Region.ExecutionCount &&
336 !Last.IsRegionEntry)
337 return;
338 }
339
340 if (HasCount)
341 Segments.emplace_back(StartLoc.first, StartLoc.second,
342 Region.ExecutionCount, IsRegionEntry,
343 Region.Kind == CounterMappingRegion::GapRegion);
344 else
345 Segments.emplace_back(StartLoc.first, StartLoc.second, IsRegionEntry);
346
347 DEBUG({
348 const auto &Last = Segments.back();
349 dbgs() << "Segment at " << Last.Line << ":" << Last.Col
350 << " (count = " << Last.Count << ")"
351 << (Last.IsRegionEntry ? ", RegionEntry" : "")
352 << (!Last.HasCount ? ", Skipped" : "")
353 << (Last.IsGapRegion ? ", Gap" : "") << "\n";
354 });
355 }
356
357 /// Emit segments for active regions which end before \p Loc.
358 ///
359 /// \p Loc: The start location of the next region. If None, all active
360 /// regions are completed.
361 /// \p FirstCompletedRegion: Index of the first completed region.
362 void completeRegionsUntil(Optional<LineColPair> Loc,
363 unsigned FirstCompletedRegion) {
364 // Sort the completed regions by end location. This makes it simple to
365 // emit closing segments in sorted order.
366 auto CompletedRegionsIt = ActiveRegions.begin() + FirstCompletedRegion;
367 std::stable_sort(CompletedRegionsIt, ActiveRegions.end(),
368 [](const CountedRegion *L, const CountedRegion *R) {
369 return L->endLoc() < R->endLoc();
370 });
371
372 // Emit segments for all completed regions.
373 for (unsigned I = FirstCompletedRegion + 1, E = ActiveRegions.size(); I < E;
374 ++I) {
375 const auto *CompletedRegion = ActiveRegions[I];
376 assert((!Loc || CompletedRegion->endLoc() <= *Loc) &&
377 "Completed region ends after start of new region");
378
379 const auto *PrevCompletedRegion = ActiveRegions[I - 1];
380 auto CompletedSegmentLoc = PrevCompletedRegion->endLoc();
381
382 // Don't emit any more segments if they start where the new region begins.
383 if (Loc && CompletedSegmentLoc == *Loc)
384 break;
385
386 // Don't emit a segment if the next completed region ends at the same
387 // location as this one.
388 if (CompletedSegmentLoc == CompletedRegion->endLoc())
389 continue;
390
391 // Use the count from the last completed region which ends at this loc.
392 for (unsigned J = I + 1; J < E; ++J)
393 if (CompletedRegion->endLoc() == ActiveRegions[J]->endLoc())
394 CompletedRegion = ActiveRegions[J];
395
396 startSegment(*CompletedRegion, CompletedSegmentLoc, false);
397 }
398
399 auto Last = ActiveRegions.back();
400 if (FirstCompletedRegion && Last->endLoc() != *Loc) {
401 // If there's a gap after the end of the last completed region and the
402 // start of the new region, use the last active region to fill the gap.
403 startSegment(*ActiveRegions[FirstCompletedRegion - 1], Last->endLoc(),
404 false);
405 } else if (!FirstCompletedRegion && (!Loc || *Loc != Last->endLoc())) {
406 // Emit a skipped segment if there are no more active regions. This
407 // ensures that gaps between functions are marked correctly.
408 startSegment(*Last, Last->endLoc(), false, true);
409 }
410
411 // Pop the completed regions.
412 ActiveRegions.erase(CompletedRegionsIt, ActiveRegions.end());
413 }
414
415 void buildSegmentsImpl(ArrayRef<CountedRegion> Regions) {
416 for (const auto &CR : enumerate(Regions)) {
417 auto CurStartLoc = CR.value().startLoc();
418
419 // Active regions which end before the current region need to be popped.
420 auto CompletedRegions =
421 std::stable_partition(ActiveRegions.begin(), ActiveRegions.end(),
422 [&](const CountedRegion *Region) {
423 return !(Region->endLoc() <= CurStartLoc);
424 });
425 if (CompletedRegions != ActiveRegions.end()) {
426 unsigned FirstCompletedRegion =
427 std::distance(ActiveRegions.begin(), CompletedRegions);
428 completeRegionsUntil(CurStartLoc, FirstCompletedRegion);
429 }
430
431 bool GapRegion = CR.value().Kind == CounterMappingRegion::GapRegion;
432
433 // Try to emit a segment for the current region.
434 if (CurStartLoc == CR.value().endLoc()) {
435 // Avoid making zero-length regions active. If it's the last region,
436 // emit a skipped segment. Otherwise use its predecessor's count.
437 const bool Skipped = (CR.index() + 1) == Regions.size();
438 startSegment(ActiveRegions.empty() ? CR.value() : *ActiveRegions.back(),
439 CurStartLoc, !GapRegion, Skipped);
440 continue;
441 }
442 if (CR.index() + 1 == Regions.size() ||
443 CurStartLoc != Regions[CR.index() + 1].startLoc()) {
444 // Emit a segment if the next region doesn't start at the same location
445 // as this one.
446 startSegment(CR.value(), CurStartLoc, !GapRegion);
447 }
448
449 // This region is active (i.e not completed).
450 ActiveRegions.push_back(&CR.value());
451 }
452
453 // Complete any remaining active regions.
454 if (!ActiveRegions.empty())
455 completeRegionsUntil(None, 0);
456 }
457
458 /// Sort a nested sequence of regions from a single file.
459 static void sortNestedRegions(MutableArrayRef<CountedRegion> Regions) {
460 std::sort(Regions.begin(), Regions.end(), [](const CountedRegion &LHS,
461 const CountedRegion &RHS) {
462 if (LHS.startLoc() != RHS.startLoc())
463 return LHS.startLoc() < RHS.startLoc();
464 if (LHS.endLoc() != RHS.endLoc())
465 // When LHS completely contains RHS, we sort LHS first.
466 return RHS.endLoc() < LHS.endLoc();
467 // If LHS and RHS cover the same area, we need to sort them according
468 // to their kinds so that the most suitable region will become "active"
469 // in combineRegions(). Because we accumulate counter values only from
470 // regions of the same kind as the first region of the area, prefer
471 // CodeRegion to ExpansionRegion and ExpansionRegion to SkippedRegion.
472 static_assert(CounterMappingRegion::CodeRegion <
473 CounterMappingRegion::ExpansionRegion &&
474 CounterMappingRegion::ExpansionRegion <
475 CounterMappingRegion::SkippedRegion,
476 "Unexpected order of region kind values");
477 return LHS.Kind < RHS.Kind;
478 });
479 }
480
481 /// Combine counts of regions which cover the same area.
482 static ArrayRef<CountedRegion>
483 combineRegions(MutableArrayRef<CountedRegion> Regions) {
484 if (Regions.empty())
485 return Regions;
486 auto Active = Regions.begin();
487 auto End = Regions.end();
488 for (auto I = Regions.begin() + 1; I != End; ++I) {
489 if (Active->startLoc() != I->startLoc() ||
490 Active->endLoc() != I->endLoc()) {
491 // Shift to the next region.
492 ++Active;
493 if (Active != I)
494 *Active = *I;
495 continue;
496 }
497 // Merge duplicate region.
498 // If CodeRegions and ExpansionRegions cover the same area, it's probably
499 // a macro which is fully expanded to another macro. In that case, we need
500 // to accumulate counts only from CodeRegions, or else the area will be
501 // counted twice.
502 // On the other hand, a macro may have a nested macro in its body. If the
503 // outer macro is used several times, the ExpansionRegion for the nested
504 // macro will also be added several times. These ExpansionRegions cover
505 // the same source locations and have to be combined to reach the correct
506 // value for that area.
507 // We add counts of the regions of the same kind as the active region
508 // to handle the both situations.
509 if (I->Kind == Active->Kind)
510 Active->ExecutionCount += I->ExecutionCount;
511 }
512 return Regions.drop_back(std::distance(++Active, End));
513 }
514
515public:
516 /// Build a sorted list of CoverageSegments from a list of Regions.
517 static std::vector<CoverageSegment>
518 buildSegments(MutableArrayRef<CountedRegion> Regions) {
519 std::vector<CoverageSegment> Segments;
520 SegmentBuilder Builder(Segments);
521
522 sortNestedRegions(Regions);
523 ArrayRef<CountedRegion> CombinedRegions = combineRegions(Regions);
524
525 DEBUG({
526 dbgs() << "Combined regions:\n";
527 for (const auto &CR : CombinedRegions)
528 dbgs() << " " << CR.LineStart << ":" << CR.ColumnStart << " -> "
529 << CR.LineEnd << ":" << CR.ColumnEnd
530 << " (count=" << CR.ExecutionCount << ")\n";
531 });
532
533 Builder.buildSegmentsImpl(CombinedRegions);
534
535#ifndef NDEBUG
536 for (unsigned I = 1, E = Segments.size(); I < E; ++I) {
537 const auto &L = Segments[I - 1];
538 const auto &R = Segments[I];
539 if (!(L.Line < R.Line) && !(L.Line == R.Line && L.Col < R.Col)) {
540 DEBUG(dbgs() << " ! Segment " << L.Line << ":" << L.Col
541 << " followed by " << R.Line << ":" << R.Col << "\n");
542 assert(false && "Coverage segments not unique or sorted");
543 }
544 }
545#endif
546
547 return Segments;
548 }
549};
550
551} // end anonymous namespace
552
553std::vector<StringRef> CoverageMapping::getUniqueSourceFiles() const {
554 std::vector<StringRef> Filenames;
555 for (const auto &Function : getCoveredFunctions())
556 Filenames.insert(Filenames.end(), Function.Filenames.begin(),
557 Function.Filenames.end());
558 std::sort(Filenames.begin(), Filenames.end());
559 auto Last = std::unique(Filenames.begin(), Filenames.end());
560 Filenames.erase(Last, Filenames.end());
561 return Filenames;
562}
563
564static SmallBitVector gatherFileIDs(StringRef SourceFile,
565 const FunctionRecord &Function) {
566 SmallBitVector FilenameEquivalence(Function.Filenames.size(), false);
567 for (unsigned I = 0, E = Function.Filenames.size(); I < E; ++I)
568 if (SourceFile == Function.Filenames[I])
569 FilenameEquivalence[I] = true;
570 return FilenameEquivalence;
571}
572
573/// Return the ID of the file where the definition of the function is located.
574static Optional<unsigned> findMainViewFileID(const FunctionRecord &Function) {
575 SmallBitVector IsNotExpandedFile(Function.Filenames.size(), true);
2. Calling constructor for 'SmallBitVector'
6. Returning from constructor for 'SmallBitVector'
576 for (const auto &CR : Function.CountedRegions)
577 if (CR.Kind == CounterMappingRegion::ExpansionRegion)
578 IsNotExpandedFile[CR.ExpandedFileID] = false;
579 int I = IsNotExpandedFile.find_first();
580 if (I == -1)
7. Taking true branch
581 return None;
582 return I;
583}
8. Potential leak of memory pointed to by 'IsNotExpandedFile.X'
584
585/// Check if SourceFile is the file that contains the definition of
586/// the Function. Return the ID of the file in that case or None otherwise.
587static Optional<unsigned> findMainViewFileID(StringRef SourceFile,
588 const FunctionRecord &Function) {
589 Optional<unsigned> I = findMainViewFileID(Function);
590 if (I && SourceFile == Function.Filenames[*I])
591 return I;
592 return None;
593}
594
595static bool isExpansion(const CountedRegion &R, unsigned FileID) {
596 return R.Kind == CounterMappingRegion::ExpansionRegion && R.FileID == FileID;
597}
598
599CoverageData CoverageMapping::getCoverageForFile(StringRef Filename) const {
600 CoverageData FileCoverage(Filename);
601 std::vector<CountedRegion> Regions;
602
603 for (const auto &Function : Functions) {
604 auto MainFileID = findMainViewFileID(Filename, Function);
605 auto FileIDs = gatherFileIDs(Filename, Function);
606 for (const auto &CR : Function.CountedRegions)
607 if (FileIDs.test(CR.FileID)) {
608 Regions.push_back(CR);
609 if (MainFileID && isExpansion(CR, *MainFileID))
610 FileCoverage.Expansions.emplace_back(CR, Function);
611 }
612 }
613
614 DEBUG(dbgs() << "Emitting segments for file: " << Filename << "\n");
615 FileCoverage.Segments = SegmentBuilder::buildSegments(Regions);
616
617 return FileCoverage;
618}
619
620std::vector<InstantiationGroup>
621CoverageMapping::getInstantiationGroups(StringRef Filename) const {
622 FunctionInstantiationSetCollector InstantiationSetCollector;
623 for (const auto &Function : Functions) {
624 auto MainFileID = findMainViewFileID(Filename, Function);
625 if (!MainFileID)
626 continue;
627 InstantiationSetCollector.insert(Function, *MainFileID);
628 }
629
630 std::vector<InstantiationGroup> Result;
631 for (auto &InstantiationSet : InstantiationSetCollector) {
632 InstantiationGroup IG{InstantiationSet.first.first,
633 InstantiationSet.first.second,
634 std::move(InstantiationSet.second)};
635 Result.emplace_back(std::move(IG));
636 }
637 return Result;
638}
639
640CoverageData
641CoverageMapping::getCoverageForFunction(const FunctionRecord &Function) const {
642 auto MainFileID = findMainViewFileID(Function);
1. Calling 'findMainViewFileID'
643 if (!MainFileID)
644 return CoverageData();
645
646 CoverageData FunctionCoverage(Function.Filenames[*MainFileID]);
647 std::vector<CountedRegion> Regions;
648 for (const auto &CR : Function.CountedRegions)
649 if (CR.FileID == *MainFileID) {
650 Regions.push_back(CR);
651 if (isExpansion(CR, *MainFileID))
652 FunctionCoverage.Expansions.emplace_back(CR, Function);
653 }
654
655 DEBUG(dbgs() << "Emitting segments for function: " << Function.Name << "\n");
656 FunctionCoverage.Segments = SegmentBuilder::buildSegments(Regions);
657
658 return FunctionCoverage;
659}
660
661CoverageData CoverageMapping::getCoverageForExpansion(
662 const ExpansionRecord &Expansion) const {
663 CoverageData ExpansionCoverage(
664 Expansion.Function.Filenames[Expansion.FileID]);
665 std::vector<CountedRegion> Regions;
666 for (const auto &CR : Expansion.Function.CountedRegions)
667 if (CR.FileID == Expansion.FileID) {
668 Regions.push_back(CR);
669 if (isExpansion(CR, Expansion.FileID))
670 ExpansionCoverage.Expansions.emplace_back(CR, Expansion.Function);
671 }
672
673 DEBUG(dbgs() << "Emitting segments for expansion of file " << Expansion.FileID
674 << "\n");
675 ExpansionCoverage.Segments = SegmentBuilder::buildSegments(Regions);
676
677 return ExpansionCoverage;
678}
679
680LineCoverageStats::LineCoverageStats(
681 ArrayRef<const CoverageSegment *> LineSegments,
682 const CoverageSegment *WrappedSegment, unsigned Line)
683 : ExecutionCount(0), HasMultipleRegions(false), Mapped(false), Line(Line),
684 LineSegments(LineSegments), WrappedSegment(WrappedSegment) {
685 // Find the minimum number of regions which start in this line.
686 unsigned MinRegionCount = 0;
687 auto isStartOfRegion = [](const CoverageSegment *S) {
688 return !S->IsGapRegion && S->HasCount && S->IsRegionEntry;
689 };
690 for (unsigned I = 0; I < LineSegments.size() && MinRegionCount < 2; ++I)
691 if (isStartOfRegion(LineSegments[I]))
692 ++MinRegionCount;
693
694 bool StartOfSkippedRegion = !LineSegments.empty() &&
695 !LineSegments.front()->HasCount &&
696 LineSegments.front()->IsRegionEntry;
697
698 HasMultipleRegions = MinRegionCount > 1;
699 Mapped =
700 !StartOfSkippedRegion &&
701 ((WrappedSegment && WrappedSegment->HasCount) || (MinRegionCount > 0));
702
703 if (!Mapped)
704 return;
705
706 // Pick the max count from the non-gap, region entry segments and the
707 // wrapped count.
708 if (WrappedSegment)
709 ExecutionCount = WrappedSegment->Count;
710 if (!MinRegionCount)
711 return;
712 for (const auto *LS : LineSegments)
713 if (isStartOfRegion(LS))
714 ExecutionCount = std::max(ExecutionCount, LS->Count);
715}
716
717LineCoverageIterator &LineCoverageIterator::operator++() {
718 if (Next == CD.end()) {
719 Stats = LineCoverageStats();
720 Ended = true;
721 return *this;
722 }
723 if (Segments.size())
724 WrappedSegment = Segments.back();
725 Segments.clear();
726 while (Next != CD.end() && Next->Line == Line)
727 Segments.push_back(&*Next++);
728 Stats = LineCoverageStats(Segments, WrappedSegment, Line);
729 ++Line;
730 return *this;
731}
732
733static std::string getCoverageMapErrString(coveragemap_error Err) {
734 switch (Err) {
735 case coveragemap_error::success:
736 return "Success";
737 case coveragemap_error::eof:
738 return "End of File";
739 case coveragemap_error::no_data_found:
740 return "No coverage data found";
741 case coveragemap_error::unsupported_version:
742 return "Unsupported coverage format version";
743 case coveragemap_error::truncated:
744 return "Truncated coverage data";
745 case coveragemap_error::malformed:
746 return "Malformed coverage data";
747 }
748 llvm_unreachable("A value of coveragemap_error has no message.");
749}
750
751namespace {
752
753// FIXME: This class is only here to support the transition to llvm::Error. It
754// will be removed once this transition is complete. Clients should prefer to
755// deal with the Error value directly, rather than converting to error_code.
756class CoverageMappingErrorCategoryType : public std::error_category {
757 const char *name() const noexcept override { return "llvm.coveragemap"; }
758 std::string message(int IE) const override {
759 return getCoverageMapErrString(static_cast<coveragemap_error>(IE));
760 }
761};
762
763} // end anonymous namespace
764
765std::string CoverageMapError::message() const {
766 return getCoverageMapErrString(Err);
767}
768
769static ManagedStatic<CoverageMappingErrorCategoryType> ErrorCategory;
770
771const std::error_category &llvm::coverage::coveragemap_category() {
772 return *ErrorCategory;
773}
774
775char CoverageMapError::ID = 0;

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/ADT/SmallBitVector.h

1//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the SmallBitVector class.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_SMALLBITVECTOR_H
15#define LLVM_ADT_SMALLBITVECTOR_H
16
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/iterator_range.h"
19#include "llvm/Support/MathExtras.h"
20#include <algorithm>
21#include <cassert>
22#include <climits>
23#include <cstddef>
24#include <cstdint>
25#include <limits>
26#include <utility>
27
28namespace llvm {
29
30/// This is a 'bitvector' (really, a variable-sized bit array), optimized for
31/// the case when the array is small. It contains one pointer-sized field, which
32/// is directly used as a plain collection of bits when possible, or as a
33/// pointer to a larger heap-allocated array when necessary. This allows normal
34/// "small" cases to be fast without losing generality for large inputs.
35class SmallBitVector {
36 // TODO: In "large" mode, a pointer to a BitVector is used, leading to an
37 // unnecessary level of indirection. It would be more efficient to use a
38 // pointer to memory containing size, allocation size, and the array of bits.
39 uintptr_t X = 1;
40
41 enum {
42 // The number of bits in this class.
43 NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
44
45 // One bit is used to discriminate between small and large mode. The
46 // remaining bits are used for the small-mode representation.
47 SmallNumRawBits = NumBaseBits - 1,
48
49 // A few more bits are used to store the size of the bit set in small mode.
50 // Theoretically this is a ceil-log2. These bits are encoded in the most
51 // significant bits of the raw bits.
52 SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
53 NumBaseBits == 64 ? 6 :
54 SmallNumRawBits),
55
56 // The remaining bits are used to store the actual set in small mode.
57 SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
58 };
59
60 static_assert(NumBaseBits == 64 || NumBaseBits == 32,
61 "Unsupported word size");
62
63public:
64 using size_type = unsigned;
65
66 // Encapsulation of a single bit.
67 class reference {
68 SmallBitVector &TheVector;
69 unsigned BitPos;
70
71 public:
72 reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}
73
74 reference(const reference&) = default;
75
76 reference& operator=(reference t) {
77 *this = bool(t);
78 return *this;
79 }
80
81 reference& operator=(bool t) {
82 if (t)
83 TheVector.set(BitPos);
84 else
85 TheVector.reset(BitPos);
86 return *this;
87 }
88
89 operator bool() const {
90 return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
91 }
92 };
93
94private:
95 bool isSmall() const {
96 return X & uintptr_t(1);
97 }
98
99 BitVector *getPointer() const {
100 assert(!isSmall());
101 return reinterpret_cast<BitVector *>(X);
102 }
103
104 void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) {
105 X = 1;
106 setSmallSize(NewSize);
107 setSmallBits(NewSmallBits);
108 }
109
110 void switchToLarge(BitVector *BV) {
111 X = reinterpret_cast<uintptr_t>(BV);
112 assert(!isSmall() && "Tried to use an unaligned pointer");
113 }
114
115 // Return all the bits used for the "small" representation; this includes
116 // bits for the size as well as the element bits.
117 uintptr_t getSmallRawBits() const {
118 assert(isSmall());
119 return X >> 1;
120 }
121
122 void setSmallRawBits(uintptr_t NewRawBits) {
123 assert(isSmall());
124 X = (NewRawBits << 1) | uintptr_t(1);
125 }
126
127 // Return the size.
128 size_t getSmallSize() const { return getSmallRawBits() >> SmallNumDataBits; }
129
130 void setSmallSize(size_t Size) {
131 setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
132 }
133
134 // Return the element bits.
135 uintptr_t getSmallBits() const {
136 return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
137 }
138
139 void setSmallBits(uintptr_t NewBits) {
140 setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
141 (getSmallSize() << SmallNumDataBits));
142 }
143
144public:
145 /// Creates an empty bitvector.
146 SmallBitVector() = default;
147
148 /// Creates a bitvector of specified number of bits. All bits are initialized
149 /// to the specified value.
150 explicit SmallBitVector(unsigned s, bool t = false) {
151 if (s <= SmallNumDataBits)
3. Assuming 's' is > SmallNumDataBits
4. Taking false branch
152 switchToSmall(t ? ~uintptr_t(0) : 0, s);
153 else
154 switchToLarge(new BitVector(s, t));
5. Memory is allocated
155 }
156
157 /// SmallBitVector copy ctor.
158 SmallBitVector(const SmallBitVector &RHS) {
159 if (RHS.isSmall())
160 X = RHS.X;
161 else
162 switchToLarge(new BitVector(*RHS.getPointer()));
163 }
164
165 SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
166 RHS.X = 1;
167 }
168
169 ~SmallBitVector() {
170 if (!isSmall())
171 delete getPointer();
172 }
173
174 using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>;
175 using set_iterator = const_set_bits_iterator;
176
177 const_set_bits_iterator set_bits_begin() const {
178 return const_set_bits_iterator(*this);
179 }
180
181 const_set_bits_iterator set_bits_end() const {
182 return const_set_bits_iterator(*this, -1);
183 }
184
185 iterator_range<const_set_bits_iterator> set_bits() const {
186 return make_range(set_bits_begin(), set_bits_end());
187 }
188
189 /// Tests whether there are no bits in this bitvector.
190 bool empty() const {
191 return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
192 }
193
194 /// Returns the number of bits in this bitvector.
195 size_t size() const {
196 return isSmall() ? getSmallSize() : getPointer()->size();
197 }
198
199 /// Returns the number of bits which are set.
200 size_type count() const {
201 if (isSmall()) {
202 uintptr_t Bits = getSmallBits();
203 return countPopulation(Bits);
204 }
205 return getPointer()->count();
206 }
207
208 /// Returns true if any bit is set.
209 bool any() const {
210 if (isSmall())
211 return getSmallBits() != 0;
212 return getPointer()->any();
213 }
214
215 /// Returns true if all bits are set.
216 bool all() const {
217 if (isSmall())
218 return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
219 return getPointer()->all();
220 }
221
222 /// Returns true if none of the bits are set.
223 bool none() const {
224 if (isSmall())
225 return getSmallBits() == 0;
226 return getPointer()->none();
227 }
228
229 /// Returns the index of the first set bit, -1 if none of the bits are set.
230 int find_first() const {
231 if (isSmall()) {
232 uintptr_t Bits = getSmallBits();
233 if (Bits == 0)
234 return -1;
235 return countTrailingZeros(Bits);
236 }
237 return getPointer()->find_first();
238 }
239
240 int find_last() const {
241 if (isSmall()) {
242 uintptr_t Bits = getSmallBits();
243 if (Bits == 0)
244 return -1;
245 return NumBaseBits - countLeadingZeros(Bits);
246 }
247 return getPointer()->find_last();
248 }
249
250 /// Returns the index of the first unset bit, -1 if all of the bits are set.
251 int find_first_unset() const {
252 if (isSmall()) {
253 if (count() == getSmallSize())
254 return -1;
255
256 uintptr_t Bits = getSmallBits();
257 return countTrailingOnes(Bits);
258 }
259 return getPointer()->find_first_unset();
260 }
261
262 int find_last_unset() const {
263 if (isSmall()) {
264 if (count() == getSmallSize())
265 return -1;
266
267 uintptr_t Bits = getSmallBits();
268 return NumBaseBits - countLeadingOnes(Bits);
269 }
270 return getPointer()->find_last_unset();
271 }
272
273 /// Returns the index of the next set bit following the "Prev" bit.
274 /// Returns -1 if the next set bit is not found.
275 int find_next(unsigned Prev) const {
276 if (isSmall()) {
277 uintptr_t Bits = getSmallBits();
278 // Mask off previous bits.
279 Bits &= ~uintptr_t(0) << (Prev + 1);
280 if (Bits == 0 || Prev + 1 >= getSmallSize())
281 return -1;
282 return countTrailingZeros(Bits);
283 }
284 return getPointer()->find_next(Prev);
285 }
286
287 /// Returns the index of the next unset bit following the "Prev" bit.
288 /// Returns -1 if the next unset bit is not found.
289 int find_next_unset(unsigned Prev) const {
290 if (isSmall()) {
291 ++Prev;
292 uintptr_t Bits = getSmallBits();
293 // Mask in previous bits.
294 uintptr_t Mask = (1 << Prev) - 1;
295 Bits |= Mask;
296
297 if (Bits == ~uintptr_t(0) || Prev + 1 >= getSmallSize())
298 return -1;
299 return countTrailingOnes(Bits);
300 }
301 return getPointer()->find_next_unset(Prev);
302 }
303
304 /// find_prev - Returns the index of the first set bit that precedes the
305 /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
306 int find_prev(unsigned PriorTo) const {
307 if (isSmall()) {
308 if (PriorTo == 0)
309 return -1;
310
311 --PriorTo;
312 uintptr_t Bits = getSmallBits();
313 Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
314 if (Bits == 0)
315 return -1;
316
317 return NumBaseBits - countLeadingZeros(Bits) - 1;
318 }
319 return getPointer()->find_prev(PriorTo);
320 }
321
322 /// Clear all bits.
323 void clear() {
324 if (!isSmall())
325 delete getPointer();
326 switchToSmall(0, 0);
327 }
328
329 /// Grow or shrink the bitvector.
330 void resize(unsigned N, bool t = false) {
331 if (!isSmall()) {
332 getPointer()->resize(N, t);
333 } else if (SmallNumDataBits >= N) {
334 uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
335 setSmallSize(N);
336 setSmallBits(NewBits | getSmallBits());
337 } else {
338 BitVector *BV = new BitVector(N, t);
339 uintptr_t OldBits = getSmallBits();
340 for (size_t i = 0, e = getSmallSize(); i != e; ++i)
341 (*BV)[i] = (OldBits >> i) & 1;
342 switchToLarge(BV);
343 }
344 }
345
346 void reserve(unsigned N) {
347 if (isSmall()) {
348 if (N > SmallNumDataBits) {
349 uintptr_t OldBits = getSmallRawBits();
350 size_t SmallSize = getSmallSize();
351 BitVector *BV = new BitVector(SmallSize);
352 for (size_t i = 0; i < SmallSize; ++i)
353 if ((OldBits >> i) & 1)
354 BV->set(i);
355 BV->reserve(N);
356 switchToLarge(BV);
357 }
358 } else {
359 getPointer()->reserve(N);
360 }
361 }
362
363 // Set, reset, flip
364 SmallBitVector &set() {
365 if (isSmall())
366 setSmallBits(~uintptr_t(0));
367 else
368 getPointer()->set();
369 return *this;
370 }
371
372 SmallBitVector &set(unsigned Idx) {
373 if (isSmall()) {
374 assert(Idx <= static_cast<unsigned>(
375 std::numeric_limits<uintptr_t>::digits) &&
376 "undefined behavior");
377 setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
378 }
379 else
380 getPointer()->set(Idx);
381 return *this;
382 }
383
384 /// Efficiently set a range of bits in [I, E)
385 SmallBitVector &set(unsigned I, unsigned E) {
386 assert(I <= E && "Attempted to set backwards range!");
387 assert(E <= size() && "Attempted to set out-of-bounds range!");
388 if (I == E) return *this;
389 if (isSmall()) {
390 uintptr_t EMask = ((uintptr_t)1) << E;
391 uintptr_t IMask = ((uintptr_t)1) << I;
392 uintptr_t Mask = EMask - IMask;
393 setSmallBits(getSmallBits() | Mask);
394 } else
395 getPointer()->set(I, E);
396 return *this;
397 }
398
399 SmallBitVector &reset() {
400 if (isSmall())
401 setSmallBits(0);
402 else
403 getPointer()->reset();
404 return *this;
405 }
406
407 SmallBitVector &reset(unsigned Idx) {
408 if (isSmall())
409 setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
410 else
411 getPointer()->reset(Idx);
412 return *this;
413 }
414
415 /// Efficiently reset a range of bits in [I, E)
416 SmallBitVector &reset(unsigned I, unsigned E) {
417 assert(I <= E && "Attempted to reset backwards range!");
418 assert(E <= size() && "Attempted to reset out-of-bounds range!");
419 if (I == E) return *this;
420 if (isSmall()) {
421 uintptr_t EMask = ((uintptr_t)1) << E;
422 uintptr_t IMask = ((uintptr_t)1) << I;
423 uintptr_t Mask = EMask - IMask;
424 setSmallBits(getSmallBits() & ~Mask);
425 } else
426 getPointer()->reset(I, E);
427 return *this;
428 }
429
430 SmallBitVector &flip() {
431 if (isSmall())
432 setSmallBits(~getSmallBits());
433 else
434 getPointer()->flip();
435 return *this;
436 }
437
438 SmallBitVector &flip(unsigned Idx) {
439 if (isSmall())
440 setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
441 else
442 getPointer()->flip(Idx);
443 return *this;
444 }
445
446 // No argument flip.
447 SmallBitVector operator~() const {
448 return SmallBitVector(*this).flip();
449 }
450
451 // Indexing.
452 reference operator[](unsigned Idx) {
453 assert(Idx < size() && "Out-of-bounds Bit access.");
454 return reference(*this, Idx);
455 }
456
457 bool operator[](unsigned Idx) const {
458 assert(Idx < size() && "Out-of-bounds Bit access.");
459 if (isSmall())
460 return ((getSmallBits() >> Idx) & 1) != 0;
461 return getPointer()->operator[](Idx);
462 }
463
464 bool test(unsigned Idx) const {
465 return (*this)[Idx];
466 }
467
468 /// Test if any common bits are set.
469 bool anyCommon(const SmallBitVector &RHS) const {
470 if (isSmall() && RHS.isSmall())
471 return (getSmallBits() & RHS.getSmallBits()) != 0;
472 if (!isSmall() && !RHS.isSmall())
473 return getPointer()->anyCommon(*RHS.getPointer());
474
475 for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
476 if (test(i) && RHS.test(i))
477 return true;
478 return false;
479 }
480
481 // Comparison operators.
482 bool operator==(const SmallBitVector &RHS) const {
483 if (size() != RHS.size())
484 return false;
485 if (isSmall())
486 return getSmallBits() == RHS.getSmallBits();
487 else
488 return *getPointer() == *RHS.getPointer();
489 }
490
491 bool operator!=(const SmallBitVector &RHS) const {
492 return !(*this == RHS);
493 }
494
495 // Intersection, union, disjoint union.
496 SmallBitVector &operator&=(const SmallBitVector &RHS) {
497 resize(std::max(size(), RHS.size()));
498 if (isSmall())
499 setSmallBits(getSmallBits() & RHS.getSmallBits());
500 else if (!RHS.isSmall())
501 getPointer()->operator&=(*RHS.getPointer());
502 else {
503 SmallBitVector Copy = RHS;
504 Copy.resize(size());
505 getPointer()->operator&=(*Copy.getPointer());
506 }
507 return *this;
508 }
509
510 /// Reset bits that are set in RHS. Same as *this &= ~RHS.
511 SmallBitVector &reset(const SmallBitVector &RHS) {
512 if (isSmall() && RHS.isSmall())
513 setSmallBits(getSmallBits() & ~RHS.getSmallBits());
514 else if (!isSmall() && !RHS.isSmall())
515 getPointer()->reset(*RHS.getPointer());
516 else
517 for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
518 if (RHS.test(i))
519 reset(i);
520
521 return *this;
522 }
523
524 /// Check if (This - RHS) is zero. This is the same as reset(RHS) and any().
525 bool test(const SmallBitVector &RHS) const {
526 if (isSmall() && RHS.isSmall())
527 return (getSmallBits() & ~RHS.getSmallBits()) != 0;
528 if (!isSmall() && !RHS.isSmall())
529 return getPointer()->test(*RHS.getPointer());
530
531 unsigned i, e;
532 for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
533 if (test(i) && !RHS.test(i))
534 return true;
535
536 for (e = size(); i != e; ++i)
537 if (test(i))
538 return true;
539
540 return false;
541 }
542
543 SmallBitVector &operator|=(const SmallBitVector &RHS) {
544 resize(std::max(size(), RHS.size()));
545 if (isSmall())
546 setSmallBits(getSmallBits() | RHS.getSmallBits());
547 else if (!RHS.isSmall())
548 getPointer()->operator|=(*RHS.getPointer());
549 else {
550 SmallBitVector Copy = RHS;
551 Copy.resize(size());
552 getPointer()->operator|=(*Copy.getPointer());
553 }
554 return *this;
555 }
556
557 SmallBitVector &operator^=(const SmallBitVector &RHS) {
558 resize(std::max(size(), RHS.size()));
559 if (isSmall())
560 setSmallBits(getSmallBits() ^ RHS.getSmallBits());
561 else if (!RHS.isSmall())
562 getPointer()->operator^=(*RHS.getPointer());
563 else {
564 SmallBitVector Copy = RHS;
565 Copy.resize(size());
566 getPointer()->operator^=(*Copy.getPointer());
567 }
568 return *this;
569 }
570
571 SmallBitVector &operator<<=(unsigned N) {
572 if (isSmall())
573 setSmallBits(getSmallBits() << N);
574 else
575 getPointer()->operator<<=(N);
576 return *this;
577 }
578
579 SmallBitVector &operator>>=(unsigned N) {
580 if (isSmall())
581 setSmallBits(getSmallBits() >> N);
582 else
583 getPointer()->operator>>=(N);
584 return *this;
585 }
586
587 // Assignment operator.
588 const SmallBitVector &operator=(const SmallBitVector &RHS) {
589 if (isSmall()) {
590 if (RHS.isSmall())
591 X = RHS.X;
592 else
593 switchToLarge(new BitVector(*RHS.getPointer()));
594 } else {
595 if (!RHS.isSmall())
596 *getPointer() = *RHS.getPointer();
597 else {
598 delete getPointer();
599 X = RHS.X;
600 }
601 }
602 return *this;
603 }
604
605 const SmallBitVector &operator=(SmallBitVector &&RHS) {
606 if (this != &RHS) {
607 clear();
608 swap(RHS);
609 }
610 return *this;
611 }
612
613 void swap(SmallBitVector &RHS) {
614 std::swap(X, RHS.X);
615 }
616
617 /// Add '1' bits from Mask to this vector. Don't resize.
618 /// This computes "*this |= Mask".
619 void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
620 if (isSmall())
621 applyMask<true, false>(Mask, MaskWords);
622 else
623 getPointer()->setBitsInMask(Mask, MaskWords);
624 }
625
626 /// Clear any bits in this vector that are set in Mask. Don't resize.
627 /// This computes "*this &= ~Mask".
628 void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
629 if (isSmall())
630 applyMask<false, false>(Mask, MaskWords);
631 else
632 getPointer()->clearBitsInMask(Mask, MaskWords);
633 }
634
635 /// Add a bit to this vector for every '0' bit in Mask. Don't resize.
636 /// This computes "*this |= ~Mask".
637 void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
638 if (isSmall())
639 applyMask<true, true>(Mask, MaskWords);
640 else
641 getPointer()->setBitsNotInMask(Mask, MaskWords);
642 }
643
644 /// Clear a bit in this vector for every '0' bit in Mask. Don't resize.
645 /// This computes "*this &= Mask".
646 void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
647 if (isSmall())
648 applyMask<false, true>(Mask, MaskWords);
649 else
650 getPointer()->clearBitsNotInMask(Mask, MaskWords);
651 }
652
653private:
654 template <bool AddBits, bool InvertMask>
655 void applyMask(const uint32_t *Mask, unsigned MaskWords) {
656 assert(MaskWords <= sizeof(uintptr_t) && "Mask is larger than base!");
657 uintptr_t M = Mask[0];
658 if (NumBaseBits == 64)
659 M |= uint64_t(Mask[1]) << 32;
660 if (InvertMask)
661 M = ~M;
662 if (AddBits)
663 setSmallBits(getSmallBits() | M);
664 else
665 setSmallBits(getSmallBits() & ~M);
666 }
667};
668
669inline SmallBitVector
670operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) {
671 SmallBitVector Result(LHS);
672 Result &= RHS;
673 return Result;
674}
675
676inline SmallBitVector
677operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) {
678 SmallBitVector Result(LHS);
679 Result |= RHS;
680 return Result;
681}
682
683inline SmallBitVector
684operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
685 SmallBitVector Result(LHS);
686 Result ^= RHS;
687 return Result;
688}
689
690} // end namespace llvm
691
692namespace std {
693
694/// Implement std::swap in terms of BitVector swap.
695inline void
696swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
697 LHS.swap(RHS);
698}
699
700} // end namespace std
701
702#endif // LLVM_ADT_SMALLBITVECTOR_H