Bug Summary

File: compiler-rt/lib/lsan/lsan_common.cpp
Warning: line 767, column 3
Called C++ object pointer is null
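
In brief: on the path annotated below, Symbolizer::GetModuleNameForPc() returns null (steps 1-6), so the module-name suppression branch at line 754 is not taken; the analyzer then assumes that Symbolizer::SymbolizePC() also returns a null 'frames' pointer (step 8), so the loop at line 761 never executes and the unconditional call frames->ClearAll() at line 767 (step 10) dereferences a null SymbolizedStack pointer.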

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name lsan_common.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I projects/compiler-rt/lib/lsan -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/lsan -I include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/lsan/.. -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-format -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-11-10-160236-22541-1 -x c++ /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/lsan/lsan_common.cpp

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/lsan/lsan_common.cpp

1//=-- lsan_common.cpp -----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of LeakSanitizer.
10// Implementation of common leak checking functionality.
11//
12//===----------------------------------------------------------------------===//
13
14#include "lsan_common.h"
15
16#include "sanitizer_common/sanitizer_common.h"
17#include "sanitizer_common/sanitizer_flag_parser.h"
18#include "sanitizer_common/sanitizer_flags.h"
19#include "sanitizer_common/sanitizer_placement_new.h"
20#include "sanitizer_common/sanitizer_procmaps.h"
21#include "sanitizer_common/sanitizer_report_decorator.h"
22#include "sanitizer_common/sanitizer_stackdepot.h"
23#include "sanitizer_common/sanitizer_stacktrace.h"
24#include "sanitizer_common/sanitizer_suppressions.h"
25#include "sanitizer_common/sanitizer_thread_registry.h"
26#include "sanitizer_common/sanitizer_tls_get_addr.h"
27
28#if CAN_SANITIZE_LEAKS
29namespace __lsan {
30
31// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
32// also to protect the global list of root regions.
33Mutex global_mutex;
34
35Flags lsan_flags;
36
37
38void DisableCounterUnderflow() {
39 if (common_flags()->detect_leaks) {
40 Report("Unmatched call to __lsan_enable().\n");
41 Die();
42 }
43}
44
45void Flags::SetDefaults() {
46#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
47#include "lsan_flags.inc"
48#undef LSAN_FLAG
49}
50
51void RegisterLsanFlags(FlagParser *parser, Flags *f) {
52#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
53 RegisterFlag(parser, #Name, Description, &f->Name);
54#include "lsan_flags.inc"
55#undef LSAN_FLAG
56}
57
58#define LOG_POINTERS(...) \
59 do { \
60 if (flags()->log_pointers) Report(__VA_ARGS__); \
61 } while (0)
62
63#define LOG_THREADS(...) \
64 do { \
65 if (flags()->log_threads) Report(__VA_ARGS__); \
66 } while (0)
67
68class LeakSuppressionContext {
69 bool parsed = false;
70 SuppressionContext context;
71 bool suppressed_stacks_sorted = true;
72 InternalMmapVector<u32> suppressed_stacks;
73
74 Suppression *GetSuppressionForAddr(uptr addr);
75 void LazyInit();
76
77 public:
78 LeakSuppressionContext(const char *suppression_types[],
79 int suppression_types_num)
80 : context(suppression_types, suppression_types_num) {}
81
82 Suppression *GetSuppressionForStack(u32 stack_trace_id,
83 const StackTrace &stack);
84
85 const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
86 if (!suppressed_stacks_sorted) {
87 suppressed_stacks_sorted = true;
88 SortAndDedup(suppressed_stacks);
89 }
90 return suppressed_stacks;
91 }
92 void PrintMatchedSuppressions();
93};
94
95ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
96static LeakSuppressionContext *suppression_ctx = nullptr;
97static const char kSuppressionLeak[] = "leak";
98static const char *kSuppressionTypes[] = { kSuppressionLeak };
99static const char kStdSuppressions[] =
100#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
101 // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
102 // definition.
103 "leak:*pthread_exit*\n"
104#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
105#if SANITIZER_MAC
106 // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
107 "leak:*_os_trace*\n"
108#endif
109 // TLS leak in some glibc versions, described in
110 // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
111 "leak:*tls_get_addr*\n";
112
113void InitializeSuppressions() {
114 CHECK_EQ(nullptr, suppression_ctx);
115 suppression_ctx = new (suppression_placeholder)
116 LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
117}
118
119void LeakSuppressionContext::LazyInit() {
120 if (!parsed) {
121 parsed = true;
122 context.ParseFromFile(flags()->suppressions);
123 if (&__lsan_default_suppressions)
124 context.Parse(__lsan_default_suppressions());
125 context.Parse(kStdSuppressions);
126 }
127}
128
129static LeakSuppressionContext *GetSuppressionContext() {
130 CHECK(suppression_ctx);
131 return suppression_ctx;
132}
133
134static InternalMmapVector<RootRegion> *root_regions;
135
136InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
137
138void InitializeRootRegions() {
139 CHECK(!root_regions);
140 ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
141 root_regions = new (placeholder) InternalMmapVector<RootRegion>();
142}
143
144void InitCommonLsan() {
145 InitializeRootRegions();
146 if (common_flags()->detect_leaks) {
147 // Initialization which can fail or print warnings should only be done if
148 // LSan is actually enabled.
149 InitializeSuppressions();
150 InitializePlatformSpecificModules();
151 }
152}
153
154class Decorator: public __sanitizer::SanitizerCommonDecorator {
155 public:
156 Decorator() : SanitizerCommonDecorator() { }
157 const char *Error() { return Red(); }
158 const char *Leak() { return Blue(); }
159};
160
161static inline bool CanBeAHeapPointer(uptr p) {
162 // Since our heap is located in mmap-ed memory, we can assume a sensible lower
163 // bound on heap addresses.
164 const uptr kMinAddress = 4 * 4096;
165 if (p < kMinAddress) return false;
166#if defined(__x86_64__)
167 // Accept only canonical form user-space addresses.
168 return ((p >> 47) == 0);
169#elif defined(__mips64)
170 return ((p >> 40) == 0);
171#elif defined(__aarch64__)
172 unsigned runtimeVMA =
173 (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
174 return ((p >> runtimeVMA) == 0);
175#else
176 return true;
177#endif
178}
179
180// Scans the memory range, looking for byte patterns that point into allocator
181// chunks. Marks those chunks with |tag| and adds them to |frontier|.
182// There are two usage modes for this function: finding reachable chunks
183// (|tag| = kReachable) and finding indirectly leaked chunks
184// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
185// so |frontier| = 0.
186void ScanRangeForPointers(uptr begin, uptr end,
187 Frontier *frontier,
188 const char *region_type, ChunkTag tag) {
189 CHECK(tag == kReachable || tag == kIndirectlyLeaked);
190 const uptr alignment = flags()->pointer_alignment();
191 LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
192 (void *)end);
193 uptr pp = begin;
194 if (pp % alignment)
195 pp = pp + alignment - pp % alignment;
196 for (; pp + sizeof(void *) <= end; pp += alignment) {
197 void *p = *reinterpret_cast<void **>(pp);
198 if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
199 uptr chunk = PointsIntoChunk(p);
200 if (!chunk) continue;
201 // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
202 if (chunk == begin) continue;
203 LsanMetadata m(chunk);
204 if (m.tag() == kReachable || m.tag() == kIgnored) continue;
205
206 // Do this check relatively late so we can log only the interesting cases.
207 if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
208 LOG_POINTERS(
209 "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
210 "%zu.\n",
211 (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
212 m.requested_size());
213 continue;
214 }
215
216 m.set_tag(tag);
217 LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
218 (void *)pp, p, (void *)chunk,
219 (void *)(chunk + m.requested_size()), m.requested_size());
220 if (frontier)
221 frontier->push_back(chunk);
222 }
223}
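
The round-up at lines 194-195 advances pp to the next multiple of alignment before the scan loop. A standalone sketch of the same computation, assuming a hypothetical helper RoundUpTo that is not part of this file:

  #include <cstdint>
  #include <cassert>

  // Round p up to the next multiple of alignment (mirrors lines 194-195;
  // alignment need not be a power of two for this form).
  static std::uintptr_t RoundUpTo(std::uintptr_t p, std::uintptr_t alignment) {
    std::uintptr_t rem = p % alignment;
    return rem ? p + alignment - rem : p;
  }

  int main() {
    assert(RoundUpTo(0x1003, 8) == 0x1008);  // misaligned: bumped to boundary
    assert(RoundUpTo(0x1008, 8) == 0x1008);  // already aligned: unchanged
  }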
224
225// Scans a global range for pointers
226void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
227 uptr allocator_begin = 0, allocator_end = 0;
228 GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
229 if (begin <= allocator_begin && allocator_begin < end) {
230 CHECK_LE(allocator_begin, allocator_end);
231 CHECK_LE(allocator_end, end);
232 if (begin < allocator_begin)
233 ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
234 kReachable);
235 if (allocator_end < end)
236 ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
237 } else {
238 ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
239 }
240}
241
242void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
243 Frontier *frontier = reinterpret_cast<Frontier *>(arg);
244 ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
245}
246
247#if SANITIZER_FUCHSIA
248
249// Fuchsia handles all threads together with its own callback.
250static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
251
252#else
253
254#if SANITIZER_ANDROID
255// FIXME: Move this out into *libcdep.cpp
256extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
257 pid_t, void (*cb)(void *, void *, uptr, void *), void *);
258#endif
259
260static void ProcessThreadRegistry(Frontier *frontier) {
261 InternalMmapVector<uptr> ptrs;
262 GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
263 GetAdditionalThreadContextPtrs, &ptrs);
264
265 for (uptr i = 0; i < ptrs.size(); ++i) {
266 void *ptr = reinterpret_cast<void *>(ptrs[i]);
267 uptr chunk = PointsIntoChunk(ptr);
268 if (!chunk)
269 continue;
270 LsanMetadata m(chunk);
271 if (!m.allocated())
272 continue;
273
274 // Mark as reachable and add to frontier.
275 LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
276 m.set_tag(kReachable);
277 frontier->push_back(chunk);
278 }
279}
280
281// Scans thread data (stacks and TLS) for heap pointers.
282static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
283 Frontier *frontier) {
284 InternalMmapVector<uptr> registers;
285 for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
286 tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
287 LOG_THREADS("Processing thread %llu.\n", os_id);
288 uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
289 DTLS *dtls;
290 bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
291 &tls_begin, &tls_end,
292 &cache_begin, &cache_end, &dtls);
293 if (!thread_found) {
294 // If a thread can't be found in the thread registry, it's probably in the
295 // process of destruction. Log this event and move on.
296 LOG_THREADS("Thread %llu not found in registry.\n", os_id);
297 continue;
298 }
299 uptr sp;
300 PtraceRegistersStatus have_registers =
301 suspended_threads.GetRegistersAndSP(i, &registers, &sp);
302 if (have_registers != REGISTERS_AVAILABLE) {
303 Report("Unable to get registers from thread %llu.\n", os_id);
304 // If unable to get SP, consider the entire stack to be reachable unless
305 // GetRegistersAndSP failed with ESRCH.
306 if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
307 sp = stack_begin;
308 }
309
310 if (flags()->use_registers && have_registers) {
311 uptr registers_begin = reinterpret_cast<uptr>(registers.data());
312 uptr registers_end =
313 reinterpret_cast<uptr>(registers.data() + registers.size());
314 ScanRangeForPointers(registers_begin, registers_end, frontier,
315 "REGISTERS", kReachable);
316 }
317
318 if (flags()->use_stacks) {
319 LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
320 (void *)stack_end, (void *)sp);
321 if (sp < stack_begin || sp >= stack_end) {
322 // SP is outside the recorded stack range (e.g. the thread is running a
323 // signal handler on alternate stack, or swapcontext was used).
324 // Again, consider the entire stack range to be reachable.
325 LOG_THREADS("WARNING: stack pointer not in stack range.\n");
326 uptr page_size = GetPageSizeCached();
327 int skipped = 0;
328 while (stack_begin < stack_end &&
329 !IsAccessibleMemoryRange(stack_begin, 1)) {
330 skipped++;
331 stack_begin += page_size;
332 }
333 LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
334 skipped, (void *)stack_begin, (void *)stack_end);
335 } else {
336 // Shrink the stack range to ignore out-of-scope values.
337 stack_begin = sp;
338 }
339 ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
340 kReachable);
341 ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
342 }
343
344 if (flags()->use_tls) {
345 if (tls_begin) {
346 LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
347 // If the tls and cache ranges don't overlap, scan full tls range,
348 // otherwise, only scan the non-overlapping portions
349 if (cache_begin == cache_end || tls_end < cache_begin ||
350 tls_begin > cache_end) {
351 ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
352 } else {
353 if (tls_begin < cache_begin)
354 ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
355 kReachable);
356 if (tls_end > cache_end)
357 ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
358 kReachable);
359 }
360 }
361#if SANITIZER_ANDROID
362 auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
363 void *arg) -> void {
364 ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
365 reinterpret_cast<uptr>(dtls_end),
366 reinterpret_cast<Frontier *>(arg), "DTLS",
367 kReachable);
368 };
369
370 // FIXME: There might be a race-condition here (and in Bionic) if the
371 // thread is suspended in the middle of updating its DTLS. IOWs, we
372 // could scan already freed memory. (probably fine for now)
373 __libc_iterate_dynamic_tls(os_id, cb, frontier);
374#else
375 if (dtls && !DTLSInDestruction(dtls)) {
376 ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
377 uptr dtls_beg = dtv.beg;
378 uptr dtls_end = dtls_beg + dtv.size;
379 if (dtls_beg < dtls_end) {
380 LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
381 (void *)dtls_end);
382 ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
383 kReachable);
384 }
385 });
386 } else {
387 // We are handling a thread with DTLS under destruction. Log about
388 // this and continue.
389 LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
390 }
391#endif
392 }
393 }
394
395 // Add pointers reachable from ThreadContexts
396 ProcessThreadRegistry(frontier);
397}
398
399#endif // SANITIZER_FUCHSIA
400
401void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
402 uptr region_begin, uptr region_end, bool is_readable) {
403 uptr intersection_begin = Max(root_region.begin, region_begin);
404 uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
405 if (intersection_begin >= intersection_end) return;
406 LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
407 (void *)root_region.begin,
408 (void *)(root_region.begin + root_region.size),
409 (void *)region_begin, (void *)region_end,
410 is_readable ? "readable" : "unreadable");
411 if (is_readable)
412 ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
413 kReachable);
414}
415
416static void ProcessRootRegion(Frontier *frontier,
417 const RootRegion &root_region) {
418 MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
419 MemoryMappedSegment segment;
420 while (proc_maps.Next(&segment)) {
421 ScanRootRegion(frontier, root_region, segment.start, segment.end,
422 segment.IsReadable());
423 }
424}
425
426// Scans root regions for heap pointers.
427static void ProcessRootRegions(Frontier *frontier) {
428 if (!flags()->use_root_regions) return;
429 CHECK(root_regions);
430 for (uptr i = 0; i < root_regions->size(); i++) {
431 ProcessRootRegion(frontier, (*root_regions)[i]);
432 }
433}
434
435static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
436 while (frontier->size()) {
437 uptr next_chunk = frontier->back();
438 frontier->pop_back();
439 LsanMetadata m(next_chunk);
440 ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
441 "HEAP", tag);
442 }
443}
444
445// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
446// which are reachable from it as indirectly leaked.
447static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
448 chunk = GetUserBegin(chunk);
449 LsanMetadata m(chunk);
450 if (m.allocated() && m.tag() != kReachable) {
451 ScanRangeForPointers(chunk, chunk + m.requested_size(),
452 /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
453 }
454}
455
456static void IgnoredSuppressedCb(uptr chunk, void *arg) {
457 CHECK(arg);
458 chunk = GetUserBegin(chunk);
459 LsanMetadata m(chunk);
460 if (!m.allocated() || m.tag() == kIgnored)
461 return;
462
463 const InternalMmapVector<u32> &suppressed =
464 *static_cast<const InternalMmapVector<u32> *>(arg);
465 uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
466 if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
467 return;
468
469 LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
470 (void *)(chunk + m.requested_size()), m.requested_size());
471 m.set_tag(kIgnored);
472}
473
474// ForEachChunk callback. If chunk is marked as ignored, adds its address to
475// frontier.
476static void CollectIgnoredCb(uptr chunk, void *arg) {
477 CHECK(arg);
478 chunk = GetUserBegin(chunk);
479 LsanMetadata m(chunk);
480 if (m.allocated() && m.tag() == kIgnored) {
481 LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
482 (void *)(chunk + m.requested_size()), m.requested_size());
483 reinterpret_cast<Frontier *>(arg)->push_back(chunk);
484 }
485}
486
487static uptr GetCallerPC(const StackTrace &stack) {
488 // The top frame is our malloc/calloc/etc. The next frame is the caller.
489 if (stack.size >= 2)
490 return stack.trace[1];
491 return 0;
492}
493
494struct InvalidPCParam {
495 Frontier *frontier;
496 bool skip_linker_allocations;
497};
498
499// ForEachChunk callback. If the caller pc is invalid or is within the linker,
500// mark as reachable. Called by ProcessPlatformSpecificAllocations.
501static void MarkInvalidPCCb(uptr chunk, void *arg) {
502 CHECK(arg);
503 InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
504 chunk = GetUserBegin(chunk);
505 LsanMetadata m(chunk);
506 if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
507 u32 stack_id = m.stack_trace_id();
508 uptr caller_pc = 0;
509 if (stack_id > 0)
510 caller_pc = GetCallerPC(StackDepotGet(stack_id));
511 // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
512 // it as reachable, as we can't properly report its allocation stack anyway.
513 if (caller_pc == 0 || (param->skip_linker_allocations &&
514 GetLinker()->containsAddress(caller_pc))) {
515 m.set_tag(kReachable);
516 param->frontier->push_back(chunk);
517 }
518 }
519}
520
521// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
522// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
523// modules accounting etc.
524// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
525// They are allocated with a __libc_memalign() call in allocate_and_init()
526// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
527// blocks, but we can make sure they come from our own allocator by intercepting
528// __libc_memalign(). On top of that, there is no easy way to reach them. Their
529// addresses are stored in a dynamically allocated array (the DTV) which is
530// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
531// being reachable from the static TLS, and the dynamic TLS being reachable from
532// the DTV. This is because the initial DTV is allocated before our interception
533// mechanism kicks in, and thus we don't recognize it as allocated memory. We
534// can't special-case it either, since we don't know its size.
535// Our solution is to include in the root set all allocations made from
536// ld-linux.so (which is where allocate_and_init() is implemented). This is
537// guaranteed to include all dynamic TLS blocks (and possibly other allocations
538// which we don't care about).
539// On all other platforms, this simply checks to ensure that the caller pc is
540// valid before reporting chunks as leaked.
541static void ProcessPC(Frontier *frontier) {
542 InvalidPCParam arg;
543 arg.frontier = frontier;
544 arg.skip_linker_allocations =
545 flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
546 ForEachChunk(MarkInvalidPCCb, &arg);
547}
548
549// Sets the appropriate tag on each chunk.
550static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
551 Frontier *frontier) {
552 const InternalMmapVector<u32> &suppressed_stacks =
553 GetSuppressionContext()->GetSortedSuppressedStacks();
554 if (!suppressed_stacks.empty()) {
555 ForEachChunk(IgnoredSuppressedCb,
556 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
557 }
558 ForEachChunk(CollectIgnoredCb, frontier);
559 ProcessGlobalRegions(frontier);
560 ProcessThreads(suspended_threads, frontier);
561 ProcessRootRegions(frontier);
562 FloodFillTag(frontier, kReachable);
563
564 CHECK_EQ(0, frontier->size());
565 ProcessPC(frontier);
566
567 // The check here is relatively expensive, so we do this in a separate flood
568 // fill. That way we can skip the check for chunks that are reachable
569 // otherwise.
570 LOG_POINTERS("Processing platform-specific allocations.\n");
571 ProcessPlatformSpecificAllocations(frontier);
572 FloodFillTag(frontier, kReachable);
573
574 // Iterate over leaked chunks and mark those that are reachable from other
575 // leaked chunks.
576 LOG_POINTERS("Scanning leaked chunks.\n");
577 ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
578}
579
580// ForEachChunk callback. Resets the tags to pre-leak-check state.
581static void ResetTagsCb(uptr chunk, void *arg) {
582 (void)arg;
583 chunk = GetUserBegin(chunk);
584 LsanMetadata m(chunk);
585 if (m.allocated() && m.tag() != kIgnored)
586 m.set_tag(kDirectlyLeaked);
587}
588
589// ForEachChunk callback. Aggregates information about unreachable chunks into
590// a LeakReport.
591static void CollectLeaksCb(uptr chunk, void *arg) {
592 CHECK(arg);
593 LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
594 chunk = GetUserBegin(chunk);
595 LsanMetadata m(chunk);
596 if (!m.allocated()) return;
597 if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
598 leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
599 m.tag());
600 }
601}
602
603void LeakSuppressionContext::PrintMatchedSuppressions() {
604 InternalMmapVector<Suppression *> matched;
605 context.GetMatched(&matched);
606 if (!matched.size())
607 return;
608 const char *line = "-----------------------------------------------------";
609 Printf("%s\n", line);
610 Printf("Suppressions used:\n");
611 Printf(" count bytes template\n");
612 for (uptr i = 0; i < matched.size(); i++) {
613 Printf("%7zu %10zu %s\n",
614 static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
615 matched[i]->weight, matched[i]->templ);
616 }
617 Printf("%s\n\n", line);
618}
619
620static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
621 const InternalMmapVector<tid_t> &suspended_threads =
622 *(const InternalMmapVector<tid_t> *)arg;
623 if (tctx->status == ThreadStatusRunning) {
624 uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
625 if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
626 Report(
627 "Running thread %llu was not suspended. False leaks are possible.\n",
628 tctx->os_id);
629 }
630}
631
632#if SANITIZER_FUCHSIA
633
634// Fuchsia provides a libc interface that guarantees all threads are
635// covered, and SuspendedThreadList is never really used.
636static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
637
638#else // !SANITIZER_FUCHSIA
639
640static void ReportUnsuspendedThreads(
641 const SuspendedThreadsList &suspended_threads) {
642 InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
643 for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
644 threads[i] = suspended_threads.GetThreadID(i);
645
646 Sort(threads.data(), threads.size());
647
648 GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
649 &ReportIfNotSuspended, &threads);
650}
651
652#endif // !SANITIZER_FUCHSIA
653
654static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
655 void *arg) {
656 CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
657 CHECK(param);
658 CHECK(!param->success);
659 ReportUnsuspendedThreads(suspended_threads);
660 ClassifyAllChunks(suspended_threads, &param->frontier);
661 ForEachChunk(CollectLeaksCb, &param->leak_report);
662 // Clean up for subsequent leak checks. This assumes we did not overwrite any
663 // kIgnored tags.
664 ForEachChunk(ResetTagsCb, nullptr);
665 param->success = true;
666}
667
668static bool PrintResults(LeakReport &report) {
669 uptr unsuppressed_count = report.UnsuppressedLeakCount();
670 if (unsuppressed_count) {
671 Decorator d;
672 Printf(
673 "\n"
674 "================================================================="
675 "\n");
676 Printf("%s", d.Error());
677 Report("ERROR: LeakSanitizer: detected memory leaks\n");
678 Printf("%s", d.Default());
679 report.ReportTopLeaks(flags()->max_leaks);
680 }
681 if (common_flags()->print_suppressions)
682 GetSuppressionContext()->PrintMatchedSuppressions();
683 if (unsuppressed_count > 0) {
684 report.PrintSummary();
685 return true;
686 }
687 return false;
688}
689
690static bool CheckForLeaks() {
691 if (&__lsan_is_turned_off && __lsan_is_turned_off())
692 return false;
693 // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
694 // suppressions. However if a stack id was previously suppressed, it should be
695 // suppressed in future checks as well.
696 for (int i = 0;; ++i) {
697 EnsureMainThreadIDIsCorrect();
698 CheckForLeaksParam param;
699 LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
700 if (!param.success) {
701 Report("LeakSanitizer has encountered a fatal error.\n");
702 Report(
703 "HINT: For debugging, try setting environment variable "
704 "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
705 Report(
706 "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
707 "etc)\n");
708 Die();
709 }
710 // No new suppressed stacks, so a rerun will not help and we can report.
711 if (!param.leak_report.ApplySuppressions())
712 return PrintResults(param.leak_report);
713
714 // No indirect leaks to report, so we are done here.
715 if (!param.leak_report.IndirectUnsuppressedLeakCount())
716 return PrintResults(param.leak_report);
717
718 if (i >= 8) {
719 Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
720 return PrintResults(param.leak_report);
721 }
722
723 // We found a new previously unseen suppressed call stack. Rerun to make
724 // sure it does not hold indirect leaks.
725 VReport(1, "Rerun with %zu suppressed stacks.",
726 GetSuppressionContext()->GetSortedSuppressedStacks().size());
727 }
728}
729
730static bool has_reported_leaks = false;
731bool HasReportedLeaks() { return has_reported_leaks; }
732
733void DoLeakCheck() {
734 Lock l(&global_mutex);
735 static bool already_done;
736 if (already_done) return;
737 already_done = true;
738 has_reported_leaks = CheckForLeaks();
739 if (has_reported_leaks) HandleLeaks();
740}
741
742static int DoRecoverableLeakCheck() {
743 Lock l(&global_mutex);
744 bool have_leaks = CheckForLeaks();
745 return have_leaks ? 1 : 0;
746}
747
748void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
749
750Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
751 Suppression *s = nullptr;
752
753 // Suppress by module name.
754 if (const char *module_name =
755 Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
1
Calling 'Symbolizer::GetModuleNameForPc'
5
Returning from 'Symbolizer::GetModuleNameForPc'
5.1
'module_name' is null
6
Taking false branch
756 if (context.Match(module_name, kSuppressionLeak, &s))
757 return s;
758
759 // Suppress by file or function name.
760 SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
7
'frames' initialized here
761 for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
8
Assuming pointer value is null
9
Loop condition is false. Execution continues on line 767
762 if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
763 context.Match(cur->info.file, kSuppressionLeak, &s)) {
764 break;
765 }
766 }
767 frames->ClearAll();
10
Called C++ object pointer is null
768 return s;
769}
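
If SymbolizePC() can indeed return null, as the analyzer assumes at step 8, the straightforward hardening is to guard the result before line 767. A minimal sketch of such a guard (not necessarily the fix adopted upstream):

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  if (!frames)  // null result assumed possible per analyzer step 8
    return nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s))
      break;
  }
  frames->ClearAll();
  return s;

Conversely, if SymbolizePC() is in fact guaranteed to return a non-null list, a CHECK(frames) before the loop would document that invariant and silence the report.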
770
771Suppression *LeakSuppressionContext::GetSuppressionForStack(
772 u32 stack_trace_id, const StackTrace &stack) {
773 LazyInit();
774 for (uptr i = 0; i < stack.size; i++) {
775 Suppression *s = GetSuppressionForAddr(
776 StackTrace::GetPreviousInstructionPc(stack.trace[i]));
777 if (s) {
778 suppressed_stacks_sorted = false;
779 suppressed_stacks.push_back(stack_trace_id);
780 return s;
781 }
782 }
783 return nullptr;
784}
785
786///// LeakReport implementation. /////
787
788// A hard limit on the number of distinct leaks, to avoid quadratic complexity
789// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
790// in real-world applications.
791// FIXME: Get rid of this limit by changing the implementation of LeakReport to
792// use a hash table.
793const uptr kMaxLeaksConsidered = 5000;
794
795void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
796 uptr leaked_size, ChunkTag tag) {
797 CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
798
799 if (u32 resolution = flags()->resolution) {
800 StackTrace stack = StackDepotGet(stack_trace_id);
801 stack.size = Min(stack.size, resolution);
802 stack_trace_id = StackDepotPut(stack);
803 }
804
805 bool is_directly_leaked = (tag == kDirectlyLeaked);
806 uptr i;
807 for (i = 0; i < leaks_.size(); i++) {
808 if (leaks_[i].stack_trace_id == stack_trace_id &&
809 leaks_[i].is_directly_leaked == is_directly_leaked) {
810 leaks_[i].hit_count++;
811 leaks_[i].total_size += leaked_size;
812 break;
813 }
814 }
815 if (i == leaks_.size()) {
816 if (leaks_.size() == kMaxLeaksConsidered) return;
817 Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
818 is_directly_leaked, /* is_suppressed */ false };
819 leaks_.push_back(leak);
820 }
821 if (flags()->report_objects) {
822 LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
823 leaked_objects_.push_back(obj);
824 }
825}
826
827static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
828 if (leak1.is_directly_leaked == leak2.is_directly_leaked)
829 return leak1.total_size > leak2.total_size;
830 else
831 return leak1.is_directly_leaked;
832}
833
834void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
835 CHECK(leaks_.size() <= kMaxLeaksConsidered);
836 Printf("\n");
837 if (leaks_.size() == kMaxLeaksConsidered)
838 Printf("Too many leaks! Only the first %zu leaks encountered will be "
839 "reported.\n",
840 kMaxLeaksConsidered);
841
842 uptr unsuppressed_count = UnsuppressedLeakCount();
843 if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
844 Printf("The %zu top leak(s):\n", num_leaks_to_report);
845 Sort(leaks_.data(), leaks_.size(), &LeakComparator);
846 uptr leaks_reported = 0;
847 for (uptr i = 0; i < leaks_.size(); i++) {
848 if (leaks_[i].is_suppressed) continue;
849 PrintReportForLeak(i);
850 leaks_reported++;
851 if (leaks_reported == num_leaks_to_report) break;
852 }
853 if (leaks_reported < unsuppressed_count) {
854 uptr remaining = unsuppressed_count - leaks_reported;
855 Printf("Omitting %zu more leak(s).\n", remaining);
856 }
857}
858
859void LeakReport::PrintReportForLeak(uptr index) {
860 Decorator d;
861 Printf("%s", d.Leak());
862 Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
863 leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
864 leaks_[index].total_size, leaks_[index].hit_count);
865 Printf("%s", d.Default());
866
867 CHECK(leaks_[index].stack_trace_id);
868 StackDepotGet(leaks_[index].stack_trace_id).Print();
869
870 if (flags()->report_objects) {
871 Printf("Objects leaked above:\n");
872 PrintLeakedObjectsForLeak(index);
873 Printf("\n");
874 }
875}
876
877void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
878 u32 leak_id = leaks_[index].id;
879 for (uptr j = 0; j < leaked_objects_.size(); j++) {
880 if (leaked_objects_[j].leak_id == leak_id)
881 Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
882 leaked_objects_[j].size);
883 }
884}
885
886void LeakReport::PrintSummary() {
887 CHECK(leaks_.size() <= kMaxLeaksConsidered);
888 uptr bytes = 0, allocations = 0;
889 for (uptr i = 0; i < leaks_.size(); i++) {
890 if (leaks_[i].is_suppressed) continue;
891 bytes += leaks_[i].total_size;
892 allocations += leaks_[i].hit_count;
893 }
894 InternalScopedString summary;
895 summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
896 allocations);
897 ReportErrorSummary(summary.data());
898}
899
900uptr LeakReport::ApplySuppressions() {
901 LeakSuppressionContext *suppressions = GetSuppressionContext();
902 uptr new_suppressions = false;
903 for (uptr i = 0; i < leaks_.size(); i++) {
904 Suppression *s = suppressions->GetSuppressionForStack(
905 leaks_[i].stack_trace_id, StackDepotGet(leaks_[i].stack_trace_id));
906 if (s) {
907 s->weight += leaks_[i].total_size;
908 atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
909 leaks_[i].hit_count);
910 leaks_[i].is_suppressed = true;
911 ++new_suppressions;
912 }
913 }
914 return new_suppressions;
915}
916
917uptr LeakReport::UnsuppressedLeakCount() {
918 uptr result = 0;
919 for (uptr i = 0; i < leaks_.size(); i++)
920 if (!leaks_[i].is_suppressed) result++;
921 return result;
922}
923
924uptr LeakReport::IndirectUnsuppressedLeakCount() {
925 uptr result = 0;
926 for (uptr i = 0; i < leaks_.size(); i++)
927 if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
928 result++;
929 return result;
930}
931
932} // namespace __lsan
933#else // CAN_SANITIZE_LEAKS
934namespace __lsan {
935void InitCommonLsan() { }
936void DoLeakCheck() { }
937void DoRecoverableLeakCheckVoid() { }
938void DisableInThisThread() { }
939void EnableInThisThread() { }
940}
941#endif // CAN_SANITIZE_LEAKS
942
943using namespace __lsan;
944
945extern "C" {
946SANITIZER_INTERFACE_ATTRIBUTE
947void __lsan_ignore_object(const void *p) {
948#if CAN_SANITIZE_LEAKS
949 if (!common_flags()->detect_leaks)
950 return;
951 // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
952 // locked.
953 Lock l(&global_mutex);
954 IgnoreObjectResult res = IgnoreObjectLocked(p);
955 if (res == kIgnoreObjectInvalid)
956 VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
957 if (res == kIgnoreObjectAlreadyIgnored)
958 VReport(1, "__lsan_ignore_object(): "
959 "heap object at %p is already being ignored\n", p);
960 if (res == kIgnoreObjectSuccess)
961 VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
962#endif // CAN_SANITIZE_LEAKS
963}
964
965SANITIZER_INTERFACE_ATTRIBUTE
966void __lsan_register_root_region(const void *begin, uptr size) {
967#if CAN_SANITIZE_LEAKS
968 Lock l(&global_mutex);
969 CHECK(root_regions);
970 RootRegion region = {reinterpret_cast<uptr>(begin), size};
971 root_regions->push_back(region);
972 VReport(1, "Registered root region at %p of size %zu\n", begin, size);
973#endif // CAN_SANITIZE_LEAKS
974}
975
976SANITIZER_INTERFACE_ATTRIBUTE
977void __lsan_unregister_root_region(const void *begin, uptr size) {
978#if CAN_SANITIZE_LEAKS
979 Lock l(&global_mutex);
980 CHECK(root_regions);
981 bool removed = false;
982 for (uptr i = 0; i < root_regions->size(); i++) {
983 RootRegion region = (*root_regions)[i];
984 if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
985 removed = true;
986 uptr last_index = root_regions->size() - 1;
987 (*root_regions)[i] = (*root_regions)[last_index];
988 root_regions->pop_back();
989 VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
990 break;
991 }
992 }
993 if (!removed) {
994 Report(
995 "__lsan_unregister_root_region(): region at %p of size %zu has not "
996 "been registered.\n",
997 begin, size);
998 Die();
999 }
1000#endif // CAN_SANITIZE_LEAKS
1001}
1002
1003SANITIZER_INTERFACE_ATTRIBUTE
1004void __lsan_disable() {
1005#if CAN_SANITIZE_LEAKS
1006 __lsan::DisableInThisThread();
1007#endif
1008}
1009
1010SANITIZER_INTERFACE_ATTRIBUTE
1011void __lsan_enable() {
1012#if CAN_SANITIZE_LEAKS
1013 __lsan::EnableInThisThread();
1014#endif
1015}
1016
1017SANITIZER_INTERFACE_ATTRIBUTE
1018void __lsan_do_leak_check() {
1019#if CAN_SANITIZE_LEAKS
1020 if (common_flags()->detect_leaks)
1021 __lsan::DoLeakCheck();
1022#endif // CAN_SANITIZE_LEAKS
1023}
1024
1025SANITIZER_INTERFACE_ATTRIBUTE
1026int __lsan_do_recoverable_leak_check() {
1027#if CAN_SANITIZE_LEAKS
1028 if (common_flags()->detect_leaks)
1029 return __lsan::DoRecoverableLeakCheck();
1030#endif // CAN_SANITIZE_LEAKS
1031 return 0;
1032}
1033
1034SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
1035 return "";
1036}
1037
1038#if !SANITIZER_SUPPORTS_WEAK_HOOKS
1039SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
1040int __lsan_is_turned_off() {
1041 return 0;
1042}
1043
1044SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
1045const char *__lsan_default_suppressions() {
1046 return "";
1047}
1048#endif
1049} // extern "C"

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/compiler-rt/lib/lsan/../sanitizer_common/sanitizer_symbolizer.h

1//===-- sanitizer_symbolizer.h ----------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Symbolizer is used by sanitizers to map instruction address to a location in
10// source code at run-time. Symbolizer either uses __sanitizer_symbolize_*
11// defined in the program, or (if they are missing) tries to find and
12// launch "llvm-symbolizer" commandline tool in a separate process and
13// communicate with it.
14//
15// Generally we should try to avoid calling system library functions during
16// symbolization (and use their replacements from sanitizer_libc.h instead).
17//===----------------------------------------------------------------------===//
18#ifndef SANITIZER_SYMBOLIZER_H
19#define SANITIZER_SYMBOLIZER_H
20
21#include "sanitizer_common.h"
22#include "sanitizer_mutex.h"
23#include "sanitizer_vector.h"
24
25namespace __sanitizer {
26
27struct AddressInfo {
28 // Owns all the string members. Storage for them is
29 // (de)allocated using sanitizer internal allocator.
30 uptr address;
31
32 char *module;
33 uptr module_offset;
34 ModuleArch module_arch;
35
36 static const uptr kUnknown = ~(uptr)0;
37 char *function;
38 uptr function_offset;
39
40 char *file;
41 int line;
42 int column;
43
44 AddressInfo();
45 // Deletes all strings and resets all fields.
46 void Clear();
47 void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);
48};
49
50// Linked list of symbolized frames (each frame is described by AddressInfo).
51struct SymbolizedStack {
52 SymbolizedStack *next;
53 AddressInfo info;
54 static SymbolizedStack *New(uptr addr);
55 // Deletes current, and all subsequent frames in the linked list.
56 // The object cannot be accessed after the call to this function.
57 void ClearAll();
58
59 private:
60 SymbolizedStack();
61};
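
The comment on ClearAll() means the whole list, including the head, is freed in one call, so callers traverse first and release last. A sketched usage pattern, where ConsumeFrame is a hypothetical consumer and the null guard is precisely what the LSan code above omits:

  void ConsumeFrame(const AddressInfo &info);  // hypothetical consumer

  void SymbolizeAndConsume(uptr pc) {
    SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
    for (SymbolizedStack *cur = frames; cur; cur = cur->next)
      ConsumeFrame(cur->info);
    if (frames)
      frames->ClearAll();  // deletes every frame; frames is dangling afterwards
  }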
62
63// For now, DataInfo is used to describe global variable.
64struct DataInfo {
65 // Owns all the string members. Storage for them is
66 // (de)allocated using sanitizer internal allocator.
67 char *module;
68 uptr module_offset;
69 ModuleArch module_arch;
70
71 char *file;
72 uptr line;
73 char *name;
74 uptr start;
75 uptr size;
76
77 DataInfo();
78 void Clear();
79};
80
81struct LocalInfo {
82 char *function_name = nullptr;
83 char *name = nullptr;
84 char *decl_file = nullptr;
85 unsigned decl_line = 0;
86
87 bool has_frame_offset = false;
88 bool has_size = false;
89 bool has_tag_offset = false;
90
91 sptr frame_offset;
92 uptr size;
93 uptr tag_offset;
94
95 void Clear();
96};
97
98struct FrameInfo {
99 char *module;
100 uptr module_offset;
101 ModuleArch module_arch;
102
103 InternalMmapVector<LocalInfo> locals;
104 void Clear();
105};
106
107class SymbolizerTool;
108
109class Symbolizer final {
110 public:
111 /// Initialize and return platform-specific implementation of symbolizer
112 /// (if it wasn't already initialized).
113 static Symbolizer *GetOrInit();
114 static void LateInitialize();
115 // Returns a list of symbolized frames for a given address (containing
116 // all inlined functions, if necessary).
117 SymbolizedStack *SymbolizePC(uptr address);
118 bool SymbolizeData(uptr address, DataInfo *info);
119 bool SymbolizeFrame(uptr address, FrameInfo *info);
120
121 // The module names Symbolizer returns are stable and unique for every given
122 // module. It is safe to store and compare them as pointers.
123 bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
124 uptr *module_address);
125 const char *GetModuleNameForPc(uptr pc) {
126 const char *module_name = nullptr;
127 uptr unused;
128 if (GetModuleNameAndOffsetForPC(pc, &module_name, &unused))
2
Assuming the condition is false
3
Taking false branch
129 return module_name;
130 return nullptr;
4
Returning null pointer, which participates in a condition later
131 }
132
133 // Release internal caches (if any).
134 void Flush();
135 // Attempts to demangle the provided C++ mangled name.
136 const char *Demangle(const char *name);
137
138 // Allow user to install hooks that would be called before/after Symbolizer
139 // does the actual file/line info fetching. Specific sanitizers may need this
140 // to distinguish system library calls made in user code from calls made
141 // during in-process symbolization.
142 typedef void (*StartSymbolizationHook)();
143 typedef void (*EndSymbolizationHook)();
144 // May be called at most once.
145 void AddHooks(StartSymbolizationHook start_hook,
146 EndSymbolizationHook end_hook);
147
148 void RefreshModules();
149 const LoadedModule *FindModuleForAddress(uptr address);
150
151 void InvalidateModuleList();
152
153 private:
154 // GetModuleNameAndOffsetForPC has to return a string to the caller.
155 // Since the corresponding module might get unloaded later, we should create
156 // our owned copies of the strings that we can safely return.
157 // ModuleNameOwner does not provide any synchronization, thus calls to
158 // its method should be protected by |mu_|.
159 class ModuleNameOwner {
160 public:
161 explicit ModuleNameOwner(Mutex *synchronized_by)
162 : last_match_(nullptr), mu_(synchronized_by) {
163 storage_.reserve(kInitialCapacity);
164 }
165 const char *GetOwnedCopy(const char *str);
166
167 private:
168 static const uptr kInitialCapacity = 1000;
169 InternalMmapVector<const char*> storage_;
170 const char *last_match_;
171
172 Mutex *mu_;
173 } module_names_;
174
175 /// Platform-specific function for creating a Symbolizer object.
176 static Symbolizer *PlatformInit();
177
178 bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
179 uptr *module_offset,
180 ModuleArch *module_arch);
181 ListOfModules modules_;
182 ListOfModules fallback_modules_;
183 // If stale, need to reload the modules before looking up addresses.
184 bool modules_fresh_;
185
186 // Platform-specific default demangler, must not return nullptr.
187 const char *PlatformDemangle(const char *name);
188
189 static Symbolizer *symbolizer_;
190 static StaticSpinMutex init_mu_;
191
192 // Mutex locked from public methods of |Symbolizer|, so that the internals
193 // (including individual symbolizer tools and platform-specific methods) are
194 // always synchronized.
195 Mutex mu_;
196
197 IntrusiveList<SymbolizerTool> tools_;
198
199 explicit Symbolizer(IntrusiveList<SymbolizerTool> tools);
200
201 static LowLevelAllocator symbolizer_allocator_;
202
203 StartSymbolizationHook start_hook_;
204 EndSymbolizationHook end_hook_;
205 class SymbolizerScope {
206 public:
207 explicit SymbolizerScope(const Symbolizer *sym);
208 ~SymbolizerScope();
209 private:
210 const Symbolizer *sym_;
211 };
212
213 // Calls `LateInitialize()` on all items in `tools_`.
214 void LateInitializeTools();
215};
216
217#ifdef SANITIZER_WINDOWS
218void InitializeDbgHelpIfNeeded();
219#endif
220
221} // namespace __sanitizer
222
223#endif // SANITIZER_SYMBOLIZER_H