Bug Summary

File: projects/compiler-rt/lib/lsan/lsan_common.cc
Warning: line 511, column 3
Called C++ object pointer is null
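
The path the analyzer reports: Symbolizer::SymbolizePC() can return a null SymbolizedStack pointer, the loop over 'frames' is then skipped entirely, and frames->ClearAll() on line 511 dereferences the null pointer. A minimal sketch of the pattern and one possible guard follows; the guard is illustrative only and is not necessarily the fix applied upstream.

  // Sketch of the reported defect (assumes SymbolizePC() may yield nullptr).
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    // ... match suppressions by function or file name ...
  }
  if (frames) frames->ClearAll();  // hypothetical null check before the call
  return s;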

Annotated Source Code

1//=-- lsan_common.cc ------------------------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of LeakSanitizer.
11// Implementation of common leak checking functionality.
12//
13//===----------------------------------------------------------------------===//
14
15#include "lsan_common.h"
16
17#include "sanitizer_common/sanitizer_common.h"
18#include "sanitizer_common/sanitizer_flags.h"
19#include "sanitizer_common/sanitizer_flag_parser.h"
20#include "sanitizer_common/sanitizer_placement_new.h"
21#include "sanitizer_common/sanitizer_procmaps.h"
22#include "sanitizer_common/sanitizer_stackdepot.h"
23#include "sanitizer_common/sanitizer_stacktrace.h"
24#include "sanitizer_common/sanitizer_suppressions.h"
25#include "sanitizer_common/sanitizer_report_decorator.h"
26#include "sanitizer_common/sanitizer_tls_get_addr.h"
27
28#if CAN_SANITIZE_LEAKS
29namespace __lsan {
30
31// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
32// also to protect the global list of root regions.
33BlockingMutex global_mutex(LINKER_INITIALIZED);
34
35Flags lsan_flags;
36
37void DisableCounterUnderflow() {
38 if (common_flags()->detect_leaks) {
39 Report("Unmatched call to __lsan_enable().\n");
40 Die();
41 }
42}
43
44void Flags::SetDefaults() {
45#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
46#include "lsan_flags.inc"
47#undef LSAN_FLAG
48}
49
50void RegisterLsanFlags(FlagParser *parser, Flags *f) {
51#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
52 RegisterFlag(parser, #Name, Description, &f->Name);
53#include "lsan_flags.inc"
54#undef LSAN_FLAG
55}
56
57#define LOG_POINTERS(...) \
58 do { \
59 if (flags()->log_pointers) Report(__VA_ARGS__); \
60 } while (0);
61
62#define LOG_THREADS(...) \
63 do { \
64 if (flags()->log_threads) Report(__VA_ARGS__); \
65 } while (0);
66
67ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
68static SuppressionContext *suppression_ctx = nullptr;
69static const char kSuppressionLeak[] = "leak";
70static const char *kSuppressionTypes[] = { kSuppressionLeak };
71
72void InitializeSuppressions() {
73 CHECK_EQ(nullptr, suppression_ctx);
74 suppression_ctx = new (suppression_placeholder) // NOLINT
75 SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
76 suppression_ctx->ParseFromFile(flags()->suppressions);
77 if (&__lsan_default_suppressions)
78 suppression_ctx->Parse(__lsan_default_suppressions());
79}
80
81static SuppressionContext *GetSuppressionContext() {
82 CHECK(suppression_ctx);
83 return suppression_ctx;
84}
85
86struct RootRegion {
87 const void *begin;
88 uptr size;
89};
90
91InternalMmapVector<RootRegion> *root_regions;
92
93void InitializeRootRegions() {
94 CHECK(!root_regions);
95 ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
96 root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
97}
98
99void InitCommonLsan() {
100 InitializeRootRegions();
101 if (common_flags()->detect_leaks) {
102 // Initialization which can fail or print warnings should only be done if
103 // LSan is actually enabled.
104 InitializeSuppressions();
105 InitializePlatformSpecificModules();
106 }
107}
108
109class Decorator: public __sanitizer::SanitizerCommonDecorator {
110 public:
111 Decorator() : SanitizerCommonDecorator() { }
112 const char *Error() { return Red(); }
113 const char *Leak() { return Blue(); }
114 const char *End() { return Default(); }
115};
116
117static inline bool CanBeAHeapPointer(uptr p) {
118 // Since our heap is located in mmap-ed memory, we can assume a sensible lower
119 // bound on heap addresses.
120 const uptr kMinAddress = 4 * 4096;
121 if (p < kMinAddress) return false;
122#if defined(__x86_64__)
123 // Accept only canonical form user-space addresses.
124 return ((p >> 47) == 0);
125#elif defined(__mips64)
126 return ((p >> 40) == 0);
127#elif defined(__aarch64__)
128 unsigned runtimeVMA =
129 (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
130 return ((p >> runtimeVMA) == 0);
131#else
132 return true;
133#endif
134}
135
136// Scans the memory range, looking for byte patterns that point into allocator
137// chunks. Marks those chunks with |tag| and adds them to |frontier|.
138// There are two usage modes for this function: finding reachable chunks
139// (|tag| = kReachable) and finding indirectly leaked chunks
140// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
141// so |frontier| = 0.
142void ScanRangeForPointers(uptr begin, uptr end,
143 Frontier *frontier,
144 const char *region_type, ChunkTag tag) {
145 CHECK(tag == kReachable || tag == kIndirectlyLeaked);
146 const uptr alignment = flags()->pointer_alignment();
147 LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
148 uptr pp = begin;
149 if (pp % alignment)
150 pp = pp + alignment - pp % alignment;
151 for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT
152 void *p = *reinterpret_cast<void **>(pp);
153 if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
154 uptr chunk = PointsIntoChunk(p);
155 if (!chunk) continue;
156 // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
157 if (chunk == begin) continue;
158 LsanMetadata m(chunk);
159 if (m.tag() == kReachable || m.tag() == kIgnored) continue;
160
161 // Do this check relatively late so we can log only the interesting cases.
162 if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
163 LOG_POINTERS(
164 "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
165 "%zu.\n",
166 pp, p, chunk, chunk + m.requested_size(), m.requested_size());
167 continue;
168 }
169
170 m.set_tag(tag);
171 LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
172 chunk, chunk + m.requested_size(), m.requested_size());
173 if (frontier)
174 frontier->push_back(chunk);
175 }
176}
177
178void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
179 Frontier *frontier = reinterpret_cast<Frontier *>(arg);
180 ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
181}
182
183// Scans thread data (stacks and TLS) for heap pointers.
184static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
185 Frontier *frontier) {
186 InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
187 uptr registers_begin = reinterpret_cast<uptr>(registers.data());
188 uptr registers_end = registers_begin + registers.size();
189 for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
190 uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
191 LOG_THREADS("Processing thread %d.\n", os_id);
192 uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
193 DTLS *dtls;
194 bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
195 &tls_begin, &tls_end,
196 &cache_begin, &cache_end, &dtls);
197 if (!thread_found) {
198 // If a thread can't be found in the thread registry, it's probably in the
199 // process of destruction. Log this event and move on.
200 LOG_THREADS("Thread %d not found in registry.\n", os_id);
201 continue;
202 }
203 uptr sp;
204 bool have_registers =
205 (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
206 if (!have_registers) {
207 Report("Unable to get registers from thread %d.\n");
208 // If unable to get SP, consider the entire stack to be reachable.
209 sp = stack_begin;
210 }
211
212 if (flags()->use_registers && have_registers)
213 ScanRangeForPointers(registers_begin, registers_end, frontier,
214 "REGISTERS", kReachable);
215
216 if (flags()->use_stacks) {
217 LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
218 if (sp < stack_begin || sp >= stack_end) {
219 // SP is outside the recorded stack range (e.g. the thread is running a
220 // signal handler on alternate stack, or swapcontext was used).
221 // Again, consider the entire stack range to be reachable.
222 LOG_THREADS("WARNING: stack pointer not in stack range.\n");
223 uptr page_size = GetPageSizeCached();
224 int skipped = 0;
225 while (stack_begin < stack_end &&
226 !IsAccessibleMemoryRange(stack_begin, 1)) {
227 skipped++;
228 stack_begin += page_size;
229 }
230 LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
231 skipped, stack_begin, stack_end);
232 } else {
233 // Shrink the stack range to ignore out-of-scope values.
234 stack_begin = sp;
235 }
236 ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
237 kReachable);
238 ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
239 }
240
241 if (flags()->use_tls) {
242 LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
243 if (cache_begin == cache_end) {
244 ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
245 } else {
246 // Because LSan should not be loaded with dlopen(), we can assume
247 // that allocator cache will be part of static TLS image.
248 CHECK_LE(tls_begin, cache_begin);
249 CHECK_GE(tls_end, cache_end);
250 if (tls_begin < cache_begin)
251 ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
252 kReachable);
253 if (tls_end > cache_end)
254 ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
255 }
256 if (dtls) {
257 for (uptr j = 0; j < dtls->dtv_size; ++j) {
258 uptr dtls_beg = dtls->dtv[j].beg;
259 uptr dtls_end = dtls_beg + dtls->dtv[j].size;
260 if (dtls_beg < dtls_end) {
261 LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
262 ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
263 kReachable);
264 }
265 }
266 }
267 }
268 }
269}
270
271static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
272 uptr root_end) {
273 MemoryMappingLayout proc_maps(/*cache_enabled*/true);
274 uptr begin, end, prot;
275 while (proc_maps.Next(&begin, &end,
276 /*offset*/ nullptr, /*filename*/ nullptr,
277 /*filename_size*/ 0, &prot)) {
278 uptr intersection_begin = Max(root_begin, begin);
279 uptr intersection_end = Min(end, root_end);
280 if (intersection_begin >= intersection_end) continue;
281 bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
282 LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
283 root_begin, root_end, begin, end,
284 is_readable ? "readable" : "unreadable");
285 if (is_readable)
286 ScanRangeForPointers(intersection_begin, intersection_end, frontier,
287 "ROOT", kReachable);
288 }
289}
290
291// Scans root regions for heap pointers.
292static void ProcessRootRegions(Frontier *frontier) {
293 if (!flags()->use_root_regions) return;
294 CHECK(root_regions);
295 for (uptr i = 0; i < root_regions->size(); i++) {
296 RootRegion region = (*root_regions)[i];
297 uptr begin_addr = reinterpret_cast<uptr>(region.begin);
298 ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
299 }
300}
301
302static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
303 while (frontier->size()) {
304 uptr next_chunk = frontier->back();
305 frontier->pop_back();
306 LsanMetadata m(next_chunk);
307 ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
308 "HEAP", tag);
309 }
310}
311
312// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
313// which are reachable from it as indirectly leaked.
314static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
315 chunk = GetUserBegin(chunk);
316 LsanMetadata m(chunk);
317 if (m.allocated() && m.tag() != kReachable) {
318 ScanRangeForPointers(chunk, chunk + m.requested_size(),
319 /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
320 }
321}
322
323// ForEachChunk callback. If chunk is marked as ignored, adds its address to
324// frontier.
325static void CollectIgnoredCb(uptr chunk, void *arg) {
326 CHECK(arg);
327 chunk = GetUserBegin(chunk);
328 LsanMetadata m(chunk);
329 if (m.allocated() && m.tag() == kIgnored) {
330 LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
331 chunk, chunk + m.requested_size(), m.requested_size());
332 reinterpret_cast<Frontier *>(arg)->push_back(chunk);
333 }
334}
335
336// Sets the appropriate tag on each chunk.
337static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
338 // Holds the flood fill frontier.
339 Frontier frontier(1);
340
341 ForEachChunk(CollectIgnoredCb, &frontier);
342 ProcessGlobalRegions(&frontier);
343 ProcessThreads(suspended_threads, &frontier);
344 ProcessRootRegions(&frontier);
345 FloodFillTag(&frontier, kReachable);
346
347 // The check here is relatively expensive, so we do this in a separate flood
348 // fill. That way we can skip the check for chunks that are reachable
349 // otherwise.
350 LOG_POINTERS("Processing platform-specific allocations.\n");
351 CHECK_EQ(0, frontier.size());
352 ProcessPlatformSpecificAllocations(&frontier);
353 FloodFillTag(&frontier, kReachable);
354
355 // Iterate over leaked chunks and mark those that are reachable from other
356 // leaked chunks.
357 LOG_POINTERS("Scanning leaked chunks.\n");
358 ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
359}
360
361// ForEachChunk callback. Resets the tags to pre-leak-check state.
362static void ResetTagsCb(uptr chunk, void *arg) {
363 (void)arg;
364 chunk = GetUserBegin(chunk);
365 LsanMetadata m(chunk);
366 if (m.allocated() && m.tag() != kIgnored)
367 m.set_tag(kDirectlyLeaked);
368}
369
370static void PrintStackTraceById(u32 stack_trace_id) {
371 CHECK(stack_trace_id);
372 StackDepotGet(stack_trace_id).Print();
373}
374
375// ForEachChunk callback. Aggregates information about unreachable chunks into
376// a LeakReport.
377static void CollectLeaksCb(uptr chunk, void *arg) {
378 CHECK(arg);
379 LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
380 chunk = GetUserBegin(chunk);
381 LsanMetadata m(chunk);
382 if (!m.allocated()) return;
383 if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
384 u32 resolution = flags()->resolution;
385 u32 stack_trace_id = 0;
386 if (resolution > 0) {
387 StackTrace stack = StackDepotGet(m.stack_trace_id());
388 stack.size = Min(stack.size, resolution);
389 stack_trace_id = StackDepotPut(stack);
390 } else {
391 stack_trace_id = m.stack_trace_id();
392 }
393 leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
394 m.tag());
395 }
396}
397
398static void PrintMatchedSuppressions() {
399 InternalMmapVector<Suppression *> matched(1);
400 GetSuppressionContext()->GetMatched(&matched);
401 if (!matched.size())
402 return;
403 const char *line = "-----------------------------------------------------";
404 Printf("%s\n", line);
405 Printf("Suppressions used:\n");
406 Printf(" count bytes template\n");
407 for (uptr i = 0; i < matched.size(); i++)
408 Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
409 &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
410 Printf("%s\n\n", line);
411}
412
413struct CheckForLeaksParam {
414 bool success;
415 LeakReport leak_report;
416};
417
418static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
419 void *arg) {
420 CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
421 CHECK(param);
422 CHECK(!param->success);
423 ClassifyAllChunks(suspended_threads);
424 ForEachChunk(CollectLeaksCb, &param->leak_report);
425 // Clean up for subsequent leak checks. This assumes we did not overwrite any
426 // kIgnored tags.
427 ForEachChunk(ResetTagsCb, nullptr);
428 param->success = true;
429}
430
431static bool CheckForLeaks() {
432 if (&__lsan_is_turned_off && __lsan_is_turned_off())
433 return false;
434 EnsureMainThreadIDIsCorrect();
435 CheckForLeaksParam param;
436 param.success = false;
437 LockThreadRegistry();
438 LockAllocator();
439 DoStopTheWorld(CheckForLeaksCallback, &param);
440 UnlockAllocator();
441 UnlockThreadRegistry();
442
443 if (!param.success) {
444 Report("LeakSanitizer has encountered a fatal error.\n");
445 Report(
446 "HINT: For debugging, try setting environment variable "
447 "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
448 Report(
449 "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
450 Die();
451 }
452 param.leak_report.ApplySuppressions();
453 uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
454 if (unsuppressed_count > 0) {
455 Decorator d;
456 Printf("\n"
457 "================================================================="
458 "\n");
459 Printf("%s", d.Error());
460 Report("ERROR: LeakSanitizer: detected memory leaks\n");
461 Printf("%s", d.End());
462 param.leak_report.ReportTopLeaks(flags()->max_leaks);
463 }
464 if (common_flags()->print_suppressions)
465 PrintMatchedSuppressions();
466 if (unsuppressed_count > 0) {
467 param.leak_report.PrintSummary();
468 return true;
469 }
470 return false;
471}
472
473void DoLeakCheck() {
474 BlockingMutexLock l(&global_mutex);
475 static bool already_done;
476 if (already_done) return;
477 already_done = true;
478 bool have_leaks = CheckForLeaks();
479 if (!have_leaks) {
480 return;
481 }
482 if (common_flags()->exitcode) {
483 Die();
484 }
485}
486
487static int DoRecoverableLeakCheck() {
488 BlockingMutexLock l(&global_mutex);
489 bool have_leaks = CheckForLeaks();
490 return have_leaks ? 1 : 0;
491}
492
493static Suppression *GetSuppressionForAddr(uptr addr) {
494 Suppression *s = nullptr;
495
496 // Suppress by module name.
497 SuppressionContext *suppressions = GetSuppressionContext();
498 if (const char *module_name =
1. Taking false branch
499 Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
500 if (suppressions->Match(module_name, kSuppressionLeak, &s))
501 return s;
502
503 // Suppress by file or function name.
504 SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
2. 'frames' initialized here
505 for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
3. Assuming pointer value is null
4. Loop condition is false. Execution continues on line 511
506 if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
507 suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
508 break;
509 }
510 }
511 frames->ClearAll();
5. Called C++ object pointer is null
512 return s;
513}
514
515static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
516 StackTrace stack = StackDepotGet(stack_trace_id);
517 for (uptr i = 0; i < stack.size; i++) {
518 Suppression *s = GetSuppressionForAddr(
519 StackTrace::GetPreviousInstructionPc(stack.trace[i]));
520 if (s) return s;
521 }
522 return nullptr;
523}
524
525///// LeakReport implementation. /////
526
527// A hard limit on the number of distinct leaks, to avoid quadratic complexity
528// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
529// in real-world applications.
530// FIXME: Get rid of this limit by changing the implementation of LeakReport to
531// use a hash table.
532const uptr kMaxLeaksConsidered = 5000;
533
534void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
535 uptr leaked_size, ChunkTag tag) {
536 CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
537 bool is_directly_leaked = (tag == kDirectlyLeaked);
538 uptr i;
539 for (i = 0; i < leaks_.size(); i++) {
540 if (leaks_[i].stack_trace_id == stack_trace_id &&
541 leaks_[i].is_directly_leaked == is_directly_leaked) {
542 leaks_[i].hit_count++;
543 leaks_[i].total_size += leaked_size;
544 break;
545 }
546 }
547 if (i == leaks_.size()) {
548 if (leaks_.size() == kMaxLeaksConsidered) return;
549 Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
550 is_directly_leaked, /* is_suppressed */ false };
551 leaks_.push_back(leak);
552 }
553 if (flags()->report_objects) {
554 LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
555 leaked_objects_.push_back(obj);
556 }
557}
558
559static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
560 if (leak1.is_directly_leaked == leak2.is_directly_leaked)
561 return leak1.total_size > leak2.total_size;
562 else
563 return leak1.is_directly_leaked;
564}
565
566void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
567 CHECK(leaks_.size() <= kMaxLeaksConsidered);
568 Printf("\n");
569 if (leaks_.size() == kMaxLeaksConsidered)
570 Printf("Too many leaks! Only the first %zu leaks encountered will be "
571 "reported.\n",
572 kMaxLeaksConsidered);
573
574 uptr unsuppressed_count = UnsuppressedLeakCount();
575 if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
576 Printf("The %zu top leak(s):\n", num_leaks_to_report);
577 InternalSort(&leaks_, leaks_.size(), LeakComparator);
578 uptr leaks_reported = 0;
579 for (uptr i = 0; i < leaks_.size(); i++) {
580 if (leaks_[i].is_suppressed) continue;
581 PrintReportForLeak(i);
582 leaks_reported++;
583 if (leaks_reported == num_leaks_to_report) break;
584 }
585 if (leaks_reported < unsuppressed_count) {
586 uptr remaining = unsuppressed_count - leaks_reported;
587 Printf("Omitting %zu more leak(s).\n", remaining);
588 }
589}
590
591void LeakReport::PrintReportForLeak(uptr index) {
592 Decorator d;
593 Printf("%s", d.Leak());
594 Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
595 leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
596 leaks_[index].total_size, leaks_[index].hit_count);
597 Printf("%s", d.End());
598
599 PrintStackTraceById(leaks_[index].stack_trace_id);
600
601 if (flags()->report_objects) {
602 Printf("Objects leaked above:\n");
603 PrintLeakedObjectsForLeak(index);
604 Printf("\n");
605 }
606}
607
608void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
609 u32 leak_id = leaks_[index].id;
610 for (uptr j = 0; j < leaked_objects_.size(); j++) {
611 if (leaked_objects_[j].leak_id == leak_id)
612 Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
613 leaked_objects_[j].size);
614 }
615}
616
617void LeakReport::PrintSummary() {
618 CHECK(leaks_.size() <= kMaxLeaksConsidered);
619 uptr bytes = 0, allocations = 0;
620 for (uptr i = 0; i < leaks_.size(); i++) {
621 if (leaks_[i].is_suppressed) continue;
622 bytes += leaks_[i].total_size;
623 allocations += leaks_[i].hit_count;
624 }
625 InternalScopedString summary(kMaxSummaryLength);
626 summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
627 allocations);
628 ReportErrorSummary(summary.data());
629}
630
631void LeakReport::ApplySuppressions() {
632 for (uptr i = 0; i < leaks_.size(); i++) {
633 Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
634 if (s) {
635 s->weight += leaks_[i].total_size;
636 atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
637 leaks_[i].hit_count);
638 leaks_[i].is_suppressed = true;
639 }
640 }
641}
642
643uptr LeakReport::UnsuppressedLeakCount() {
644 uptr result = 0;
645 for (uptr i = 0; i < leaks_.size(); i++)
646 if (!leaks_[i].is_suppressed) result++;
647 return result;
648}
649
650} // namespace __lsan
651#else // CAN_SANITIZE_LEAKS
652namespace __lsan {
653void InitCommonLsan() { }
654void DoLeakCheck() { }
655void DisableInThisThread() { }
656void EnableInThisThread() { }
657}
658#endif // CAN_SANITIZE_LEAKS
659
660using namespace __lsan; // NOLINT
661
662extern "C" {
663SANITIZER_INTERFACE_ATTRIBUTE
664void __lsan_ignore_object(const void *p) {
665#if CAN_SANITIZE_LEAKS
666 if (!common_flags()->detect_leaks)
667 return;
668 // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
669 // locked.
670 BlockingMutexLock l(&global_mutex);
671 IgnoreObjectResult res = IgnoreObjectLocked(p);
672 if (res == kIgnoreObjectInvalid)
673 VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
674 if (res == kIgnoreObjectAlreadyIgnored)
675 VReport(1, "__lsan_ignore_object(): "
676 "heap object at %p is already being ignored\n", p);
677 if (res == kIgnoreObjectSuccess)
678 VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
679#endif // CAN_SANITIZE_LEAKS
680}
681
682SANITIZER_INTERFACE_ATTRIBUTE
683void __lsan_register_root_region(const void *begin, uptr size) {
684#if CAN_SANITIZE_LEAKS
685 BlockingMutexLock l(&global_mutex);
686 CHECK(root_regions);
687 RootRegion region = {begin, size};
688 root_regions->push_back(region);
689 VReport(1, "Registered root region at %p of size %llu\n", begin, size);
690#endif // CAN_SANITIZE_LEAKS
691}
692
693SANITIZER_INTERFACE_ATTRIBUTE
694void __lsan_unregister_root_region(const void *begin, uptr size) {
695#if CAN_SANITIZE_LEAKS
696 BlockingMutexLock l(&global_mutex);
697 CHECK(root_regions);
698 bool removed = false;
699 for (uptr i = 0; i < root_regions->size(); i++) {
700 RootRegion region = (*root_regions)[i];
701 if (region.begin == begin && region.size == size) {
702 removed = true;
703 uptr last_index = root_regions->size() - 1;
704 (*root_regions)[i] = (*root_regions)[last_index];
705 root_regions->pop_back();
706 VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
707 break;
708 }
709 }
710 if (!removed) {
711 Report(
712 "__lsan_unregister_root_region(): region at %p of size %llu has not "
713 "been registered.\n",
714 begin, size);
715 Die();
716 }
717#endif // CAN_SANITIZE_LEAKS
718}
719
720SANITIZER_INTERFACE_ATTRIBUTE
721void __lsan_disable() {
722#if CAN_SANITIZE_LEAKS
723 __lsan::DisableInThisThread();
724#endif
725}
726
727SANITIZER_INTERFACE_ATTRIBUTE
728void __lsan_enable() {
729#if CAN_SANITIZE_LEAKS
730 __lsan::EnableInThisThread();
731#endif
732}
733
734SANITIZER_INTERFACE_ATTRIBUTE
735void __lsan_do_leak_check() {
736#if CAN_SANITIZE_LEAKS
737 if (common_flags()->detect_leaks)
738 __lsan::DoLeakCheck();
739#endif // CAN_SANITIZE_LEAKS
740}
741
742SANITIZER_INTERFACE_ATTRIBUTE
743int __lsan_do_recoverable_leak_check() {
744#if CAN_SANITIZE_LEAKS
745 if (common_flags()->detect_leaks)
746 return __lsan::DoRecoverableLeakCheck();
747#endif // CAN_SANITIZE_LEAKS
748 return 0;
749}
750
751#if !SANITIZER_SUPPORTS_WEAK_HOOKS
752SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
753int __lsan_is_turned_off() {
754 return 0;
755}
756
757SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
758const char *__lsan_default_suppressions() {
759 return "";
760}
761#endif
762} // extern "C"