File: projects/compiler-rt/lib/lsan/lsan_common.cc
Warning: line 647, column 3: Called C++ object pointer is null
1 | //=-- lsan_common.cc ------------------------------------------------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file is a part of LeakSanitizer. | |||
10 | // Implementation of common leak checking functionality. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #include "lsan_common.h" | |||
15 | ||||
16 | #include "sanitizer_common/sanitizer_common.h" | |||
17 | #include "sanitizer_common/sanitizer_flag_parser.h" | |||
18 | #include "sanitizer_common/sanitizer_flags.h" | |||
19 | #include "sanitizer_common/sanitizer_placement_new.h" | |||
20 | #include "sanitizer_common/sanitizer_procmaps.h" | |||
21 | #include "sanitizer_common/sanitizer_report_decorator.h" | |||
22 | #include "sanitizer_common/sanitizer_stackdepot.h" | |||
23 | #include "sanitizer_common/sanitizer_stacktrace.h" | |||
24 | #include "sanitizer_common/sanitizer_suppressions.h" | |||
25 | #include "sanitizer_common/sanitizer_thread_registry.h" | |||
26 | #include "sanitizer_common/sanitizer_tls_get_addr.h" | |||
27 | ||||
28 | #if CAN_SANITIZE_LEAKS
29 | namespace __lsan { | |||
30 | ||||
31 | // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and | |||
32 | // also to protect the global list of root regions. | |||
33 | BlockingMutex global_mutex(LINKER_INITIALIZED); | |||
34 | ||||
35 | Flags lsan_flags; | |||
36 | ||||
37 | void DisableCounterUnderflow() { | |||
38 | if (common_flags()->detect_leaks) { | |||
39 | Report("Unmatched call to __lsan_enable().\n"); | |||
40 | Die(); | |||
41 | } | |||
42 | } | |||
43 | ||||
44 | void Flags::SetDefaults() { | |||
45 | #define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; | |||
46 | #include "lsan_flags.inc" | |||
47 | #undef LSAN_FLAG | |||
48 | } | |||
49 | ||||
50 | void RegisterLsanFlags(FlagParser *parser, Flags *f) { | |||
51 | #define LSAN_FLAG(Type, Name, DefaultValue, Description) \ | |||
52 | RegisterFlag(parser, #Name, Description, &f->Name); | |||
53 | #include "lsan_flags.inc" | |||
54 | #undef LSAN_FLAG | |||
55 | } | |||
56 | ||||
57 | #define LOG_POINTERS(...)                           \
58 |   do {                                              \
59 |     if (flags()->log_pointers) Report(__VA_ARGS__); \
60 |   } while (0)
61 | ||||
62 | #define LOG_THREADS(...)                           \
63 |   do {                                             \
64 |     if (flags()->log_threads) Report(__VA_ARGS__); \
65 |   } while (0)
66 | ||||
67 | ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
68 | static SuppressionContext *suppression_ctx = nullptr; | |||
69 | static const char kSuppressionLeak[] = "leak"; | |||
70 | static const char *kSuppressionTypes[] = { kSuppressionLeak }; | |||
71 | static const char kStdSuppressions[] = | |||
72 | #if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
73 | // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT | |||
74 | // definition. | |||
75 | "leak:*pthread_exit*\n" | |||
76 | #endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT | |||
77 | #if SANITIZER_MAC
78 | // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173 | |||
79 | "leak:*_os_trace*\n" | |||
80 | #endif | |||
81 | // TLS leak in some glibc versions, described in | |||
82 | // https://sourceware.org/bugzilla/show_bug.cgi?id=12650. | |||
83 | "leak:*tls_get_addr*\n"; | |||
84 | ||||
85 | void InitializeSuppressions() { | |||
86 |   CHECK_EQ(nullptr, suppression_ctx);
87 |   suppression_ctx = new (suppression_placeholder) // NOLINT
88 |       SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
89 | suppression_ctx->ParseFromFile(flags()->suppressions); | |||
90 | if (&__lsan_default_suppressions) | |||
91 | suppression_ctx->Parse(__lsan_default_suppressions()); | |||
92 | suppression_ctx->Parse(kStdSuppressions); | |||
93 | } | |||
94 | ||||
95 | static SuppressionContext *GetSuppressionContext() { | |||
96 |   CHECK(suppression_ctx);
97 | return suppression_ctx; | |||
98 | } | |||
99 | ||||
100 | static InternalMmapVector<RootRegion> *root_regions; | |||
101 | ||||
102 | InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; } | |||
103 | ||||
104 | void InitializeRootRegions() { | |||
105 |   CHECK(!root_regions);
106 |   ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
107 | root_regions = new (placeholder) InternalMmapVector<RootRegion>(); // NOLINT | |||
108 | } | |||
109 | ||||
110 | const char *MaybeCallLsanDefaultOptions() { | |||
111 | return (&__lsan_default_options) ? __lsan_default_options() : ""; | |||
112 | } | |||
113 | ||||
114 | void InitCommonLsan() { | |||
115 | InitializeRootRegions(); | |||
116 | if (common_flags()->detect_leaks) { | |||
117 | // Initialization which can fail or print warnings should only be done if | |||
118 | // LSan is actually enabled. | |||
119 | InitializeSuppressions(); | |||
120 | InitializePlatformSpecificModules(); | |||
121 | } | |||
122 | } | |||
123 | ||||
124 | class Decorator: public __sanitizer::SanitizerCommonDecorator { | |||
125 | public: | |||
126 | Decorator() : SanitizerCommonDecorator() { } | |||
127 | const char *Error() { return Red(); } | |||
128 | const char *Leak() { return Blue(); } | |||
129 | }; | |||
130 | ||||
131 | static inline bool CanBeAHeapPointer(uptr p) { | |||
132 | // Since our heap is located in mmap-ed memory, we can assume a sensible lower | |||
133 | // bound on heap addresses. | |||
134 | const uptr kMinAddress = 4 * 4096; | |||
135 | if (p < kMinAddress) return false; | |||
136 | #if defined(__x86_64__) | |||
137 | // Accept only canonical form user-space addresses. | |||
138 | return ((p >> 47) == 0); | |||
139 | #elif defined(__mips64) | |||
140 | return ((p >> 40) == 0); | |||
141 | #elif defined(__aarch64__) | |||
142 | unsigned runtimeVMA = | |||
143 |       (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
144 | return ((p >> runtimeVMA) == 0); | |||
145 | #else | |||
146 | return true; | |||
147 | #endif | |||
148 | } | |||
149 | ||||
150 | // Scans the memory range, looking for byte patterns that point into allocator | |||
151 | // chunks. Marks those chunks with |tag| and adds them to |frontier|. | |||
152 | // There are two usage modes for this function: finding reachable chunks | |||
153 | // (|tag| = kReachable) and finding indirectly leaked chunks | |||
154 | // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, | |||
155 | // so |frontier| = 0. | |||
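// (In this file, the first mode is used by FloodFillTag() and the second by
// MarkIndirectlyLeakedCb(); see below.)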
156 | void ScanRangeForPointers(uptr begin, uptr end, | |||
157 | Frontier *frontier, | |||
158 | const char *region_type, ChunkTag tag) { | |||
159 |   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
160 |   const uptr alignment = flags()->pointer_alignment();
161 |   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
162 | uptr pp = begin; | |||
163 | if (pp % alignment) | |||
164 | pp = pp + alignment - pp % alignment; | |||
165 | for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT | |||
166 | void *p = *reinterpret_cast<void **>(pp); | |||
167 | if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; | |||
168 | uptr chunk = PointsIntoChunk(p); | |||
169 | if (!chunk) continue; | |||
170 | // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. | |||
171 | if (chunk == begin) continue; | |||
172 | LsanMetadata m(chunk); | |||
173 | if (m.tag() == kReachable || m.tag() == kIgnored) continue; | |||
174 | ||||
175 | // Do this check relatively late so we can log only the interesting cases. | |||
176 | if (!flags()->use_poisoned && WordIsPoisoned(pp)) { | |||
177 |       LOG_POINTERS(
178 |           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
179 |           "%zu.\n",
180 |           pp, p, chunk, chunk + m.requested_size(), m.requested_size());
181 | continue; | |||
182 | } | |||
183 | ||||
184 | m.set_tag(tag); | |||
185 |     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
186 |                  chunk, chunk + m.requested_size(), m.requested_size());
187 | if (frontier) | |||
188 | frontier->push_back(chunk); | |||
189 | } | |||
190 | } | |||
191 | ||||
192 | // Scans a global range for pointers | |||
193 | void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) { | |||
194 | uptr allocator_begin = 0, allocator_end = 0; | |||
195 | GetAllocatorGlobalRange(&allocator_begin, &allocator_end); | |||
196 | if (begin <= allocator_begin && allocator_begin < end) { | |||
197 |     CHECK_LE(allocator_begin, allocator_end);
198 |     CHECK_LE(allocator_end, end);
199 | if (begin < allocator_begin) | |||
200 | ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL", | |||
201 | kReachable); | |||
202 | if (allocator_end < end) | |||
203 | ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable); | |||
204 | } else { | |||
205 | ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable); | |||
206 | } | |||
207 | } | |||
208 | ||||
209 | void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { | |||
210 | Frontier *frontier = reinterpret_cast<Frontier *>(arg); | |||
211 | ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); | |||
212 | } | |||
213 | ||||
214 | // Scans thread data (stacks and TLS) for heap pointers. | |||
215 | static void ProcessThreads(SuspendedThreadsList const &suspended_threads, | |||
216 | Frontier *frontier) { | |||
217 | InternalMmapVector<uptr> registers(suspended_threads.RegisterCount()); | |||
218 | uptr registers_begin = reinterpret_cast<uptr>(registers.data()); | |||
219 | uptr registers_end = | |||
220 | reinterpret_cast<uptr>(registers.data() + registers.size()); | |||
221 | for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { | |||
222 | tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i)); | |||
223 |     LOG_THREADS("Processing thread %d.\n", os_id);
224 | uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; | |||
225 | DTLS *dtls; | |||
226 | bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, | |||
227 | &tls_begin, &tls_end, | |||
228 | &cache_begin, &cache_end, &dtls); | |||
229 | if (!thread_found) { | |||
230 | // If a thread can't be found in the thread registry, it's probably in the | |||
231 | // process of destruction. Log this event and move on. | |||
232 |       LOG_THREADS("Thread %d not found in registry.\n", os_id);
233 | continue; | |||
234 | } | |||
235 | uptr sp; | |||
236 | PtraceRegistersStatus have_registers = | |||
237 | suspended_threads.GetRegistersAndSP(i, registers.data(), &sp); | |||
238 | if (have_registers != REGISTERS_AVAILABLE) { | |||
239 | Report("Unable to get registers from thread %d.\n", os_id); | |||
240 | // If unable to get SP, consider the entire stack to be reachable unless | |||
241 | // GetRegistersAndSP failed with ESRCH. | |||
242 | if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue; | |||
243 | sp = stack_begin; | |||
244 | } | |||
245 | ||||
246 | if (flags()->use_registers && have_registers) | |||
247 | ScanRangeForPointers(registers_begin, registers_end, frontier, | |||
248 | "REGISTERS", kReachable); | |||
249 | ||||
250 | if (flags()->use_stacks) { | |||
251 |       LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
252 | if (sp < stack_begin || sp >= stack_end) { | |||
253 | // SP is outside the recorded stack range (e.g. the thread is running a | |||
254 | // signal handler on alternate stack, or swapcontext was used). | |||
255 | // Again, consider the entire stack range to be reachable. | |||
256 |         LOG_THREADS("WARNING: stack pointer not in stack range.\n");
257 | uptr page_size = GetPageSizeCached(); | |||
258 | int skipped = 0; | |||
259 | while (stack_begin < stack_end && | |||
260 | !IsAccessibleMemoryRange(stack_begin, 1)) { | |||
261 | skipped++; | |||
262 | stack_begin += page_size; | |||
263 | } | |||
264 |         LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
265 |                     skipped, stack_begin, stack_end);
266 | } else { | |||
267 | // Shrink the stack range to ignore out-of-scope values. | |||
268 | stack_begin = sp; | |||
269 | } | |||
270 | ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", | |||
271 | kReachable); | |||
272 | ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier); | |||
273 | } | |||
274 | ||||
275 | if (flags()->use_tls) { | |||
276 | if (tls_begin) { | |||
277 |         LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
278 | // If the tls and cache ranges don't overlap, scan full tls range, | |||
279 | // otherwise, only scan the non-overlapping portions | |||
280 | if (cache_begin == cache_end || tls_end < cache_begin || | |||
281 | tls_begin > cache_end) { | |||
282 | ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); | |||
283 | } else { | |||
284 | if (tls_begin < cache_begin) | |||
285 | ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", | |||
286 | kReachable); | |||
287 | if (tls_end > cache_end) | |||
288 | ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", | |||
289 | kReachable); | |||
290 | } | |||
291 | } | |||
292 | if (dtls && !DTLSInDestruction(dtls)) { | |||
293 | for (uptr j = 0; j < dtls->dtv_size; ++j) { | |||
294 | uptr dtls_beg = dtls->dtv[j].beg; | |||
295 | uptr dtls_end = dtls_beg + dtls->dtv[j].size; | |||
296 | if (dtls_beg < dtls_end) { | |||
297 |             LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
298 | ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS", | |||
299 | kReachable); | |||
300 | } | |||
301 | } | |||
302 | } else { | |||
303 | // We are handling a thread with DTLS under destruction. Log about | |||
304 | // this and continue. | |||
305 |         LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
306 | } | |||
307 | } | |||
308 | } | |||
309 | } | |||
310 | ||||
311 | void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, | |||
312 | uptr region_begin, uptr region_end, bool is_readable) { | |||
313 | uptr intersection_begin = Max(root_region.begin, region_begin); | |||
314 | uptr intersection_end = Min(region_end, root_region.begin + root_region.size); | |||
315 | if (intersection_begin >= intersection_end) return; | |||
316 |   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
317 |                root_region.begin, root_region.begin + root_region.size,
318 |                region_begin, region_end,
319 |                is_readable ? "readable" : "unreadable");
320 | if (is_readable) | |||
321 | ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT", | |||
322 | kReachable); | |||
323 | } | |||
324 | ||||
325 | static void ProcessRootRegion(Frontier *frontier, | |||
326 | const RootRegion &root_region) { | |||
327 | MemoryMappingLayout proc_maps(/*cache_enabled*/ true); | |||
328 | MemoryMappedSegment segment; | |||
329 | while (proc_maps.Next(&segment)) { | |||
330 | ScanRootRegion(frontier, root_region, segment.start, segment.end, | |||
331 | segment.IsReadable()); | |||
332 | } | |||
333 | } | |||
334 | ||||
335 | // Scans root regions for heap pointers. | |||
336 | static void ProcessRootRegions(Frontier *frontier) { | |||
337 | if (!flags()->use_root_regions) return; | |||
338 |   CHECK(root_regions);
339 | for (uptr i = 0; i < root_regions->size(); i++) { | |||
340 | ProcessRootRegion(frontier, (*root_regions)[i]); | |||
341 | } | |||
342 | } | |||
343 | ||||
344 | static void FloodFillTag(Frontier *frontier, ChunkTag tag) { | |||
345 | while (frontier->size()) { | |||
346 | uptr next_chunk = frontier->back(); | |||
347 | frontier->pop_back(); | |||
348 | LsanMetadata m(next_chunk); | |||
349 | ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, | |||
350 | "HEAP", tag); | |||
351 | } | |||
352 | } | |||
353 | ||||
354 | // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks | |||
355 | // which are reachable from it as indirectly leaked. | |||
356 | static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { | |||
357 | chunk = GetUserBegin(chunk); | |||
358 | LsanMetadata m(chunk); | |||
359 | if (m.allocated() && m.tag() != kReachable) { | |||
360 | ScanRangeForPointers(chunk, chunk + m.requested_size(), | |||
361 | /* frontier */ nullptr, "HEAP", kIndirectlyLeaked); | |||
362 | } | |||
363 | } | |||
364 | ||||
365 | // ForEachChunk callback. If chunk is marked as ignored, adds its address to | |||
366 | // frontier. | |||
367 | static void CollectIgnoredCb(uptr chunk, void *arg) { | |||
368 |   CHECK(arg);
369 | chunk = GetUserBegin(chunk); | |||
370 | LsanMetadata m(chunk); | |||
371 | if (m.allocated() && m.tag() == kIgnored) { | |||
372 |     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
373 |                  chunk, chunk + m.requested_size(), m.requested_size());
374 | reinterpret_cast<Frontier *>(arg)->push_back(chunk); | |||
375 | } | |||
376 | } | |||
377 | ||||
378 | static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) { | |||
379 |   CHECK(stack_id);
380 | StackTrace stack = map->Get(stack_id); | |||
381 | // The top frame is our malloc/calloc/etc. The next frame is the caller. | |||
382 | if (stack.size >= 2) | |||
383 | return stack.trace[1]; | |||
384 | return 0; | |||
385 | } | |||
386 | ||||
387 | struct InvalidPCParam { | |||
388 | Frontier *frontier; | |||
389 | StackDepotReverseMap *stack_depot_reverse_map; | |||
390 | bool skip_linker_allocations; | |||
391 | }; | |||
392 | ||||
393 | // ForEachChunk callback. If the caller pc is invalid or is within the linker, | |||
394 | // mark as reachable. Called by ProcessPlatformSpecificAllocations. | |||
395 | static void MarkInvalidPCCb(uptr chunk, void *arg) { | |||
396 |   CHECK(arg);
397 | InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg); | |||
398 | chunk = GetUserBegin(chunk); | |||
399 | LsanMetadata m(chunk); | |||
400 | if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) { | |||
401 | u32 stack_id = m.stack_trace_id(); | |||
402 | uptr caller_pc = 0; | |||
403 | if (stack_id > 0) | |||
404 | caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map); | |||
405 | // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark | |||
406 | // it as reachable, as we can't properly report its allocation stack anyway. | |||
407 | if (caller_pc == 0 || (param->skip_linker_allocations && | |||
408 | GetLinker()->containsAddress(caller_pc))) { | |||
409 | m.set_tag(kReachable); | |||
410 | param->frontier->push_back(chunk); | |||
411 | } | |||
412 | } | |||
413 | } | |||
414 | ||||
415 | // On Linux, treats all chunks allocated from ld-linux.so as reachable, which | |||
416 | // covers dynamically allocated TLS blocks, internal dynamic loader's loaded | |||
417 | // modules accounting etc. | |||
418 | // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. | |||
419 | // They are allocated with a __libc_memalign() call in allocate_and_init() | |||
420 | // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those | |||
421 | // blocks, but we can make sure they come from our own allocator by intercepting | |||
422 | // __libc_memalign(). On top of that, there is no easy way to reach them. Their | |||
423 | // addresses are stored in a dynamically allocated array (the DTV) which is | |||
424 | // referenced from the static TLS. Unfortunately, we can't just rely on the DTV | |||
425 | // being reachable from the static TLS, and the dynamic TLS being reachable from | |||
426 | // the DTV. This is because the initial DTV is allocated before our interception | |||
427 | // mechanism kicks in, and thus we don't recognize it as allocated memory. We | |||
428 | // can't special-case it either, since we don't know its size. | |||
429 | // Our solution is to include in the root set all allocations made from | |||
430 | // ld-linux.so (which is where allocate_and_init() is implemented). This is | |||
431 | // guaranteed to include all dynamic TLS blocks (and possibly other allocations | |||
432 | // which we don't care about). | |||
433 | // On all other platforms, this simply checks to ensure that the caller pc is | |||
434 | // valid before reporting chunks as leaked. | |||
435 | void ProcessPC(Frontier *frontier) { | |||
436 | StackDepotReverseMap stack_depot_reverse_map; | |||
437 | InvalidPCParam arg; | |||
438 | arg.frontier = frontier; | |||
439 | arg.stack_depot_reverse_map = &stack_depot_reverse_map; | |||
440 | arg.skip_linker_allocations = | |||
441 | flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr; | |||
442 | ForEachChunk(MarkInvalidPCCb, &arg); | |||
443 | } | |||
444 | ||||
445 | // Sets the appropriate tag on each chunk. | |||
446 | static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { | |||
447 | // Holds the flood fill frontier. | |||
448 | Frontier frontier; | |||
449 | ||||
450 | ForEachChunk(CollectIgnoredCb, &frontier); | |||
451 | ProcessGlobalRegions(&frontier); | |||
452 | ProcessThreads(suspended_threads, &frontier); | |||
453 | ProcessRootRegions(&frontier); | |||
454 | FloodFillTag(&frontier, kReachable); | |||
455 | ||||
456 |   CHECK_EQ(0, frontier.size());
457 | ProcessPC(&frontier); | |||
458 | ||||
459 | // The check here is relatively expensive, so we do this in a separate flood | |||
460 | // fill. That way we can skip the check for chunks that are reachable | |||
461 | // otherwise. | |||
462 |   LOG_POINTERS("Processing platform-specific allocations.\n");
463 | ProcessPlatformSpecificAllocations(&frontier); | |||
464 | FloodFillTag(&frontier, kReachable); | |||
465 | ||||
466 | // Iterate over leaked chunks and mark those that are reachable from other | |||
467 | // leaked chunks. | |||
468 |   LOG_POINTERS("Scanning leaked chunks.\n");
469 | ForEachChunk(MarkIndirectlyLeakedCb, nullptr); | |||
470 | } | |||
471 | ||||
472 | // ForEachChunk callback. Resets the tags to pre-leak-check state. | |||
473 | static void ResetTagsCb(uptr chunk, void *arg) { | |||
474 | (void)arg; | |||
475 | chunk = GetUserBegin(chunk); | |||
476 | LsanMetadata m(chunk); | |||
477 | if (m.allocated() && m.tag() != kIgnored) | |||
478 | m.set_tag(kDirectlyLeaked); | |||
479 | } | |||
480 | ||||
481 | static void PrintStackTraceById(u32 stack_trace_id) { | |||
482 |   CHECK(stack_trace_id);
483 | StackDepotGet(stack_trace_id).Print(); | |||
484 | } | |||
485 | ||||
486 | // ForEachChunk callback. Aggregates information about unreachable chunks into | |||
487 | // a LeakReport. | |||
488 | static void CollectLeaksCb(uptr chunk, void *arg) { | |||
489 |   CHECK(arg);
490 | LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg); | |||
491 | chunk = GetUserBegin(chunk); | |||
492 | LsanMetadata m(chunk); | |||
493 | if (!m.allocated()) return; | |||
494 | if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { | |||
495 | u32 resolution = flags()->resolution; | |||
496 | u32 stack_trace_id = 0; | |||
497 | if (resolution > 0) { | |||
498 | StackTrace stack = StackDepotGet(m.stack_trace_id()); | |||
499 | stack.size = Min(stack.size, resolution); | |||
500 | stack_trace_id = StackDepotPut(stack); | |||
501 | } else { | |||
502 | stack_trace_id = m.stack_trace_id(); | |||
503 | } | |||
504 | leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(), | |||
505 | m.tag()); | |||
506 | } | |||
507 | } | |||
508 | ||||
509 | static void PrintMatchedSuppressions() { | |||
510 | InternalMmapVector<Suppression *> matched; | |||
511 | GetSuppressionContext()->GetMatched(&matched); | |||
512 | if (!matched.size()) | |||
513 | return; | |||
514 | const char *line = "-----------------------------------------------------"; | |||
515 | Printf("%s\n", line); | |||
516 | Printf("Suppressions used:\n"); | |||
517 | Printf(" count bytes template\n"); | |||
518 | for (uptr i = 0; i < matched.size(); i++) | |||
519 | Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed( | |||
520 | &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ); | |||
521 | Printf("%s\n\n", line); | |||
522 | } | |||
523 | ||||
524 | struct CheckForLeaksParam { | |||
525 | bool success; | |||
526 | LeakReport leak_report; | |||
527 | }; | |||
528 | ||||
529 | static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) { | |||
530 | const InternalMmapVector<tid_t> &suspended_threads = | |||
531 | *(const InternalMmapVector<tid_t> *)arg; | |||
532 | if (tctx->status == ThreadStatusRunning) { | |||
533 | uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(), | |||
534 | tctx->os_id, CompareLess<int>()); | |||
535 | if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id) | |||
536 | Report("Running thread %d was not suspended. False leaks are possible.\n", | |||
537 | tctx->os_id); | |||
538 | }; | |||
539 | } | |||
540 | ||||
541 | static void ReportUnsuspendedThreads( | |||
542 | const SuspendedThreadsList &suspended_threads) { | |||
543 | InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount()); | |||
544 | for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i) | |||
545 | threads[i] = suspended_threads.GetThreadID(i); | |||
546 | ||||
547 | Sort(threads.data(), threads.size()); | |||
548 | ||||
549 | GetThreadRegistryLocked()->RunCallbackForEachThreadLocked( | |||
550 | &ReportIfNotSuspended, &threads); | |||
551 | } | |||
552 | ||||
553 | static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, | |||
554 | void *arg) { | |||
555 | CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg); | |||
556 |   CHECK(param);
557 |   CHECK(!param->success);
558 | ReportUnsuspendedThreads(suspended_threads); | |||
559 | ClassifyAllChunks(suspended_threads); | |||
560 | ForEachChunk(CollectLeaksCb, ¶m->leak_report); | |||
561 | // Clean up for subsequent leak checks. This assumes we did not overwrite any | |||
562 | // kIgnored tags. | |||
563 | ForEachChunk(ResetTagsCb, nullptr); | |||
564 | param->success = true; | |||
565 | } | |||
566 | ||||
567 | static bool CheckForLeaks() { | |||
568 | if (&__lsan_is_turned_off && __lsan_is_turned_off()) | |||
569 | return false; | |||
570 | EnsureMainThreadIDIsCorrect(); | |||
571 | CheckForLeaksParam param; | |||
572 | param.success = false; | |||
573 | LockThreadRegistry(); | |||
574 | LockAllocator(); | |||
575 | DoStopTheWorld(CheckForLeaksCallback, ¶m); | |||
576 | UnlockAllocator(); | |||
577 | UnlockThreadRegistry(); | |||
578 | ||||
579 | if (!param.success) { | |||
580 | Report("LeakSanitizer has encountered a fatal error.\n"); | |||
581 | Report( | |||
582 | "HINT: For debugging, try setting environment variable " | |||
583 | "LSAN_OPTIONS=verbosity=1:log_threads=1\n"); | |||
584 | Report( | |||
585 | "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n"); | |||
586 | Die(); | |||
587 | } | |||
588 | param.leak_report.ApplySuppressions(); | |||
589 | uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount(); | |||
590 | if (unsuppressed_count > 0) { | |||
591 | Decorator d; | |||
592 | Printf("\n" | |||
593 | "=================================================================" | |||
594 | "\n"); | |||
595 | Printf("%s", d.Error()); | |||
596 | Report("ERROR: LeakSanitizer: detected memory leaks\n"); | |||
597 | Printf("%s", d.Default()); | |||
598 | param.leak_report.ReportTopLeaks(flags()->max_leaks); | |||
599 | } | |||
600 | if (common_flags()->print_suppressions) | |||
601 | PrintMatchedSuppressions(); | |||
602 | if (unsuppressed_count > 0) { | |||
603 | param.leak_report.PrintSummary(); | |||
604 | return true; | |||
605 | } | |||
606 | return false; | |||
607 | } | |||
608 | ||||
609 | static bool has_reported_leaks = false; | |||
610 | bool HasReportedLeaks() { return has_reported_leaks; } | |||
611 | ||||
612 | void DoLeakCheck() { | |||
613 | BlockingMutexLock l(&global_mutex); | |||
614 | static bool already_done; | |||
615 | if (already_done) return; | |||
616 | already_done = true; | |||
617 | has_reported_leaks = CheckForLeaks(); | |||
618 | if (has_reported_leaks) HandleLeaks(); | |||
619 | } | |||
620 | ||||
621 | static int DoRecoverableLeakCheck() { | |||
622 | BlockingMutexLock l(&global_mutex); | |||
623 | bool have_leaks = CheckForLeaks(); | |||
624 | return have_leaks ? 1 : 0; | |||
625 | } | |||
626 | ||||
627 | void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); } | |||
628 | ||||
629 | static Suppression *GetSuppressionForAddr(uptr addr) { | |||
630 | Suppression *s = nullptr; | |||
631 | ||||
632 | // Suppress by module name. | |||
633 | SuppressionContext *suppressions = GetSuppressionContext(); | |||
634 |   if (const char *module_name =
635 | Symbolizer::GetOrInit()->GetModuleNameForPc(addr)) | |||
636 | if (suppressions->Match(module_name, kSuppressionLeak, &s)) | |||
637 | return s; | |||
638 | ||||
639 | // Suppress by file or function name. | |||
640 | SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); | |||
641 | for (SymbolizedStack *cur = frames; cur; cur = cur->next) { | |||
642 | if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) || | |||
643 | suppressions->Match(cur->info.file, kSuppressionLeak, &s)) { | |||
644 | break; | |||
645 | } | |||
646 | } | |||
647 |   frames->ClearAll();
          // ^-- reported warning: Called C++ object pointer is null ('frames' may be null here)
648 | return s; | |||
649 | } | |||
650 | ||||
651 | static Suppression *GetSuppressionForStack(u32 stack_trace_id) { | |||
652 | StackTrace stack = StackDepotGet(stack_trace_id); | |||
653 | for (uptr i = 0; i < stack.size; i++) { | |||
654 | Suppression *s = GetSuppressionForAddr( | |||
655 | StackTrace::GetPreviousInstructionPc(stack.trace[i])); | |||
656 | if (s) return s; | |||
657 | } | |||
658 | return nullptr; | |||
659 | } | |||
660 | ||||
661 | ///// LeakReport implementation. ///// | |||
662 | ||||
663 | // A hard limit on the number of distinct leaks, to avoid quadratic complexity | |||
664 | // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks | |||
665 | // in real-world applications. | |||
666 | // FIXME: Get rid of this limit by changing the implementation of LeakReport to | |||
667 | // use a hash table. | |||
668 | const uptr kMaxLeaksConsidered = 5000; | |||
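// (AddLeakedChunk() below scans leaks_ linearly for every reported chunk, which is the
// quadratic behavior this limit bounds.)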
669 | ||||
670 | void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id, | |||
671 | uptr leaked_size, ChunkTag tag) { | |||
672 |   CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
673 | bool is_directly_leaked = (tag == kDirectlyLeaked); | |||
674 | uptr i; | |||
675 | for (i = 0; i < leaks_.size(); i++) { | |||
676 | if (leaks_[i].stack_trace_id == stack_trace_id && | |||
677 | leaks_[i].is_directly_leaked == is_directly_leaked) { | |||
678 | leaks_[i].hit_count++; | |||
679 | leaks_[i].total_size += leaked_size; | |||
680 | break; | |||
681 | } | |||
682 | } | |||
683 | if (i == leaks_.size()) { | |||
684 | if (leaks_.size() == kMaxLeaksConsidered) return; | |||
685 | Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id, | |||
686 | is_directly_leaked, /* is_suppressed */ false }; | |||
687 | leaks_.push_back(leak); | |||
688 | } | |||
689 | if (flags()->report_objects) { | |||
690 | LeakedObject obj = {leaks_[i].id, chunk, leaked_size}; | |||
691 | leaked_objects_.push_back(obj); | |||
692 | } | |||
693 | } | |||
694 | ||||
695 | static bool LeakComparator(const Leak &leak1, const Leak &leak2) { | |||
696 | if (leak1.is_directly_leaked == leak2.is_directly_leaked) | |||
697 | return leak1.total_size > leak2.total_size; | |||
698 | else | |||
699 | return leak1.is_directly_leaked; | |||
700 | } | |||
701 | ||||
702 | void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { | |||
703 |   CHECK(leaks_.size() <= kMaxLeaksConsidered);
704 | Printf("\n"); | |||
705 | if (leaks_.size() == kMaxLeaksConsidered) | |||
706 | Printf("Too many leaks! Only the first %zu leaks encountered will be " | |||
707 | "reported.\n", | |||
708 | kMaxLeaksConsidered); | |||
709 | ||||
710 | uptr unsuppressed_count = UnsuppressedLeakCount(); | |||
711 | if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) | |||
712 | Printf("The %zu top leak(s):\n", num_leaks_to_report); | |||
713 | Sort(leaks_.data(), leaks_.size(), &LeakComparator); | |||
714 | uptr leaks_reported = 0; | |||
715 | for (uptr i = 0; i < leaks_.size(); i++) { | |||
716 | if (leaks_[i].is_suppressed) continue; | |||
717 | PrintReportForLeak(i); | |||
718 | leaks_reported++; | |||
719 | if (leaks_reported == num_leaks_to_report) break; | |||
720 | } | |||
721 | if (leaks_reported < unsuppressed_count) { | |||
722 | uptr remaining = unsuppressed_count - leaks_reported; | |||
723 | Printf("Omitting %zu more leak(s).\n", remaining); | |||
724 | } | |||
725 | } | |||
726 | ||||
727 | void LeakReport::PrintReportForLeak(uptr index) { | |||
728 | Decorator d; | |||
729 | Printf("%s", d.Leak()); | |||
730 | Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n", | |||
731 | leaks_[index].is_directly_leaked ? "Direct" : "Indirect", | |||
732 | leaks_[index].total_size, leaks_[index].hit_count); | |||
733 | Printf("%s", d.Default()); | |||
734 | ||||
735 | PrintStackTraceById(leaks_[index].stack_trace_id); | |||
736 | ||||
737 | if (flags()->report_objects) { | |||
738 | Printf("Objects leaked above:\n"); | |||
739 | PrintLeakedObjectsForLeak(index); | |||
740 | Printf("\n"); | |||
741 | } | |||
742 | } | |||
743 | ||||
744 | void LeakReport::PrintLeakedObjectsForLeak(uptr index) { | |||
745 | u32 leak_id = leaks_[index].id; | |||
746 | for (uptr j = 0; j < leaked_objects_.size(); j++) { | |||
747 | if (leaked_objects_[j].leak_id == leak_id) | |||
748 | Printf("%p (%zu bytes)\n", leaked_objects_[j].addr, | |||
749 | leaked_objects_[j].size); | |||
750 | } | |||
751 | } | |||
752 | ||||
753 | void LeakReport::PrintSummary() { | |||
754 |   CHECK(leaks_.size() <= kMaxLeaksConsidered);
755 | uptr bytes = 0, allocations = 0; | |||
756 | for (uptr i = 0; i < leaks_.size(); i++) { | |||
757 | if (leaks_[i].is_suppressed) continue; | |||
758 | bytes += leaks_[i].total_size; | |||
759 | allocations += leaks_[i].hit_count; | |||
760 | } | |||
761 | InternalScopedString summary(kMaxSummaryLength); | |||
762 | summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, | |||
763 | allocations); | |||
764 | ReportErrorSummary(summary.data()); | |||
765 | } | |||
766 | ||||
767 | void LeakReport::ApplySuppressions() { | |||
768 | for (uptr i = 0; i < leaks_.size(); i++) { | |||
769 | Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); | |||
770 | if (s) { | |||
771 | s->weight += leaks_[i].total_size; | |||
772 | atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + | |||
773 | leaks_[i].hit_count); | |||
774 | leaks_[i].is_suppressed = true; | |||
775 | } | |||
776 | } | |||
777 | } | |||
778 | ||||
779 | uptr LeakReport::UnsuppressedLeakCount() { | |||
780 | uptr result = 0; | |||
781 | for (uptr i = 0; i < leaks_.size(); i++) | |||
782 | if (!leaks_[i].is_suppressed) result++; | |||
783 | return result; | |||
784 | } | |||
785 | ||||
786 | } // namespace __lsan | |||
787 | #else // CAN_SANITIZE_LEAKS | |||
788 | namespace __lsan { | |||
789 | void InitCommonLsan() { } | |||
790 | void DoLeakCheck() { } | |||
791 | void DoRecoverableLeakCheckVoid() { } | |||
792 | void DisableInThisThread() { } | |||
793 | void EnableInThisThread() { } | |||
794 | } | |||
795 | #endif // CAN_SANITIZE_LEAKS | |||
796 | ||||
797 | using namespace __lsan; // NOLINT | |||
798 | ||||
799 | extern "C" { | |||
800 | SANITIZER_INTERFACE_ATTRIBUTE
801 | void __lsan_ignore_object(const void *p) {
802 | #if CAN_SANITIZE_LEAKS
803 | if (!common_flags()->detect_leaks) | |||
804 | return; | |||
805 | // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not | |||
806 | // locked. | |||
807 | BlockingMutexLock l(&global_mutex); | |||
808 | IgnoreObjectResult res = IgnoreObjectLocked(p); | |||
809 | if (res == kIgnoreObjectInvalid) | |||
810 |     VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
811 |   if (res == kIgnoreObjectAlreadyIgnored)
812 |     VReport(1, "__lsan_ignore_object(): "
813 |                "heap object at %p is already being ignored\n", p);
814 |   if (res == kIgnoreObjectSuccess)
815 |     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
816 | #endif // CAN_SANITIZE_LEAKS | |||
817 | } | |||
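// Illustrative usage from instrumented code: an allocation that is deliberately kept
// alive for the lifetime of the process can be excluded from leak reports, e.g.
//   void *cache = malloc(4096);     // intentionally never freed
//   __lsan_ignore_object(cache);    // LSan will not report this chunk as leaked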
818 | ||||
819 | SANITIZER_INTERFACE_ATTRIBUTE
820 | void __lsan_register_root_region(const void *begin, uptr size) {
821 | #if CAN_SANITIZE_LEAKS
822 |   BlockingMutexLock l(&global_mutex);
823 |   CHECK(root_regions);
824 | RootRegion region = {reinterpret_cast<uptr>(begin), size}; | |||
825 | root_regions->push_back(region); | |||
826 |   VReport(1, "Registered root region at %p of size %llu\n", begin, size);
827 | #endif // CAN_SANITIZE_LEAKS | |||
828 | } | |||
829 | ||||
830 | SANITIZER_INTERFACE_ATTRIBUTE
831 | void __lsan_unregister_root_region(const void *begin, uptr size) {
832 | #if CAN_SANITIZE_LEAKS
833 |   BlockingMutexLock l(&global_mutex);
834 |   CHECK(root_regions);
835 | bool removed = false; | |||
836 | for (uptr i = 0; i < root_regions->size(); i++) { | |||
837 | RootRegion region = (*root_regions)[i]; | |||
838 | if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) { | |||
839 | removed = true; | |||
840 | uptr last_index = root_regions->size() - 1; | |||
841 | (*root_regions)[i] = (*root_regions)[last_index]; | |||
842 | root_regions->pop_back(); | |||
843 |       VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
844 | break; | |||
845 | } | |||
846 | } | |||
847 | if (!removed) { | |||
848 | Report( | |||
849 | "__lsan_unregister_root_region(): region at %p of size %llu has not " | |||
850 | "been registered.\n", | |||
851 | begin, size); | |||
852 | Die(); | |||
853 | } | |||
854 | #endif // CAN_SANITIZE_LEAKS | |||
855 | } | |||
856 | ||||
857 | SANITIZER_INTERFACE_ATTRIBUTE
858 | void __lsan_disable() {
859 | #if CAN_SANITIZE_LEAKS
860 | __lsan::DisableInThisThread(); | |||
861 | #endif | |||
862 | } | |||
863 | ||||
864 | SANITIZER_INTERFACE_ATTRIBUTE
865 | void __lsan_enable() {
866 | #if CAN_SANITIZE_LEAKS
867 | __lsan::EnableInThisThread(); | |||
868 | #endif | |||
869 | } | |||
870 | ||||
871 | SANITIZER_INTERFACE_ATTRIBUTE
872 | void __lsan_do_leak_check() {
873 | #if CAN_SANITIZE_LEAKS
874 | if (common_flags()->detect_leaks) | |||
875 | __lsan::DoLeakCheck(); | |||
876 | #endif // CAN_SANITIZE_LEAKS | |||
877 | } | |||
878 | ||||
879 | SANITIZER_INTERFACE_ATTRIBUTE
880 | int __lsan_do_recoverable_leak_check() {
881 | #if CAN_SANITIZE_LEAKS
882 | if (common_flags()->detect_leaks) | |||
883 | return __lsan::DoRecoverableLeakCheck(); | |||
884 | #endif // CAN_SANITIZE_LEAKS | |||
885 | return 0; | |||
886 | } | |||
887 | ||||
888 | #if !SANITIZER_SUPPORTS_WEAK_HOOKS
889 | SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
890 | const char * __lsan_default_options() { | |||
891 | return ""; | |||
892 | } | |||
893 | ||||
894 | SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
895 | int __lsan_is_turned_off() { | |||
896 | return 0; | |||
897 | } | |||
898 | ||||
899 | SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
900 | const char *__lsan_default_suppressions() { | |||
901 | return ""; | |||
902 | } | |||
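// These weak hooks can be overridden by the instrumented program; an illustrative
// override (flag value and module pattern chosen purely as examples):
//   extern "C" const char *__lsan_default_options() { return "report_objects=1"; }
//   extern "C" const char *__lsan_default_suppressions() { return "leak:libfoo.so\n"; }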
903 | #endif | |||
904 | } // extern "C" |