File: compiler-rt/lib/asan/asan_allocator.cpp
Warning: line 609, column 7: Called C++ object pointer is null
1 | //===-- asan_allocator.cpp ------------------------------------------------===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This file is a part of AddressSanitizer, an address sanity checker. | ||||
10 | // | ||||
11 | // Implementation of ASan's memory allocator, second version.
12 | // This variant uses the allocator from sanitizer_common, i.e. the one shared | ||||
13 | // with ThreadSanitizer and MemorySanitizer. | ||||
14 | // | ||||
15 | //===----------------------------------------------------------------------===// | ||||
16 | |||||
17 | #include "asan_allocator.h" | ||||
18 | |||||
19 | #include "asan_mapping.h" | ||||
20 | #include "asan_poisoning.h" | ||||
21 | #include "asan_report.h" | ||||
22 | #include "asan_stack.h" | ||||
23 | #include "asan_thread.h" | ||||
24 | #include "lsan/lsan_common.h" | ||||
25 | #include "sanitizer_common/sanitizer_allocator_checks.h" | ||||
26 | #include "sanitizer_common/sanitizer_allocator_interface.h" | ||||
27 | #include "sanitizer_common/sanitizer_errno.h" | ||||
28 | #include "sanitizer_common/sanitizer_flags.h" | ||||
29 | #include "sanitizer_common/sanitizer_internal_defs.h" | ||||
30 | #include "sanitizer_common/sanitizer_list.h" | ||||
31 | #include "sanitizer_common/sanitizer_quarantine.h" | ||||
32 | #include "sanitizer_common/sanitizer_stackdepot.h" | ||||
33 | |||||
34 | namespace __asan { | ||||
35 | |||||
36 | // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. | ||||
37 | // We use adaptive redzones: for larger allocations, larger redzones are used.
38 | static u32 RZLog2Size(u32 rz_log) { | ||||
39 | CHECK_LT(rz_log, 8);
40 | return 16 << rz_log; | ||||
41 | } | ||||
42 | |||||
43 | static u32 RZSize2Log(u32 rz_size) { | ||||
44 | CHECK_GE(rz_size, 16);
45 | CHECK_LE(rz_size, 2048);
46 | CHECK(IsPowerOfTwo(rz_size));
47 | u32 res = Log2(rz_size) - 4;
48 | CHECK_EQ(rz_size, RZLog2Size(res));
49 | return res; | ||||
50 | } | ||||
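// Illustration (editorial sketch, not part of the original file): the two
// helpers above are exact inverses over the 3-bit range, mapping rz_log
// 0..7 to redzone sizes 16, 32, 64, ..., 2048:
//
//   for (u32 rz_log = 0; rz_log < 8; rz_log++) {
//     u32 rz_size = RZLog2Size(rz_log);  // 16 << rz_log
//     CHECK_EQ(RZSize2Log(rz_size), rz_log);
//   }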
51 | |||||
52 | static AsanAllocator &get_allocator(); | ||||
53 | |||||
54 | static void AtomicContextStore(volatile atomic_uint64_t *atomic_context, | ||||
55 | u32 tid, u32 stack) { | ||||
56 | u64 context = tid; | ||||
57 | context <<= 32; | ||||
58 | context += stack; | ||||
59 | atomic_store(atomic_context, context, memory_order_relaxed); | ||||
60 | } | ||||
61 | |||||
62 | static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context, | ||||
63 | u32 &tid, u32 &stack) { | ||||
64 | u64 context = atomic_load(atomic_context, memory_order_relaxed); | ||||
65 | stack = context; | ||||
66 | context >>= 32; | ||||
67 | tid = context; | ||||
68 | } | ||||
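// Illustration (editorial sketch): the context word packs the thread id in
// the upper 32 bits and the stack id in the lower 32 bits, so a store/load
// pair round-trips both values:
//
//   atomic_uint64_t ctx = {};
//   AtomicContextStore(&ctx, /*tid=*/7, /*stack=*/42);
//   u32 tid, stack;
//   AtomicContextLoad(&ctx, tid, stack);
//   CHECK_EQ(tid, 7);     // recovered from bits 32..63
//   CHECK_EQ(stack, 42);  // recovered from bits 0..31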
69 | |||||
70 | // The memory chunk allocated from the underlying allocator looks like this: | ||||
71 | // L L L L L L H H U U U U U U R R | ||||
72 | // L -- left redzone words (0 or more bytes) | ||||
73 | // H -- ChunkHeader (16 bytes), which is also a part of the left redzone. | ||||
74 | // U -- user memory. | ||||
75 | // R -- right redzone (0 or more bytes) | ||||
76 | // ChunkBase consists of ChunkHeader and other bytes that overlap with user | ||||
77 | // memory. | ||||
78 | |||||
79 | // If the left redzone is greater than the ChunkHeader size we store a magic | ||||
80 | // value in the first uptr word of the memory block and store the address of | ||||
81 | // ChunkBase in the next uptr. | ||||
82 | // M B L L L L L L L L L H H U U U U U U | ||||
83 | // | ^ | ||||
84 | // ---------------------| | ||||
85 | // M -- magic value kAllocBegMagic | ||||
86 | // B -- address of ChunkHeader pointing to the first 'H' | ||||
87 | |||||
88 | class ChunkHeader { | ||||
89 | public: | ||||
90 | atomic_uint8_t chunk_state; | ||||
91 | u8 alloc_type : 2; | ||||
92 | u8 lsan_tag : 2; | ||||
93 | |||||
94 | // align < 8 -> 0 | ||||
95 | // else -> log2(min(align, 512)) - 2 | ||||
96 | u8 user_requested_alignment_log : 3; | ||||
97 | |||||
98 | private: | ||||
99 | u16 user_requested_size_hi; | ||||
100 | u32 user_requested_size_lo; | ||||
101 | atomic_uint64_t alloc_context_id; | ||||
102 | |||||
103 | public: | ||||
104 | uptr UsedSize() const { | ||||
105 | uptr R = user_requested_size_lo; | ||||
106 | if (sizeof(uptr) > sizeof(user_requested_size_lo)) | ||||
107 | R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo)); | ||||
108 | return R; | ||||
109 | } | ||||
110 | |||||
111 | void SetUsedSize(uptr size) { | ||||
112 | user_requested_size_lo = size; | ||||
113 | if (sizeof(uptr) > sizeof(user_requested_size_lo)) { | ||||
114 | size >>= (8 * sizeof(user_requested_size_lo)); | ||||
115 | user_requested_size_hi = size; | ||||
116 | CHECK_EQ(user_requested_size_hi, size);
117 | } | ||||
118 | } | ||||
119 | |||||
120 | void SetAllocContext(u32 tid, u32 stack) { | ||||
121 | AtomicContextStore(&alloc_context_id, tid, stack); | ||||
122 | } | ||||
123 | |||||
124 | void GetAllocContext(u32 &tid, u32 &stack) const { | ||||
125 | AtomicContextLoad(&alloc_context_id, tid, stack); | ||||
126 | } | ||||
127 | }; | ||||
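// Editorial note: on 64-bit targets UsedSize()/SetUsedSize() split the user
// size across user_requested_size_lo (low 32 bits) and
// user_requested_size_hi (next 16 bits), so sizes up to 2^48 - 1 are
// representable and the CHECK_EQ in SetUsedSize() fires beyond that. For
// example, size 0x123456789ABC is stored as lo = 0x56789ABC, hi = 0x1234.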
128 | |||||
129 | class ChunkBase : public ChunkHeader { | ||||
130 | atomic_uint64_t free_context_id; | ||||
131 | |||||
132 | public: | ||||
133 | void SetFreeContext(u32 tid, u32 stack) { | ||||
134 | AtomicContextStore(&free_context_id, tid, stack); | ||||
135 | } | ||||
136 | |||||
137 | void GetFreeContext(u32 &tid, u32 &stack) const { | ||||
138 | AtomicContextLoad(&free_context_id, tid, stack); | ||||
139 | } | ||||
140 | }; | ||||
141 | |||||
142 | static const uptr kChunkHeaderSize = sizeof(ChunkHeader); | ||||
143 | static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; | ||||
144 | COMPILER_CHECK(kChunkHeaderSize == 16);
145 | COMPILER_CHECK(kChunkHeader2Size <= 16);
146 | |||||
147 | enum { | ||||
148 | // Either just allocated by the underlying allocator, but AsanChunk is not yet
149 | // ready, or almost returned to the underlying allocator and AsanChunk is already
150 | // meaningless. | ||||
151 | CHUNK_INVALID = 0, | ||||
152 | // The chunk is allocated and not yet freed. | ||||
153 | CHUNK_ALLOCATED = 2, | ||||
154 | // The chunk was freed and put into quarantine zone. | ||||
155 | CHUNK_QUARANTINE = 3, | ||||
156 | }; | ||||
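// Editorial sketch of the lifecycle these states encode:
//
//   CHUNK_INVALID    -> CHUNK_ALLOCATED   release store at the end of
//                                         Allocate()
//   CHUNK_ALLOCATED  -> CHUNK_QUARANTINE  CAS in Deallocate(); a failed CAS
//                                         is reported as an invalid free
//   CHUNK_QUARANTINE -> CHUNK_INVALID     CAS in QuarantineCallback::Recycle()
//                                         before the block is returned to
//                                         the underlying allocator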
157 | |||||
158 | class AsanChunk : public ChunkBase { | ||||
159 | public: | ||||
160 | uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } | ||||
161 | bool AddrIsInside(uptr addr) { | ||||
162 | return (addr >= Beg()) && (addr < Beg() + UsedSize()); | ||||
163 | } | ||||
164 | }; | ||||
165 | |||||
166 | class LargeChunkHeader { | ||||
167 | static constexpr uptr kAllocBegMagic = | ||||
168 | FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
169 | atomic_uintptr_t magic; | ||||
170 | AsanChunk *chunk_header; | ||||
171 | |||||
172 | public: | ||||
173 | AsanChunk *Get() const { | ||||
174 | return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic | ||||
175 | ? chunk_header | ||||
176 | : nullptr; | ||||
177 | } | ||||
178 | |||||
179 | void Set(AsanChunk *p) { | ||||
180 | if (p) { | ||||
181 | chunk_header = p; | ||||
182 | atomic_store(&magic, kAllocBegMagic, memory_order_release); | ||||
183 | return; | ||||
184 | } | ||||
185 | |||||
186 | uptr old = kAllocBegMagic; | ||||
187 | if (!atomic_compare_exchange_strong(&magic, &old, 0, | ||||
188 | memory_order_release)) { | ||||
189 | CHECK_EQ(old, kAllocBegMagic);
190 | } | ||||
191 | } | ||||
192 | }; | ||||
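// Editorial sketch (assumed usage, mirrored by GetAsanChunk() below): the
// magic lets a block start be mapped back to its AsanChunk even though the
// header is not at a fixed offset from alloc_beg:
//
//   LargeChunkHeader *h = reinterpret_cast<LargeChunkHeader *>(alloc_beg);
//   h->Set(m);                // publish chunk_header, then the magic
//   AsanChunk *p = h->Get();  // non-null only while the magic is intact
//   h->Set(nullptr);          // clear the magic before the block is recycled
//
// The acquire load in Get() pairs with the release store in Set(), so a
// reader that observes kAllocBegMagic also observes chunk_header.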
193 | |||||
194 | struct QuarantineCallback { | ||||
195 | QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) | ||||
196 | : cache_(cache), | ||||
197 | stack_(stack) { | ||||
198 | } | ||||
199 | |||||
200 | void Recycle(AsanChunk *m) { | ||||
201 | void *p = get_allocator().GetBlockBegin(m); | ||||
202 | if (p != m) { | ||||
203 | // Clear the magic value, as allocator internals may overwrite the | ||||
204 | // contents of deallocated chunk, confusing GetAsanChunk lookup. | ||||
205 | reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr); | ||||
206 | } | ||||
207 | |||||
208 | u8 old_chunk_state = CHUNK_QUARANTINE; | ||||
209 | if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state, | ||||
210 | CHUNK_INVALID, memory_order_acquire)) { | ||||
211 | CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
212 | } | ||||
213 | |||||
214 | PoisonShadow(m->Beg(), | ||||
215 | RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
216 | kAsanHeapLeftRedzoneMagic); | ||||
217 | |||||
218 | // Statistics. | ||||
219 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||
220 | thread_stats.real_frees++; | ||||
221 | thread_stats.really_freed += m->UsedSize(); | ||||
222 | |||||
223 | get_allocator().Deallocate(cache_, p); | ||||
224 | } | ||||
225 | |||||
226 | void *Allocate(uptr size) { | ||||
227 | void *res = get_allocator().Allocate(cache_, size, 1); | ||||
228 | // TODO(alekseys): Consider making quarantine OOM-friendly. | ||||
229 | if (UNLIKELY(!res))
230 | ReportOutOfMemory(size, stack_); | ||||
231 | return res; | ||||
232 | } | ||||
233 | |||||
234 | void Deallocate(void *p) { | ||||
235 | get_allocator().Deallocate(cache_, p); | ||||
236 | } | ||||
237 | |||||
238 | private: | ||||
239 | AllocatorCache* const cache_; | ||||
240 | BufferedStackTrace* const stack_; | ||||
241 | }; | ||||
242 | |||||
243 | typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; | ||||
244 | typedef AsanQuarantine::Cache QuarantineCache; | ||||
245 | |||||
246 | void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { | ||||
247 | PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); | ||||
248 | // Statistics. | ||||
249 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||
250 | thread_stats.mmaps++; | ||||
251 | thread_stats.mmaped += size; | ||||
252 | } | ||||
253 | void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { | ||||
254 | PoisonShadow(p, size, 0); | ||||
255 | // We are about to unmap a chunk of user memory. | ||||
256 | // Mark the corresponding shadow memory as not needed. | ||||
257 | FlushUnneededASanShadowMemory(p, size); | ||||
258 | // Statistics. | ||||
259 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||
260 | thread_stats.munmaps++; | ||||
261 | thread_stats.munmaped += size; | ||||
262 | } | ||||
263 | |||||
264 | // We cannot use THREADLOCAL because it is not supported on some of the
265 | // platforms we care about (OSX 10.6, Android). | ||||
266 | // static THREADLOCAL AllocatorCache cache; | ||||
267 | AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { | ||||
268 | CHECK(ms);
269 | return &ms->allocator_cache; | ||||
270 | } | ||||
271 | |||||
272 | QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { | ||||
273 | CHECK(ms);
274 | CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
275 | return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); | ||||
276 | } | ||||
277 | |||||
278 | void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { | ||||
279 | quarantine_size_mb = f->quarantine_size_mb; | ||||
280 | thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; | ||||
281 | min_redzone = f->redzone; | ||||
282 | max_redzone = f->max_redzone; | ||||
283 | may_return_null = cf->allocator_may_return_null; | ||||
284 | alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; | ||||
285 | release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; | ||||
286 | } | ||||
287 | |||||
288 | void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { | ||||
289 | f->quarantine_size_mb = quarantine_size_mb; | ||||
290 | f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; | ||||
291 | f->redzone = min_redzone; | ||||
292 | f->max_redzone = max_redzone; | ||||
293 | cf->allocator_may_return_null = may_return_null; | ||||
294 | f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; | ||||
295 | cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms; | ||||
296 | } | ||||
297 | |||||
298 | struct Allocator { | ||||
299 | static const uptr kMaxAllowedMallocSize = | ||||
300 | FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
301 | |||||
302 | AsanAllocator allocator; | ||||
303 | AsanQuarantine quarantine; | ||||
304 | StaticSpinMutex fallback_mutex; | ||||
305 | AllocatorCache fallback_allocator_cache; | ||||
306 | QuarantineCache fallback_quarantine_cache; | ||||
307 | |||||
308 | uptr max_user_defined_malloc_size; | ||||
309 | atomic_uint8_t rss_limit_exceeded; | ||||
310 | |||||
311 | // ------------------- Options -------------------------- | ||||
312 | atomic_uint16_t min_redzone; | ||||
313 | atomic_uint16_t max_redzone; | ||||
314 | atomic_uint8_t alloc_dealloc_mismatch; | ||||
315 | |||||
316 | // ------------------- Initialization ------------------------ | ||||
317 | explicit Allocator(LinkerInitialized) | ||||
318 | : quarantine(LINKER_INITIALIZED), | ||||
319 | fallback_quarantine_cache(LINKER_INITIALIZED) {} | ||||
320 | |||||
321 | void CheckOptions(const AllocatorOptions &options) const { | ||||
322 | CHECK_GE(options.min_redzone, 16);
323 | CHECK_GE(options.max_redzone, options.min_redzone);
324 | CHECK_LE(options.max_redzone, 2048);
325 | CHECK(IsPowerOfTwo(options.min_redzone));
326 | CHECK(IsPowerOfTwo(options.max_redzone));
327 | } | ||||
328 | |||||
329 | void SharedInitCode(const AllocatorOptions &options) { | ||||
330 | CheckOptions(options); | ||||
331 | quarantine.Init((uptr)options.quarantine_size_mb << 20, | ||||
332 | (uptr)options.thread_local_quarantine_size_kb << 10); | ||||
333 | atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, | ||||
334 | memory_order_release); | ||||
335 | atomic_store(&min_redzone, options.min_redzone, memory_order_release); | ||||
336 | atomic_store(&max_redzone, options.max_redzone, memory_order_release); | ||||
337 | } | ||||
338 | |||||
339 | void InitLinkerInitialized(const AllocatorOptions &options) { | ||||
340 | SetAllocatorMayReturnNull(options.may_return_null); | ||||
341 | allocator.InitLinkerInitialized(options.release_to_os_interval_ms); | ||||
342 | SharedInitCode(options); | ||||
343 | max_user_defined_malloc_size = common_flags()->max_allocation_size_mb | ||||
344 | ? common_flags()->max_allocation_size_mb | ||||
345 | << 20 | ||||
346 | : kMaxAllowedMallocSize; | ||||
347 | } | ||||
348 | |||||
349 | bool RssLimitExceeded() { | ||||
350 | return atomic_load(&rss_limit_exceeded, memory_order_relaxed); | ||||
351 | } | ||||
352 | |||||
353 | void SetRssLimitExceeded(bool limit_exceeded) { | ||||
354 | atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); | ||||
355 | } | ||||
356 | |||||
357 | void RePoisonChunk(uptr chunk) { | ||||
358 | // This could be a user-facing chunk (with redzones), or some internal | ||||
359 | // housekeeping chunk, like TransferBatch. Start by assuming the former. | ||||
360 | AsanChunk *ac = GetAsanChunk((void *)chunk); | ||||
361 | uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk); | ||||
362 | if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) == | ||||
363 | CHUNK_ALLOCATED) { | ||||
364 | uptr beg = ac->Beg(); | ||||
365 | uptr end = ac->Beg() + ac->UsedSize(); | ||||
366 | uptr chunk_end = chunk + allocated_size; | ||||
367 | if (chunk < beg && beg < end && end <= chunk_end) { | ||||
368 | // Looks like a valid AsanChunk in use, poison redzones only. | ||||
369 | PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); | ||||
370 | uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
371 | FastPoisonShadowPartialRightRedzone( | ||||
372 | end_aligned_down, end - end_aligned_down, | ||||
373 | chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); | ||||
374 | return; | ||||
375 | } | ||||
376 | } | ||||
377 | |||||
378 | // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
379 | // In either case, poison everything. | ||||
380 | PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); | ||||
381 | } | ||||
382 | |||||
383 | void ReInitialize(const AllocatorOptions &options) { | ||||
384 | SetAllocatorMayReturnNull(options.may_return_null); | ||||
385 | allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); | ||||
386 | SharedInitCode(options); | ||||
387 | |||||
388 | // Poison all existing allocation's redzones. | ||||
389 | if (CanPoisonMemory()) { | ||||
390 | allocator.ForceLock(); | ||||
391 | allocator.ForEachChunk( | ||||
392 | [](uptr chunk, void *alloc) { | ||||
393 | ((Allocator *)alloc)->RePoisonChunk(chunk); | ||||
394 | }, | ||||
395 | this); | ||||
396 | allocator.ForceUnlock(); | ||||
397 | } | ||||
398 | } | ||||
399 | |||||
400 | void GetOptions(AllocatorOptions *options) const { | ||||
401 | options->quarantine_size_mb = quarantine.GetSize() >> 20; | ||||
402 | options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10; | ||||
403 | options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); | ||||
404 | options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); | ||||
405 | options->may_return_null = AllocatorMayReturnNull(); | ||||
406 | options->alloc_dealloc_mismatch = | ||||
407 | atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); | ||||
408 | options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs(); | ||||
409 | } | ||||
410 | |||||
411 | // -------------------- Helper methods. ------------------------- | ||||
412 | uptr ComputeRZLog(uptr user_requested_size) { | ||||
413 | u32 rz_log = user_requested_size <= 64 - 16 ? 0 | ||||
414 | : user_requested_size <= 128 - 32 ? 1 | ||||
415 | : user_requested_size <= 512 - 64 ? 2 | ||||
416 | : user_requested_size <= 4096 - 128 ? 3 | ||||
417 | : user_requested_size <= (1 << 14) - 256 ? 4 | ||||
418 | : user_requested_size <= (1 << 15) - 512 ? 5 | ||||
419 | : user_requested_size <= (1 << 16) - 1024 ? 6 | ||||
420 | : 7; | ||||
421 | u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader))); | ||||
422 | u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire)); | ||||
423 | u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire)); | ||||
424 | return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log)); | ||||
425 | } | ||||
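// Worked example (editorial sketch, default flags redzone=16,
// max_redzone=2048): an 80-byte request lands in the second bucket
// (80 <= 128 - 32), so rz_log = 1. hdr_log = RZSize2Log(16) = 0,
// min_log = 0, max_log = 7, giving Min(Max(1, 0), Max(7, 0)) = 1,
// i.e. a 32-byte redzone.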
426 | |||||
427 | static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) { | ||||
428 | if (user_requested_alignment < 8) | ||||
429 | return 0; | ||||
430 | if (user_requested_alignment > 512) | ||||
431 | user_requested_alignment = 512; | ||||
432 | return Log2(user_requested_alignment) - 2; | ||||
433 | } | ||||
434 | |||||
435 | static uptr ComputeUserAlignment(uptr user_requested_alignment_log) { | ||||
436 | if (user_requested_alignment_log == 0) | ||||
437 | return 0; | ||||
438 | return 1LL << (user_requested_alignment_log + 2); | ||||
439 | } | ||||
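// Editorial sketch of the round trip through the two helpers above:
//
//   requested alignment 4    -> log 0 -> decoded 0   (below 8: not recorded)
//   requested alignment 16   -> log 2 -> decoded 16
//   requested alignment 1024 -> log 7 -> decoded 512 (clamped to 512)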
440 | |||||
441 | // We have an address between two chunks, and we want to report just one. | ||||
442 | AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, | ||||
443 | AsanChunk *right_chunk) { | ||||
444 | if (!left_chunk) | ||||
445 | return right_chunk; | ||||
446 | if (!right_chunk) | ||||
447 | return left_chunk; | ||||
448 | // Prefer an allocated chunk over freed chunk and freed chunk | ||||
449 | // over available chunk. | ||||
450 | u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed); | ||||
451 | u8 right_state = | ||||
452 | atomic_load(&right_chunk->chunk_state, memory_order_relaxed); | ||||
453 | if (left_state != right_state) { | ||||
454 | if (left_state == CHUNK_ALLOCATED) | ||||
455 | return left_chunk; | ||||
456 | if (right_state == CHUNK_ALLOCATED) | ||||
457 | return right_chunk; | ||||
458 | if (left_state == CHUNK_QUARANTINE) | ||||
459 | return left_chunk; | ||||
460 | if (right_state == CHUNK_QUARANTINE) | ||||
461 | return right_chunk; | ||||
462 | } | ||||
463 | // Same chunk_state: choose based on offset. | ||||
464 | sptr l_offset = 0, r_offset = 0; | ||||
465 | CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
466 | CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
467 | if (l_offset < r_offset) | ||||
468 | return left_chunk; | ||||
469 | return right_chunk; | ||||
470 | } | ||||
471 | |||||
472 | bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) { | ||||
473 | AsanChunk *m = GetAsanChunkByAddr(addr); | ||||
474 | if (!m) return false; | ||||
475 | if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) | ||||
476 | return false; | ||||
477 | if (m->Beg() != addr) return false; | ||||
478 | AsanThread *t = GetCurrentThread(); | ||||
479 | m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack)); | ||||
480 | return true; | ||||
481 | } | ||||
482 | |||||
483 | // -------------------- Allocation/Deallocation routines --------------- | ||||
484 | void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, | ||||
485 | AllocType alloc_type, bool can_fill) { | ||||
486 | if (UNLIKELY(!asan_inited))
487 | AsanInitFromRtl(); | ||||
488 | if (RssLimitExceeded()) { | ||||
489 | if (AllocatorMayReturnNull()) | ||||
490 | return nullptr; | ||||
491 | ReportRssLimitExceeded(stack); | ||||
492 | } | ||||
493 | Flags &fl = *flags(); | ||||
494 | CHECK(stack);
495 | const uptr min_alignment = SHADOW_GRANULARITY;
496 | const uptr user_requested_alignment_log = | ||||
497 | ComputeUserRequestedAlignmentLog(alignment); | ||||
498 | if (alignment < min_alignment)
499 | alignment = min_alignment; | ||||
500 | if (size == 0) { | ||||
501 | // We'd be happy to avoid allocating memory for zero-size requests, but | ||||
502 | // some programs/tests depend on this behavior and assume that malloc | ||||
503 | // would not return NULL even for zero-size allocations. Moreover, it | ||||
504 | // looks like operator new should never return NULL, and results of | ||||
505 | // consecutive "new" calls must be different even if the allocated size | ||||
506 | // is zero. | ||||
507 | size = 1; | ||||
508 | } | ||||
509 | CHECK(IsPowerOfTwo(alignment));
510 | uptr rz_log = ComputeRZLog(size); | ||||
511 | uptr rz_size = RZLog2Size(rz_log); | ||||
512 | uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); | ||||
513 | uptr needed_size = rounded_size + rz_size; | ||||
514 | if (alignment > min_alignment) | ||||
515 | needed_size += alignment; | ||||
516 | // If we are allocating from the secondary allocator, there will be no | ||||
517 | // automatic right redzone, so add the right redzone manually. | ||||
518 | if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) | ||||
519 | needed_size += rz_size; | ||||
520 | CHECK(IsAligned(needed_size, min_alignment));
521 | if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
522 |     size > max_user_defined_malloc_size) {
523 | if (AllocatorMayReturnNull()) { | ||||
524 | Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", | ||||
525 | (void*)size); | ||||
526 | return nullptr; | ||||
527 | } | ||||
528 | uptr malloc_limit = | ||||
529 | Min(kMaxAllowedMallocSize, max_user_defined_malloc_size); | ||||
530 | ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack); | ||||
531 | } | ||||
532 | |||||
533 | AsanThread *t = GetCurrentThread(); | ||||
534 | void *allocated; | ||||
535 | if (t) { | ||||
536 | AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); | ||||
537 | allocated = allocator.Allocate(cache, needed_size, 8); | ||||
538 | } else { | ||||
539 | SpinMutexLock l(&fallback_mutex); | ||||
540 | AllocatorCache *cache = &fallback_allocator_cache; | ||||
541 | allocated = allocator.Allocate(cache, needed_size, 8); | ||||
542 | } | ||||
543 | if (UNLIKELY(!allocated)) {
544 | SetAllocatorOutOfMemory(); | ||||
545 | if (AllocatorMayReturnNull()) | ||||
546 | return nullptr; | ||||
547 | ReportOutOfMemory(size, stack); | ||||
548 | } | ||||
549 | |||||
550 | if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
551 | // Heap poisoning is enabled, but the allocator provides an unpoisoned | ||||
552 | // chunk. This is possible if CanPoisonMemory() was false for some | ||||
553 | // time, for example, due to flags()->start_disabled. | ||||
554 | // Anyway, poison the block before using it for anything else. | ||||
555 | uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); | ||||
556 | PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); | ||||
557 | } | ||||
558 | |||||
559 | uptr alloc_beg = reinterpret_cast<uptr>(allocated); | ||||
560 | uptr alloc_end = alloc_beg + needed_size; | ||||
561 | uptr user_beg = alloc_beg + rz_size; | ||||
562 | if (!IsAligned(user_beg, alignment)) | ||||
563 | user_beg = RoundUpTo(user_beg, alignment); | ||||
564 | uptr user_end = user_beg + size; | ||||
565 | CHECK_LE(user_end, alloc_end);
566 | uptr chunk_beg = user_beg - kChunkHeaderSize; | ||||
567 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); | ||||
568 | m->alloc_type = alloc_type; | ||||
569 | CHECK(size);
570 | m->SetUsedSize(size); | ||||
571 | m->user_requested_alignment_log = user_requested_alignment_log; | ||||
572 | |||||
573 | m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
574 | |||||
575 | uptr size_rounded_down_to_granularity = | ||||
576 | RoundDownTo(size, SHADOW_GRANULARITY);
577 | // Unpoison the bulk of the memory region. | ||||
578 | if (size_rounded_down_to_granularity) | ||||
579 | PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); | ||||
580 | // Deal with the end of the region if size is not aligned to granularity. | ||||
581 | if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
582 | u8 *shadow = | ||||
583 | (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); | ||||
584 | *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
585 | } | ||||
586 | |||||
587 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||
588 | thread_stats.mallocs++; | ||||
589 | thread_stats.malloced += size; | ||||
590 | thread_stats.malloced_redzones += needed_size - size; | ||||
591 | if (needed_size > SizeClassMap::kMaxSize) | ||||
592 | thread_stats.malloc_large++; | ||||
593 | else | ||||
594 | thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; | ||||
595 | |||||
596 | void *res = reinterpret_cast<void *>(user_beg); | ||||
597 | if (can_fill && fl.max_malloc_fill_size) {
598 | uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); | ||||
599 | REAL(memset)(res, fl.malloc_fill_byte, fill_size);
600 | } | ||||
601 | #if CAN_SANITIZE_LEAKS
602 | m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored | ||||
603 | : __lsan::kDirectlyLeaked; | ||||
604 | #endif | ||||
605 | // Must be the last mutation of metadata in this function. | ||||
606 | atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release); | ||||
607 | if (alloc_beg != chunk_beg) { | ||||
608 | CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
609 | reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
610 | } | ||||
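// Editorial note on the reported warning (line 609, "Called C++ object
// pointer is null"): the flagged path assumes allocator.Allocate() returned
// null, AllocatorMayReturnNull() was false, and execution continued past
// ReportOutOfMemory() with allocated == nullptr. ReportOutOfMemory() is
// expected not to return, so this looks like a false positive from the
// analyzer not modeling that noreturn path.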
611 | ASAN_MALLOC_HOOK(res, size);
612 | return res; | ||||
613 | } | ||||
614 | |||||
615 | // Set quarantine flag if chunk is allocated, issue ASan error report on | ||||
616 | // available and quarantined chunks. Return true on success, false otherwise. | ||||
617 | bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, | ||||
618 | BufferedStackTrace *stack) { | ||||
619 | u8 old_chunk_state = CHUNK_ALLOCATED; | ||||
620 | // Flip the chunk_state atomically to avoid race on double-free. | ||||
621 | if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state, | ||||
622 | CHUNK_QUARANTINE, | ||||
623 | memory_order_acquire)) { | ||||
624 | ReportInvalidFree(ptr, old_chunk_state, stack); | ||||
625 | // It's not safe to push a chunk in quarantine on invalid free. | ||||
626 | return false; | ||||
627 | } | ||||
628 | CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
629 | // It was user data.
630 | m->SetFreeContext(kInvalidTid, 0); | ||||
631 | return true; | ||||
632 | } | ||||
633 | |||||
634 | // Expects the chunk to already be marked as quarantined by using | ||||
635 | // AtomicallySetQuarantineFlagIfAllocated. | ||||
636 | void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { | ||||
637 | CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
638 |          CHUNK_QUARANTINE);
639 | AsanThread *t = GetCurrentThread(); | ||||
640 | m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack)); | ||||
641 | |||||
642 | Flags &fl = *flags(); | ||||
643 | if (fl.max_free_fill_size > 0) { | ||||
644 | // We have to skip the chunk header, it contains free_context_id. | ||||
645 | uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size; | ||||
646 | if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. | ||||
647 | uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; | ||||
648 | size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); | ||||
649 | REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
650 | } | ||||
651 | } | ||||
652 | |||||
653 | // Poison the region. | ||||
654 | PoisonShadow(m->Beg(), | ||||
655 | RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
656 | kAsanHeapFreeMagic); | ||||
657 | |||||
658 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||
659 | thread_stats.frees++; | ||||
660 | thread_stats.freed += m->UsedSize(); | ||||
661 | |||||
662 | // Push into quarantine. | ||||
663 | if (t) { | ||||
664 | AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); | ||||
665 | AllocatorCache *ac = GetAllocatorCache(ms); | ||||
666 | quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, | ||||
667 | m->UsedSize()); | ||||
668 | } else { | ||||
669 | SpinMutexLock l(&fallback_mutex); | ||||
670 | AllocatorCache *ac = &fallback_allocator_cache; | ||||
671 | quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), | ||||
672 | m, m->UsedSize()); | ||||
673 | } | ||||
674 | } | ||||
675 | |||||
676 | void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment, | ||||
677 | BufferedStackTrace *stack, AllocType alloc_type) { | ||||
678 | uptr p = reinterpret_cast<uptr>(ptr); | ||||
679 | if (p == 0) return; | ||||
680 | |||||
681 | uptr chunk_beg = p - kChunkHeaderSize; | ||||
682 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); | ||||
683 | |||||
684 | // On Windows, uninstrumented DLLs may allocate memory before ASan hooks | ||||
685 | // malloc. Don't report an invalid free in this case. | ||||
686 | if (SANITIZER_WINDOWS &&
687 | !get_allocator().PointerIsMine(ptr)) { | ||||
688 | if (!IsSystemHeapAddress(p)) | ||||
689 | ReportFreeNotMalloced(p, stack); | ||||
690 | return; | ||||
691 | } | ||||
692 | |||||
693 | ASAN_FREE_HOOK(ptr);
694 | |||||
695 | // Must mark the chunk as quarantined before any changes to its metadata. | ||||
696 | // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. | ||||
697 | if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; | ||||
698 | |||||
699 | if (m->alloc_type != alloc_type) { | ||||
700 | if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { | ||||
701 | ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, | ||||
702 | (AllocType)alloc_type); | ||||
703 | } | ||||
704 | } else { | ||||
705 | if (flags()->new_delete_type_mismatch && | ||||
706 | (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) && | ||||
707 | ((delete_size && delete_size != m->UsedSize()) || | ||||
708 | ComputeUserRequestedAlignmentLog(delete_alignment) != | ||||
709 | m->user_requested_alignment_log)) { | ||||
710 | ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack); | ||||
711 | } | ||||
712 | } | ||||
713 | |||||
714 | QuarantineChunk(m, ptr, stack); | ||||
715 | } | ||||
716 | |||||
717 | void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { | ||||
718 | CHECK(old_ptr && new_size);
719 | uptr p = reinterpret_cast<uptr>(old_ptr); | ||||
720 | uptr chunk_beg = p - kChunkHeaderSize; | ||||
721 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); | ||||
722 | |||||
723 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||
724 | thread_stats.reallocs++; | ||||
725 | thread_stats.realloced += new_size; | ||||
726 | |||||
727 | void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); | ||||
728 | if (new_ptr) { | ||||
729 | u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire); | ||||
730 | if (chunk_state != CHUNK_ALLOCATED) | ||||
731 | ReportInvalidFree(old_ptr, chunk_state, stack); | ||||
732 | CHECK_NE(REAL(memcpy), nullptr);
733 | uptr memcpy_size = Min(new_size, m->UsedSize()); | ||||
734 | // If realloc() races with free(), we may start copying freed memory. | ||||
735 | // However, we will report racy double-free later anyway. | ||||
736 | REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
737 | Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC); | ||||
738 | } | ||||
739 | return new_ptr; | ||||
740 | } | ||||
741 | |||||
742 | void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { | ||||
743 | if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
744 | if (AllocatorMayReturnNull()) | ||||
745 | return nullptr; | ||||
746 | ReportCallocOverflow(nmemb, size, stack); | ||||
747 | } | ||||
748 | void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); | ||||
749 | // If the memory comes from the secondary allocator no need to clear it | ||||
750 | // as it comes directly from mmap. | ||||
751 | if (ptr && allocator.FromPrimary(ptr)) | ||||
752 | REAL(memset)(ptr, 0, nmemb * size);
753 | return ptr; | ||||
754 | } | ||||
755 | |||||
756 | void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { | ||||
757 | if (chunk_state == CHUNK_QUARANTINE) | ||||
758 | ReportDoubleFree((uptr)ptr, stack); | ||||
759 | else | ||||
760 | ReportFreeNotMalloced((uptr)ptr, stack); | ||||
761 | } | ||||
762 | |||||
763 | void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { | ||||
764 | AllocatorCache *ac = GetAllocatorCache(ms); | ||||
765 | quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); | ||||
766 | allocator.SwallowCache(ac); | ||||
767 | } | ||||
768 | |||||
769 | // -------------------------- Chunk lookup ---------------------- | ||||
770 | |||||
771 | // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). | ||||
772 | // Returns nullptr if AsanChunk is not yet initialized just after | ||||
773 | // get_allocator().Allocate(), or is being destroyed just before | ||||
774 | // get_allocator().Deallocate(). | ||||
775 | AsanChunk *GetAsanChunk(void *alloc_beg) { | ||||
776 | if (!alloc_beg) | ||||
777 | return nullptr; | ||||
778 | AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get(); | ||||
779 | if (!p) { | ||||
780 | if (!allocator.FromPrimary(alloc_beg)) | ||||
781 | return nullptr; | ||||
782 | p = reinterpret_cast<AsanChunk *>(alloc_beg); | ||||
783 | } | ||||
784 | u8 state = atomic_load(&p->chunk_state, memory_order_relaxed); | ||||
785 | // This does not guarantee that the chunk is initialized, but it is
786 | // definitely not initialized for any other state value.
787 | if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE) | ||||
788 | return p; | ||||
789 | return nullptr; | ||||
790 | } | ||||
791 | |||||
792 | AsanChunk *GetAsanChunkByAddr(uptr p) { | ||||
793 | void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); | ||||
794 | return GetAsanChunk(alloc_beg); | ||||
795 | } | ||||
796 | |||||
797 | // Allocator must be locked when this function is called. | ||||
798 | AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { | ||||
799 | void *alloc_beg = | ||||
800 | allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); | ||||
801 | return GetAsanChunk(alloc_beg); | ||||
802 | } | ||||
803 | |||||
804 | uptr AllocationSize(uptr p) { | ||||
805 | AsanChunk *m = GetAsanChunkByAddr(p); | ||||
806 | if (!m) return 0; | ||||
807 | if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED) | ||||
808 | return 0; | ||||
809 | if (m->Beg() != p) return 0; | ||||
810 | return m->UsedSize(); | ||||
811 | } | ||||
812 | |||||
813 | AsanChunkView FindHeapChunkByAddress(uptr addr) { | ||||
814 | AsanChunk *m1 = GetAsanChunkByAddr(addr); | ||||
815 | sptr offset = 0; | ||||
816 | if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { | ||||
817 | // The address is in the chunk's left redzone, so maybe it is actually | ||||
818 | // a right buffer overflow from the other chunk to the left. | ||||
819 | // Search a bit to the left to see if there is another chunk. | ||||
820 | AsanChunk *m2 = nullptr; | ||||
821 | for (uptr l = 1; l < GetPageSizeCached(); l++) { | ||||
822 | m2 = GetAsanChunkByAddr(addr - l); | ||||
823 | if (m2 == m1) continue; // Still the same chunk. | ||||
824 | break; | ||||
825 | } | ||||
826 | if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) | ||||
827 | m1 = ChooseChunk(addr, m2, m1); | ||||
828 | } | ||||
829 | return AsanChunkView(m1); | ||||
830 | } | ||||
831 | |||||
832 | void Purge(BufferedStackTrace *stack) { | ||||
833 | AsanThread *t = GetCurrentThread(); | ||||
834 | if (t) { | ||||
835 | AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); | ||||
836 | quarantine.DrainAndRecycle(GetQuarantineCache(ms), | ||||
837 | QuarantineCallback(GetAllocatorCache(ms), | ||||
838 | stack)); | ||||
839 | } | ||||
840 | { | ||||
841 | SpinMutexLock l(&fallback_mutex); | ||||
842 | quarantine.DrainAndRecycle(&fallback_quarantine_cache, | ||||
843 | QuarantineCallback(&fallback_allocator_cache, | ||||
844 | stack)); | ||||
845 | } | ||||
846 | |||||
847 | allocator.ForceReleaseToOS(); | ||||
848 | } | ||||
849 | |||||
850 | void PrintStats() { | ||||
851 | allocator.PrintStats(); | ||||
852 | quarantine.PrintStats(); | ||||
853 | } | ||||
854 | |||||
855 | void ForceLock() ACQUIRE(fallback_mutex) {
856 | allocator.ForceLock(); | ||||
857 | fallback_mutex.Lock(); | ||||
858 | } | ||||
859 | |||||
860 | void ForceUnlock() RELEASE(fallback_mutex) {
861 | fallback_mutex.Unlock(); | ||||
862 | allocator.ForceUnlock(); | ||||
863 | } | ||||
864 | }; | ||||
865 | |||||
866 | static Allocator instance(LINKER_INITIALIZED); | ||||
867 | |||||
868 | static AsanAllocator &get_allocator() { | ||||
869 | return instance.allocator; | ||||
870 | } | ||||
871 | |||||
872 | bool AsanChunkView::IsValid() const { | ||||
873 | return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) != | ||||
874 | CHUNK_INVALID; | ||||
875 | } | ||||
876 | bool AsanChunkView::IsAllocated() const { | ||||
877 | return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) == | ||||
878 | CHUNK_ALLOCATED; | ||||
879 | } | ||||
880 | bool AsanChunkView::IsQuarantined() const { | ||||
881 | return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) == | ||||
882 | CHUNK_QUARANTINE; | ||||
883 | } | ||||
884 | uptr AsanChunkView::Beg() const { return chunk_->Beg(); } | ||||
885 | uptr AsanChunkView::End() const { return Beg() + UsedSize(); } | ||||
886 | uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); } | ||||
887 | u32 AsanChunkView::UserRequestedAlignment() const { | ||||
888 | return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log); | ||||
889 | } | ||||
890 | |||||
891 | uptr AsanChunkView::AllocTid() const { | ||||
892 | u32 tid = 0; | ||||
893 | u32 stack = 0; | ||||
894 | chunk_->GetAllocContext(tid, stack); | ||||
895 | return tid; | ||||
896 | } | ||||
897 | |||||
898 | uptr AsanChunkView::FreeTid() const { | ||||
899 | if (!IsQuarantined()) | ||||
900 | return kInvalidTid; | ||||
901 | u32 tid = 0; | ||||
902 | u32 stack = 0; | ||||
903 | chunk_->GetFreeContext(tid, stack); | ||||
904 | return tid; | ||||
905 | } | ||||
906 | |||||
907 | AllocType AsanChunkView::GetAllocType() const { | ||||
908 | return (AllocType)chunk_->alloc_type; | ||||
909 | } | ||||
910 | |||||
911 | static StackTrace GetStackTraceFromId(u32 id) { | ||||
912 | CHECK(id);
913 | StackTrace res = StackDepotGet(id); | ||||
914 | CHECK(res.trace);
915 | return res; | ||||
916 | } | ||||
917 | |||||
918 | u32 AsanChunkView::GetAllocStackId() const { | ||||
919 | u32 tid = 0; | ||||
920 | u32 stack = 0; | ||||
921 | chunk_->GetAllocContext(tid, stack); | ||||
922 | return stack; | ||||
923 | } | ||||
924 | |||||
925 | u32 AsanChunkView::GetFreeStackId() const { | ||||
926 | if (!IsQuarantined()) | ||||
927 | return 0; | ||||
928 | u32 tid = 0; | ||||
929 | u32 stack = 0; | ||||
930 | chunk_->GetFreeContext(tid, stack); | ||||
931 | return stack; | ||||
932 | } | ||||
933 | |||||
934 | StackTrace AsanChunkView::GetAllocStack() const { | ||||
935 | return GetStackTraceFromId(GetAllocStackId()); | ||||
936 | } | ||||
937 | |||||
938 | StackTrace AsanChunkView::GetFreeStack() const { | ||||
939 | return GetStackTraceFromId(GetFreeStackId()); | ||||
940 | } | ||||
941 | |||||
942 | void InitializeAllocator(const AllocatorOptions &options) { | ||||
943 | instance.InitLinkerInitialized(options); | ||||
944 | } | ||||
945 | |||||
946 | void ReInitializeAllocator(const AllocatorOptions &options) { | ||||
947 | instance.ReInitialize(options); | ||||
948 | } | ||||
949 | |||||
950 | void GetAllocatorOptions(AllocatorOptions *options) { | ||||
951 | instance.GetOptions(options); | ||||
952 | } | ||||
953 | |||||
954 | AsanChunkView FindHeapChunkByAddress(uptr addr) { | ||||
955 | return instance.FindHeapChunkByAddress(addr); | ||||
956 | } | ||||
957 | AsanChunkView FindHeapChunkByAllocBeg(uptr addr) { | ||||
958 | return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr))); | ||||
959 | } | ||||
960 | |||||
961 | void AsanThreadLocalMallocStorage::CommitBack() { | ||||
962 | GET_STACK_TRACE_MALLOC;
963 | instance.CommitBack(this, &stack); | ||||
964 | } | ||||
965 | |||||
966 | void PrintInternalAllocatorStats() { | ||||
967 | instance.PrintStats(); | ||||
968 | } | ||||

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}
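
// CheckForCallocOverflow guards the nmemb * size multiplication above. A
// minimal sketch of an equivalent test, shown for illustration only (the
// real helper lives in sanitizer_common/sanitizer_allocator_checks.h):
#if 0
static bool CallocOverflowSketch(uptr size, uptr n) {
  if (size == 0)
    return false;                // 0 * n never overflows
  return n > (uptr)-1 / size;    // n * size would exceed the uptr range
}
// E.g. on a 64-bit target, CallocOverflowSketch(1ULL << 33, 1ULL << 33) is
// true, so asan_reallocarray reports ENOMEM instead of silently wrapping.
#endif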

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
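
// realloc(p, 0) is historically ambiguous, so ASan makes the behavior an
// explicit flag. A usage sketch, assuming the flag keeps its default value
// of true:
#if 0
void *p = malloc(16);
void *q = realloc(p, 0);  // default: frees p and returns nullptr
// With ASAN_OPTIONS=allocator_frees_and_returns_null_on_realloc_zero=0 the
// same call instead returns a live 1-byte allocation (see size = 1 above).
free(q);                  // safe in both modes: free(nullptr) is a no-op
#endif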

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
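
// RoundUpTo relies on PageSize being a power of two. A sketch of the same
// computation, for illustration (the real helper is in sanitizer_common):
#if 0
static uptr RoundUpToSketch(uptr size, uptr boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}
// With 4 KiB pages: pvalloc(1) -> 4096 bytes, pvalloc(4097) -> 8192 bytes,
// and pvalloc(0) -> 4096 bytes via the one-page special case above.
#endif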

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
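
// C11 aligned_alloc requires a power-of-two alignment and (in the original
// C11 wording) a size that is a multiple of that alignment. A sketch of an
// equivalent POSIX-style validity test, shown for illustration only (the
// real check is CheckAlignedAllocAlignmentAndSize in sanitizer_common):
#if 0
static bool AlignedAllocArgsValidSketch(uptr alignment, uptr size) {
  return alignment != 0 && IsPowerOfTwo(alignment) &&
         (size & (alignment - 1)) == 0;  // size is a multiple of alignment
}
#endif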

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
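
// Unlike the entry points above, posix_memalign reports failure through its
// return value and never touches errno. A usage sketch:
#if 0
void *p = nullptr;
// 64 is a power of two and a multiple of sizeof(void*), so this is valid.
int rc = posix_memalign(&p, 64, 1024);
if (rc == 0) {
  // p is 64-byte aligned here.
  free(p);
}
#endif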

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}
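
// Because AllocationSize returns the size the user asked for, ASan's
// malloc_usable_size never reports the rounding slack a system allocator
// might add; anything past the returned value is redzone. A usage sketch:
#if 0
char *p = (char *)malloc(10);
size_t n = malloc_usable_size(p);  // 10 under ASan, not rounded up
memset(p, 0, n);                   // stays within the addressable bytes
free(p);
#endif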

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }

void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}
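
// LSan counts a pointer as a reference if it lands anywhere inside the user
// region of a live chunk. A sketch of the containment test that AddrIsInside
// performs, for illustration (the real method is defined on AsanChunk
// earlier in this file):
#if 0
static bool AddrIsInsideSketch(uptr beg, uptr used_size, uptr addr) {
  return beg <= addr && addr < beg + used_size;
}
#endif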

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
  // Look for the arg pointer of threads that have been created or are running.
  // This is necessary to prevent false positive leaks due to the AsanThread
  // holding the only live reference to a heap object. This can happen because
  // the `pthread_create()` interceptor doesn't wait for the child thread to
  // start before returning, thus losing the only live reference to the heap
  // object on the stack.

  __asan::AsanThreadContext *atctx =
      reinterpret_cast<__asan::AsanThreadContext *>(tctx);
  __asan::AsanThread *asan_thread = atctx->thread;

  // Note: ThreadStatusRunning must be accepted as well, because there is a
  // small window where the thread status has switched to `ThreadStatusRunning`
  // but the `arg` pointer is not on the child's stack yet.
  if (atctx->status != ThreadStatusCreated &&
      atctx->status != ThreadStatusRunning)
    return;

  uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
  if (!thread_arg)
    return;

  auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
  ptrsVec->push_back(thread_arg);
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
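
// A usage sketch for the two queries above (illustration only):
#if 0
void *p = malloc(32);
if (__sanitizer_get_ownership(p))
  printf("allocated size: %zu\n", __sanitizer_get_allocated_size(p));  // 32
free(p);
// Calling __sanitizer_get_allocated_size(p) after the free would die with a
// "not owned" report, per the comment above.
#endif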

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void *addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif
File: compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h

//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H

namespace __sanitizer {

inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}
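
// proc_yield is intended for spin-wait loops: the pause instruction hints to
// the core that a busy-wait is in progress, saving power and avoiding
// pipeline flushes on loop exit. A hypothetical backoff loop using it:
#if 0
template <typename Pred>
void SpinUntil(Pred ready) {
  for (int backoff = 1; !ready(); backoff = backoff < 64 ? backoff * 2 : 64)
    proc_yield(backoff);  // exponential backoff, capped at 64 pauses
}
#endif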

template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV is enough for a seq_cst load, because seq_cst
      // stores (below) carry the full fence.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}
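
// A usage sketch, assuming the atomic_uint32_t wrapper type declared in
// sanitizer_atomic.h:
#if 0
atomic_uint32_t flag;
u32 a = atomic_load(&flag, memory_order_acquire);  // compiler barrier + MOV
u32 r = atomic_load(&flag, memory_order_relaxed);  // plain MOV, no barrier
#endif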

template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();  // full fence makes the store seq_cst
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}
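
// The release/acquire pairing these two primitives exist to support: a writer
// publishes data with a release store and a reader observes it with an
// acquire load. A sketch, again assuming atomic_uint32_t from
// sanitizer_atomic.h:
#if 0
u32 payload;
atomic_uint32_t published;

void Writer() {
  payload = 42;                                       // plain store
  atomic_store(&published, 1, memory_order_release);  // publish
}

void Reader() {
  if (atomic_load(&published, memory_order_acquire))  // observe the publish
    CHECK_EQ(payload, 42);                            // payload is visible
}
#endif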

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_X86_H