File: compiler-rt/lib/asan/asan_allocator.cpp
Warning: line 511, column 46: Array access (from variable 'alloc_beg') results in a null pointer dereference
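
The path behind this report appears to be the out-of-memory branch of Allocate(): allocator.Allocate() can return null (lines 470/474), and if AllocatorMayReturnNull() is false the code calls ReportOutOfMemory() (line 480). At runtime that call reports and aborts, but the analyzer evidently assumes it can return, so on its path alloc_beg (line 492) stays 0 and the store at line 511 writes through a null pointer. Condensed from the listing below:

  void *allocated = allocator.Allocate(cache, needed_size, 8);  // lines 470/474: assumed to return null
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())                                // assumed false on this path
      return nullptr;
    ReportOutOfMemory(size, stack);                              // line 480: treated as if it returned
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);            // line 492: alloc_beg == 0 here
  ...
  reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;       // line 511: the flagged array access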
1 | //===-- asan_allocator.cpp ------------------------------------------------===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // This file is a part of AddressSanitizer, an address sanity checker. | ||||||||
10 | // | ||||||||
11 | // Implementation of ASan's memory allocator, 2-nd version. | ||||||||
12 | // This variant uses the allocator from sanitizer_common, i.e. the one shared | ||||||||
13 | // with ThreadSanitizer and MemorySanitizer. | ||||||||
14 | // | ||||||||
15 | //===----------------------------------------------------------------------===// | ||||||||
16 | |||||||||
17 | #include "asan_allocator.h" | ||||||||
18 | |||||||||
19 | #include "asan_mapping.h" | ||||||||
20 | #include "asan_poisoning.h" | ||||||||
21 | #include "asan_report.h" | ||||||||
22 | #include "asan_stack.h" | ||||||||
23 | #include "asan_thread.h" | ||||||||
24 | #include "lsan/lsan_common.h" | ||||||||
25 | #include "sanitizer_common/sanitizer_allocator_checks.h" | ||||||||
26 | #include "sanitizer_common/sanitizer_allocator_interface.h" | ||||||||
27 | #include "sanitizer_common/sanitizer_errno.h" | ||||||||
28 | #include "sanitizer_common/sanitizer_flags.h" | ||||||||
29 | #include "sanitizer_common/sanitizer_internal_defs.h" | ||||||||
30 | #include "sanitizer_common/sanitizer_list.h" | ||||||||
31 | #include "sanitizer_common/sanitizer_quarantine.h" | ||||||||
32 | #include "sanitizer_common/sanitizer_stackdepot.h" | ||||||||
33 | |||||||||
34 | namespace __asan { | ||||||||
35 | |||||||||
36 | // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. | ||||||||
37 | // We use adaptive redzones: for larger allocation larger redzones are used. | ||||||||
38 | static u32 RZLog2Size(u32 rz_log) { | ||||||||
39 | CHECK_LT(rz_log, 8);
40 | return 16 << rz_log; | ||||||||
41 | } | ||||||||
42 | |||||||||
43 | static u32 RZSize2Log(u32 rz_size) { | ||||||||
44 | CHECK_GE(rz_size, 16);
45 | CHECK_LE(rz_size, 2048);
46 | CHECK(IsPowerOfTwo(rz_size));
47 | u32 res = Log2(rz_size) - 4;
48 | CHECK_EQ(rz_size, RZLog2Size(res));
49 | return res; | ||||||||
50 | } | ||||||||
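
The two helpers above are exact inverses over the eight encodable redzone sizes. A minimal standalone sketch of the same 3-bit encoding in plain C++ (names here are illustrative, not from the file):

  #include <cassert>
  static unsigned rz_log2size(unsigned rz_log) { return 16u << rz_log; }  // 0..7 -> 16..2048 bytes
  static unsigned rz_size2log(unsigned rz_size) {
    unsigned log = 0;
    while ((16u << log) < rz_size) ++log;  // rz_size is a power of two in [16, 2048]
    return log;                            // e.g. 128 -> 3
  }
  int main() {
    for (unsigned log = 0; log < 8; ++log)
      assert(rz_size2log(rz_log2size(log)) == log);  // round-trips for all 8 sizes
  }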
51 | |||||||||
52 | static AsanAllocator &get_allocator(); | ||||||||
53 | |||||||||
54 | // The memory chunk allocated from the underlying allocator looks like this: | ||||||||
55 | // L L L L L L H H U U U U U U R R | ||||||||
56 | // L -- left redzone words (0 or more bytes) | ||||||||
57 | // H -- ChunkHeader (16 bytes), which is also a part of the left redzone. | ||||||||
58 | // U -- user memory. | ||||||||
59 | // R -- right redzone (0 or more bytes) | ||||||||
60 | // ChunkBase consists of ChunkHeader and other bytes that overlap with user | ||||||||
61 | // memory. | ||||||||
62 | |||||||||
63 | // If the left redzone is greater than the ChunkHeader size we store a magic | ||||||||
64 | // value in the first uptr word of the memory block and store the address of | ||||||||
65 | // ChunkBase in the next uptr. | ||||||||
66 | // M B L L L L L L L L L H H U U U U U U | ||||||||
67 | // | ^ | ||||||||
68 | // ---------------------| | ||||||||
69 | // M -- magic value kAllocBegMagic | ||||||||
70 | // B -- address of ChunkHeader pointing to the first 'H' | ||||||||
71 | static const uptr kAllocBegMagic = 0xCC6E96B9; | ||||||||
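
A standalone sketch of the magic/back-pointer scheme described above (plain C++, illustrative only): when the header does not sit at the start of the block, the first two words of the block hold kAllocBegMagic and the chunk address, and that is how GetAsanChunk() (lines 730-732 further down) maps a block start back to its ChunkHeader.

  #include <cassert>
  #include <cstdint>
  static const uintptr_t kMagic = 0xCC6E96B9;                    // plays the role of kAllocBegMagic
  int main() {
    alignas(16) static unsigned char block[256];                 // stands in for an allocator block
    uintptr_t alloc_beg = reinterpret_cast<uintptr_t>(block);
    uintptr_t chunk_beg = alloc_beg + 64;                        // header placed after a large left redzone
    reinterpret_cast<uintptr_t *>(alloc_beg)[0] = kMagic;        // the 'M' word
    reinterpret_cast<uintptr_t *>(alloc_beg)[1] = chunk_beg;     // the 'B' word
    // Lookup side: given only the block start, recover the chunk the way GetAsanChunk does.
    uintptr_t *words = reinterpret_cast<uintptr_t *>(alloc_beg);
    uintptr_t found = (words[0] == kMagic) ? words[1] : alloc_beg;
    assert(found == chunk_beg);
  }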
72 | |||||||||
73 | struct ChunkHeader { | ||||||||
74 | // 1-st 8 bytes. | ||||||||
75 | u32 chunk_state : 8; // Must be first. | ||||||||
76 | u32 alloc_tid : 24; | ||||||||
77 | |||||||||
78 | u32 free_tid : 24; | ||||||||
79 | u32 from_memalign : 1; | ||||||||
80 | u32 alloc_type : 2; | ||||||||
81 | u32 rz_log : 3; | ||||||||
82 | u32 lsan_tag : 2; | ||||||||
83 | // 2-nd 8 bytes | ||||||||
84 | // This field is used for small sizes. For large sizes it is equal to | ||||||||
85 | // SizeClassMap::kMaxSize and the actual size is stored in the | ||||||||
86 | // SecondaryAllocator's metadata. | ||||||||
87 | u32 user_requested_size : 29; | ||||||||
88 | // align < 8 -> 0 | ||||||||
89 | // else -> log2(min(align, 512)) - 2 | ||||||||
90 | u32 user_requested_alignment_log : 3; | ||||||||
91 | u32 alloc_context_id; | ||||||||
92 | }; | ||||||||
93 | |||||||||
94 | struct ChunkBase : ChunkHeader { | ||||||||
95 | // Header2, intersects with user memory. | ||||||||
96 | u32 free_context_id; | ||||||||
97 | }; | ||||||||
98 | |||||||||
99 | static const uptr kChunkHeaderSize = sizeof(ChunkHeader); | ||||||||
100 | static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; | ||||||||
101 | COMPILER_CHECK(kChunkHeaderSize == 16);
102 | COMPILER_CHECK(kChunkHeader2Size <= 16);
103 | |||||||||
104 | // Every chunk of memory allocated by this allocator can be in one of 3 states: | ||||||||
105 | // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. | ||||||||
106 | // CHUNK_ALLOCATED: the chunk is allocated and not yet freed. | ||||||||
107 | // CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. | ||||||||
108 | enum { | ||||||||
109 | CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. | ||||||||
110 | CHUNK_ALLOCATED = 2, | ||||||||
111 | CHUNK_QUARANTINE = 3 | ||||||||
112 | }; | ||||||||
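
The three states form a cycle: Allocate() publishes CHUNK_ALLOCATED as its last metadata store (line 560), the free path flips ALLOCATED to QUARANTINE with a compare-exchange so a double free is detected (AtomicallySetQuarantineFlagIfAllocated, lines 567-580), and Recycle() finally resets the chunk to CHUNK_AVAILABLE (line 144). A minimal standalone sketch of that double-free guard, assuming nothing beyond std::atomic:

  #include <atomic>
  #include <cassert>
  enum : unsigned char { AVAILABLE = 0, ALLOCATED = 2, QUARANTINE = 3 };
  int main() {
    std::atomic<unsigned char> state{ALLOCATED};
    unsigned char expected = ALLOCATED;
    assert(state.compare_exchange_strong(expected, QUARANTINE));   // first free: ALLOCATED -> QUARANTINE
    expected = ALLOCATED;
    assert(!state.compare_exchange_strong(expected, QUARANTINE));  // second free: the CAS fails...
    assert(expected == QUARANTINE);                                // ...and 'expected' carries the old state
  }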
113 | |||||||||
114 | struct AsanChunk: ChunkBase { | ||||||||
115 | uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } | ||||||||
116 | uptr UsedSize(bool locked_version = false) { | ||||||||
117 | if (user_requested_size != SizeClassMap::kMaxSize) | ||||||||
118 | return user_requested_size; | ||||||||
119 | return *reinterpret_cast<uptr *>( | ||||||||
120 | get_allocator().GetMetaData(AllocBeg(locked_version))); | ||||||||
121 | } | ||||||||
122 | void *AllocBeg(bool locked_version = false) { | ||||||||
123 | if (from_memalign) { | ||||||||
124 | if (locked_version) | ||||||||
125 | return get_allocator().GetBlockBeginFastLocked( | ||||||||
126 | reinterpret_cast<void *>(this)); | ||||||||
127 | return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this)); | ||||||||
128 | } | ||||||||
129 | return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); | ||||||||
130 | } | ||||||||
131 | bool AddrIsInside(uptr addr, bool locked_version = false) { | ||||||||
132 | return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); | ||||||||
133 | } | ||||||||
134 | }; | ||||||||
135 | |||||||||
136 | struct QuarantineCallback { | ||||||||
137 | QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) | ||||||||
138 | : cache_(cache), | ||||||||
139 | stack_(stack) { | ||||||||
140 | } | ||||||||
141 | |||||||||
142 | void Recycle(AsanChunk *m) { | ||||||||
143 | CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
144 | atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
145 | CHECK_NE(m->alloc_tid, kInvalidTid);
146 | CHECK_NE(m->free_tid, kInvalidTid);
147 | PoisonShadow(m->Beg(),
148 | RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
149 | kAsanHeapLeftRedzoneMagic); | ||||||||
150 | void *p = reinterpret_cast<void *>(m->AllocBeg()); | ||||||||
151 | if (p != m) { | ||||||||
152 | uptr *alloc_magic = reinterpret_cast<uptr *>(p); | ||||||||
153 | CHECK_EQ(alloc_magic[0], kAllocBegMagic);
154 | // Clear the magic value, as allocator internals may overwrite the | ||||||||
155 | // contents of deallocated chunk, confusing GetAsanChunk lookup. | ||||||||
156 | alloc_magic[0] = 0; | ||||||||
157 | CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
158 | } | ||||||||
159 | |||||||||
160 | // Statistics. | ||||||||
161 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
162 | thread_stats.real_frees++; | ||||||||
163 | thread_stats.really_freed += m->UsedSize(); | ||||||||
164 | |||||||||
165 | get_allocator().Deallocate(cache_, p); | ||||||||
166 | } | ||||||||
167 | |||||||||
168 | void *Allocate(uptr size) { | ||||||||
169 | void *res = get_allocator().Allocate(cache_, size, 1); | ||||||||
170 | // TODO(alekseys): Consider making quarantine OOM-friendly. | ||||||||
171 | if (UNLIKELY(!res))
172 | ReportOutOfMemory(size, stack_); | ||||||||
173 | return res; | ||||||||
174 | } | ||||||||
175 | |||||||||
176 | void Deallocate(void *p) { | ||||||||
177 | get_allocator().Deallocate(cache_, p); | ||||||||
178 | } | ||||||||
179 | |||||||||
180 | private: | ||||||||
181 | AllocatorCache* const cache_; | ||||||||
182 | BufferedStackTrace* const stack_; | ||||||||
183 | }; | ||||||||
184 | |||||||||
185 | typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; | ||||||||
186 | typedef AsanQuarantine::Cache QuarantineCache; | ||||||||
187 | |||||||||
188 | void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { | ||||||||
189 | PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); | ||||||||
190 | // Statistics. | ||||||||
191 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
192 | thread_stats.mmaps++; | ||||||||
193 | thread_stats.mmaped += size; | ||||||||
194 | } | ||||||||
195 | void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { | ||||||||
196 | PoisonShadow(p, size, 0); | ||||||||
197 | // We are about to unmap a chunk of user memory. | ||||||||
198 | // Mark the corresponding shadow memory as not needed. | ||||||||
199 | FlushUnneededASanShadowMemory(p, size); | ||||||||
200 | // Statistics. | ||||||||
201 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
202 | thread_stats.munmaps++; | ||||||||
203 | thread_stats.munmaped += size; | ||||||||
204 | } | ||||||||
205 | |||||||||
206 | // We can not use THREADLOCAL because it is not supported on some of the | ||||||||
207 | // platforms we care about (OSX 10.6, Android). | ||||||||
208 | // static THREADLOCAL AllocatorCache cache; | ||||||||
209 | AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { | ||||||||
210 | CHECK(ms);
211 | return &ms->allocator_cache; | ||||||||
212 | } | ||||||||
213 | |||||||||
214 | QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { | ||||||||
215 | CHECK(ms);
216 | CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
217 | return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); | ||||||||
218 | } | ||||||||
219 | |||||||||
220 | void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { | ||||||||
221 | quarantine_size_mb = f->quarantine_size_mb; | ||||||||
222 | thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; | ||||||||
223 | min_redzone = f->redzone; | ||||||||
224 | max_redzone = f->max_redzone; | ||||||||
225 | may_return_null = cf->allocator_may_return_null; | ||||||||
226 | alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; | ||||||||
227 | release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; | ||||||||
228 | } | ||||||||
229 | |||||||||
230 | void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { | ||||||||
231 | f->quarantine_size_mb = quarantine_size_mb; | ||||||||
232 | f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; | ||||||||
233 | f->redzone = min_redzone; | ||||||||
234 | f->max_redzone = max_redzone; | ||||||||
235 | cf->allocator_may_return_null = may_return_null; | ||||||||
236 | f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; | ||||||||
237 | cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms; | ||||||||
238 | } | ||||||||
239 | |||||||||
240 | struct Allocator { | ||||||||
241 | static const uptr kMaxAllowedMallocSize = | ||||||||
242 | FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
243 | |||||||||
244 | AsanAllocator allocator; | ||||||||
245 | AsanQuarantine quarantine; | ||||||||
246 | StaticSpinMutex fallback_mutex; | ||||||||
247 | AllocatorCache fallback_allocator_cache; | ||||||||
248 | QuarantineCache fallback_quarantine_cache; | ||||||||
249 | |||||||||
250 | uptr max_user_defined_malloc_size; | ||||||||
251 | atomic_uint8_t rss_limit_exceeded; | ||||||||
252 | |||||||||
253 | // ------------------- Options -------------------------- | ||||||||
254 | atomic_uint16_t min_redzone; | ||||||||
255 | atomic_uint16_t max_redzone; | ||||||||
256 | atomic_uint8_t alloc_dealloc_mismatch; | ||||||||
257 | |||||||||
258 | // ------------------- Initialization ------------------------ | ||||||||
259 | explicit Allocator(LinkerInitialized) | ||||||||
260 | : quarantine(LINKER_INITIALIZED), | ||||||||
261 | fallback_quarantine_cache(LINKER_INITIALIZED) {} | ||||||||
262 | |||||||||
263 | void CheckOptions(const AllocatorOptions &options) const { | ||||||||
264 | CHECK_GE(options.min_redzone, 16);
265 | CHECK_GE(options.max_redzone, options.min_redzone);
266 | CHECK_LE(options.max_redzone, 2048);
267 | CHECK(IsPowerOfTwo(options.min_redzone));
268 | CHECK(IsPowerOfTwo(options.max_redzone));
269 | } | ||||||||
270 | |||||||||
271 | void SharedInitCode(const AllocatorOptions &options) { | ||||||||
272 | CheckOptions(options); | ||||||||
273 | quarantine.Init((uptr)options.quarantine_size_mb << 20, | ||||||||
274 | (uptr)options.thread_local_quarantine_size_kb << 10); | ||||||||
275 | atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, | ||||||||
276 | memory_order_release); | ||||||||
277 | atomic_store(&min_redzone, options.min_redzone, memory_order_release); | ||||||||
278 | atomic_store(&max_redzone, options.max_redzone, memory_order_release); | ||||||||
279 | } | ||||||||
280 | |||||||||
281 | void InitLinkerInitialized(const AllocatorOptions &options) { | ||||||||
282 | SetAllocatorMayReturnNull(options.may_return_null); | ||||||||
283 | allocator.InitLinkerInitialized(options.release_to_os_interval_ms); | ||||||||
284 | SharedInitCode(options); | ||||||||
285 | max_user_defined_malloc_size = common_flags()->max_allocation_size_mb | ||||||||
286 | ? common_flags()->max_allocation_size_mb | ||||||||
287 | << 20 | ||||||||
288 | : kMaxAllowedMallocSize; | ||||||||
289 | } | ||||||||
290 | |||||||||
291 | bool RssLimitExceeded() { | ||||||||
292 | return atomic_load(&rss_limit_exceeded, memory_order_relaxed); | ||||||||
293 | } | ||||||||
294 | |||||||||
295 | void SetRssLimitExceeded(bool limit_exceeded) { | ||||||||
296 | atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); | ||||||||
297 | } | ||||||||
298 | |||||||||
299 | void RePoisonChunk(uptr chunk) { | ||||||||
300 | // This could be a user-facing chunk (with redzones), or some internal | ||||||||
301 | // housekeeping chunk, like TransferBatch. Start by assuming the former. | ||||||||
302 | AsanChunk *ac = GetAsanChunk((void *)chunk); | ||||||||
303 | uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac); | ||||||||
304 | uptr beg = ac->Beg(); | ||||||||
305 | uptr end = ac->Beg() + ac->UsedSize(true); | ||||||||
306 | uptr chunk_end = chunk + allocated_size; | ||||||||
307 | if (chunk < beg && beg < end && end <= chunk_end && | ||||||||
308 | ac->chunk_state == CHUNK_ALLOCATED) { | ||||||||
309 | // Looks like a valid AsanChunk in use, poison redzones only. | ||||||||
310 | PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); | ||||||||
311 | uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
312 | FastPoisonShadowPartialRightRedzone( | ||||||||
313 | end_aligned_down, end - end_aligned_down, | ||||||||
314 | chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); | ||||||||
315 | } else { | ||||||||
316 | // This is either not an AsanChunk or freed or quarantined AsanChunk. | ||||||||
317 | // In either case, poison everything. | ||||||||
318 | PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); | ||||||||
319 | } | ||||||||
320 | } | ||||||||
321 | |||||||||
322 | void ReInitialize(const AllocatorOptions &options) { | ||||||||
323 | SetAllocatorMayReturnNull(options.may_return_null); | ||||||||
324 | allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); | ||||||||
325 | SharedInitCode(options); | ||||||||
326 | |||||||||
327 | // Poison all existing allocation's redzones. | ||||||||
328 | if (CanPoisonMemory()) { | ||||||||
329 | allocator.ForceLock(); | ||||||||
330 | allocator.ForEachChunk( | ||||||||
331 | [](uptr chunk, void *alloc) { | ||||||||
332 | ((Allocator *)alloc)->RePoisonChunk(chunk); | ||||||||
333 | }, | ||||||||
334 | this); | ||||||||
335 | allocator.ForceUnlock(); | ||||||||
336 | } | ||||||||
337 | } | ||||||||
338 | |||||||||
339 | void GetOptions(AllocatorOptions *options) const { | ||||||||
340 | options->quarantine_size_mb = quarantine.GetSize() >> 20; | ||||||||
341 | options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10; | ||||||||
342 | options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); | ||||||||
343 | options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); | ||||||||
344 | options->may_return_null = AllocatorMayReturnNull(); | ||||||||
345 | options->alloc_dealloc_mismatch = | ||||||||
346 | atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); | ||||||||
347 | options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs(); | ||||||||
348 | } | ||||||||
349 | |||||||||
350 | // -------------------- Helper methods. ------------------------- | ||||||||
351 | uptr ComputeRZLog(uptr user_requested_size) { | ||||||||
352 | u32 rz_log = | ||||||||
353 | user_requested_size <= 64 - 16 ? 0 : | ||||||||
354 | user_requested_size <= 128 - 32 ? 1 : | ||||||||
355 | user_requested_size <= 512 - 64 ? 2 : | ||||||||
356 | user_requested_size <= 4096 - 128 ? 3 : | ||||||||
357 | user_requested_size <= (1 << 14) - 256 ? 4 : | ||||||||
358 | user_requested_size <= (1 << 15) - 512 ? 5 : | ||||||||
359 | user_requested_size <= (1 << 16) - 1024 ? 6 : 7; | ||||||||
360 | u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); | ||||||||
361 | u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); | ||||||||
362 | return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); | ||||||||
363 | } | ||||||||
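
As a worked example (assuming the default redzone flags, so no extra clamping): a 1000-byte request falls into the `size <= 4096 - 128` bucket, so rz_log is 3 and the redzone is 16 << 3 = 128 bytes; the Min/Max only matters when flags raise redzone above, or lower max_redzone below, that bucket's size. A standalone sketch of the same bucket-then-clamp logic (illustrative names):

  #include <algorithm>
  #include <cassert>
  static unsigned compute_rz_log(unsigned long size, unsigned min_log, unsigned max_log) {
    unsigned log = size <= 64 - 16 ? 0 :
                   size <= 128 - 32 ? 1 :
                   size <= 512 - 64 ? 2 :
                   size <= 4096 - 128 ? 3 :
                   size <= (1ul << 14) - 256 ? 4 :
                   size <= (1ul << 15) - 512 ? 5 :
                   size <= (1ul << 16) - 1024 ? 6 : 7;
    return std::min(std::max(log, min_log), max_log);  // clamp to [RZSize2Log(min_rz), RZSize2Log(max_rz)]
  }
  int main() {
    assert(compute_rz_log(1000, 0, 7) == 3);  // 1000 bytes -> 128-byte redzone (16 << 3)
    assert(compute_rz_log(8, 2, 7) == 2);     // a 64-byte minimum redzone forces at least rz_log 2
  }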
364 | |||||||||
365 | static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) { | ||||||||
366 | if (user_requested_alignment < 8) | ||||||||
367 | return 0; | ||||||||
368 | if (user_requested_alignment > 512) | ||||||||
369 | user_requested_alignment = 512; | ||||||||
370 | return Log2(user_requested_alignment) - 2; | ||||||||
371 | } | ||||||||
372 | |||||||||
373 | static uptr ComputeUserAlignment(uptr user_requested_alignment_log) { | ||||||||
374 | if (user_requested_alignment_log == 0) | ||||||||
375 | return 0; | ||||||||
376 | return 1LL << (user_requested_alignment_log + 2); | ||||||||
377 | } | ||||||||
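
Worked example of the 3-bit alignment encoding used by the two helpers above: a requested alignment of 8 encodes as log2(8) - 2 = 1 and decodes back to 8; 512 encodes as 7; anything larger is clamped to 512 first; anything below 8 encodes as 0, meaning no explicit alignment is recorded. A standalone round-trip check (illustrative names):

  #include <cassert>
  static unsigned encode_align(unsigned long a) {
    if (a < 8) return 0;
    if (a > 512) a = 512;
    unsigned log = 0;
    while ((1ul << log) < a) ++log;  // log2 of a power-of-two alignment
    return log - 2;                  // fits the 3-bit user_requested_alignment_log field
  }
  static unsigned long decode_align(unsigned enc) { return enc ? 1ul << (enc + 2) : 0; }
  int main() {
    assert(encode_align(8) == 1 && decode_align(1) == 8);
    assert(encode_align(512) == 7 && decode_align(7) == 512);
    assert(encode_align(4096) == 7);  // clamped to 512 before encoding
    assert(encode_align(4) == 0);     // below 8 -> "default"; decode gives 0
  }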
378 | |||||||||
379 | // We have an address between two chunks, and we want to report just one. | ||||||||
380 | AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, | ||||||||
381 | AsanChunk *right_chunk) { | ||||||||
382 | // Prefer an allocated chunk over freed chunk and freed chunk | ||||||||
383 | // over available chunk. | ||||||||
384 | if (left_chunk->chunk_state != right_chunk->chunk_state) { | ||||||||
385 | if (left_chunk->chunk_state == CHUNK_ALLOCATED) | ||||||||
386 | return left_chunk; | ||||||||
387 | if (right_chunk->chunk_state == CHUNK_ALLOCATED) | ||||||||
388 | return right_chunk; | ||||||||
389 | if (left_chunk->chunk_state == CHUNK_QUARANTINE) | ||||||||
390 | return left_chunk; | ||||||||
391 | if (right_chunk->chunk_state == CHUNK_QUARANTINE) | ||||||||
392 | return right_chunk; | ||||||||
393 | } | ||||||||
394 | // Same chunk_state: choose based on offset. | ||||||||
395 | sptr l_offset = 0, r_offset = 0; | ||||||||
396 | CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
397 | CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
398 | if (l_offset < r_offset) | ||||||||
399 | return left_chunk; | ||||||||
400 | return right_chunk; | ||||||||
401 | } | ||||||||
402 | |||||||||
403 | bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) { | ||||||||
404 | AsanChunk *m = GetAsanChunkByAddr(addr); | ||||||||
405 | if (!m) return false; | ||||||||
406 | if (m->chunk_state != CHUNK_ALLOCATED) return false; | ||||||||
407 | if (m->Beg() != addr) return false; | ||||||||
408 | atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack), | ||||||||
409 | memory_order_relaxed); | ||||||||
410 | return true; | ||||||||
411 | } | ||||||||
412 | |||||||||
413 | // -------------------- Allocation/Deallocation routines --------------- | ||||||||
414 | void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, | ||||||||
415 | AllocType alloc_type, bool can_fill) { | ||||||||
416 | if (UNLIKELY(!asan_inited))
417 | AsanInitFromRtl(); | ||||||||
418 | if (RssLimitExceeded()) { | ||||||||
419 | if (AllocatorMayReturnNull()) | ||||||||
420 | return nullptr; | ||||||||
421 | ReportRssLimitExceeded(stack); | ||||||||
422 | } | ||||||||
423 | Flags &fl = *flags(); | ||||||||
424 | CHECK(stack);
425 | const uptr min_alignment = SHADOW_GRANULARITY;
426 | const uptr user_requested_alignment_log = | ||||||||
427 | ComputeUserRequestedAlignmentLog(alignment); | ||||||||
428 | if (alignment < min_alignment)
429 | alignment = min_alignment; | ||||||||
430 | if (size == 0) { | ||||||||
431 | // We'd be happy to avoid allocating memory for zero-size requests, but | ||||||||
432 | // some programs/tests depend on this behavior and assume that malloc | ||||||||
433 | // would not return NULL even for zero-size allocations. Moreover, it | ||||||||
434 | // looks like operator new should never return NULL, and results of | ||||||||
435 | // consecutive "new" calls must be different even if the allocated size | ||||||||
436 | // is zero. | ||||||||
437 | size = 1; | ||||||||
438 | } | ||||||||
439 | CHECK(IsPowerOfTwo(alignment));
440 | uptr rz_log = ComputeRZLog(size); | ||||||||
441 | uptr rz_size = RZLog2Size(rz_log); | ||||||||
442 | uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); | ||||||||
443 | uptr needed_size = rounded_size + rz_size; | ||||||||
444 | if (alignment > min_alignment) | ||||||||
445 | needed_size += alignment; | ||||||||
446 | bool using_primary_allocator = true; | ||||||||
447 | // If we are allocating from the secondary allocator, there will be no | ||||||||
448 | // automatic right redzone, so add the right redzone manually. | ||||||||
449 | if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { | ||||||||
450 | needed_size += rz_size; | ||||||||
451 | using_primary_allocator = false; | ||||||||
452 | } | ||||||||
453 | CHECK(IsAligned(needed_size, min_alignment));
454 | if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize || | ||||||||
455 | size > max_user_defined_malloc_size) { | ||||||||
456 | if (AllocatorMayReturnNull()) { | ||||||||
457 | Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", | ||||||||
458 | (void*)size); | ||||||||
459 | return nullptr; | ||||||||
460 | } | ||||||||
461 | uptr malloc_limit = | ||||||||
462 | Min(kMaxAllowedMallocSize, max_user_defined_malloc_size); | ||||||||
463 | ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack); | ||||||||
464 | } | ||||||||
465 | |||||||||
466 | AsanThread *t = GetCurrentThread(); | ||||||||
467 | void *allocated; | ||||||||
468 | if (t) { | ||||||||
469 | AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); | ||||||||
470 | allocated = allocator.Allocate(cache, needed_size, 8); | ||||||||
471 | } else { | ||||||||
472 | SpinMutexLock l(&fallback_mutex); | ||||||||
473 | AllocatorCache *cache = &fallback_allocator_cache; | ||||||||
474 | allocated = allocator.Allocate(cache, needed_size, 8); | ||||||||
475 | } | ||||||||
476 | if (UNLIKELY(!allocated)) {
477 | SetAllocatorOutOfMemory(); | ||||||||
478 | if (AllocatorMayReturnNull()) | ||||||||
479 | return nullptr; | ||||||||
480 | ReportOutOfMemory(size, stack); | ||||||||
481 | } | ||||||||
482 | |||||||||
483 | if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
484 | // Heap poisoning is enabled, but the allocator provides an unpoisoned | ||||||||
485 | // chunk. This is possible if CanPoisonMemory() was false for some | ||||||||
486 | // time, for example, due to flags()->start_disabled. | ||||||||
487 | // Anyway, poison the block before using it for anything else. | ||||||||
488 | uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); | ||||||||
489 | PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); | ||||||||
490 | } | ||||||||
491 | |||||||||
492 | uptr alloc_beg = reinterpret_cast<uptr>(allocated); | ||||||||
493 | uptr alloc_end = alloc_beg + needed_size; | ||||||||
494 | uptr beg_plus_redzone = alloc_beg + rz_size; | ||||||||
495 | uptr user_beg = beg_plus_redzone; | ||||||||
496 | if (!IsAligned(user_beg, alignment)) | ||||||||
497 | user_beg = RoundUpTo(user_beg, alignment); | ||||||||
498 | uptr user_end = user_beg + size; | ||||||||
499 | CHECK_LE(user_end, alloc_end);
500 | uptr chunk_beg = user_beg - kChunkHeaderSize; | ||||||||
501 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); | ||||||||
502 | m->alloc_type = alloc_type; | ||||||||
503 | m->rz_log = rz_log; | ||||||||
504 | u32 alloc_tid = t ? t->tid() : 0;
505 | m->alloc_tid = alloc_tid;
506 | CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
507 | m->free_tid = kInvalidTid; | ||||||||
508 | m->from_memalign = user_beg != beg_plus_redzone; | ||||||||
509 | if (alloc_beg != chunk_beg) { | ||||||||
510 | CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
511 | reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      warning: Array access (from variable 'alloc_beg') results in a null pointer dereference
512 | reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
513 | } | ||||||||
514 | if (using_primary_allocator) { | ||||||||
515 | CHECK(size);
516 | m->user_requested_size = size;
517 | CHECK(allocator.FromPrimary(allocated));
518 | } else {
519 | CHECK(!allocator.FromPrimary(allocated));
520 | m->user_requested_size = SizeClassMap::kMaxSize; | ||||||||
521 | uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); | ||||||||
522 | meta[0] = size; | ||||||||
523 | meta[1] = chunk_beg; | ||||||||
524 | } | ||||||||
525 | m->user_requested_alignment_log = user_requested_alignment_log; | ||||||||
526 | |||||||||
527 | m->alloc_context_id = StackDepotPut(*stack); | ||||||||
528 | |||||||||
529 | uptr size_rounded_down_to_granularity = | ||||||||
530 | RoundDownTo(size, SHADOW_GRANULARITY);
531 | // Unpoison the bulk of the memory region. | ||||||||
532 | if (size_rounded_down_to_granularity) | ||||||||
533 | PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); | ||||||||
534 | // Deal with the end of the region if size is not aligned to granularity. | ||||||||
535 | if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { | ||||||||
536 | u8 *shadow = | ||||||||
537 | (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); | ||||||||
538 | *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
539 | } | ||||||||
540 | |||||||||
541 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
542 | thread_stats.mallocs++; | ||||||||
543 | thread_stats.malloced += size; | ||||||||
544 | thread_stats.malloced_redzones += needed_size - size; | ||||||||
545 | if (needed_size > SizeClassMap::kMaxSize) | ||||||||
546 | thread_stats.malloc_large++; | ||||||||
547 | else | ||||||||
548 | thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; | ||||||||
549 | |||||||||
550 | void *res = reinterpret_cast<void *>(user_beg); | ||||||||
551 | if (can_fill && fl.max_malloc_fill_size) { | ||||||||
552 | uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); | ||||||||
553 | REAL(memset)(res, fl.malloc_fill_byte, fill_size);
554 | } | ||||||||
555 | #if CAN_SANITIZE_LEAKS
556 | m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored | ||||||||
557 | : __lsan::kDirectlyLeaked; | ||||||||
558 | #endif | ||||||||
559 | // Must be the last mutation of metadata in this function. | ||||||||
560 | atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); | ||||||||
561 | ASAN_MALLOC_HOOK(res, size);
562 | return res; | ||||||||
563 | } | ||||||||
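
To make the layout arithmetic above concrete, here is one illustrative request (size = 100, default 8-byte alignment, default redzone flags; the numbers are examples, not taken from a real run): ComputeRZLog(100) is 2, so rz_size = 64, rounded_size = 104, needed_size = 168; user_beg lands 64 bytes into the block and chunk_beg 16 bytes before that, so chunk_beg != alloc_beg and the two magic words are written at alloc_beg (the store flagged at line 511).

  #include <cassert>
  #include <cstdint>
  int main() {
    const uintptr_t kChunkHeaderSize = 16;
    uintptr_t size = 100, rz_size = 64, alignment = 8;                   // rz_size = 16 << ComputeRZLog(100)
    uintptr_t rounded_size = (size + alignment - 1) & ~(alignment - 1);  // 104
    uintptr_t needed_size = rounded_size + rz_size;                      // 168
    uintptr_t alloc_beg = 0x1000;                                        // pretend block start
    uintptr_t user_beg = alloc_beg + rz_size;                            // 0x1040, already 8-aligned
    uintptr_t chunk_beg = user_beg - kChunkHeaderSize;                   // 0x1030
    assert(rounded_size == 104 && needed_size == 168 && chunk_beg != alloc_beg);
  }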
564 | |||||||||
565 | // Set quarantine flag if chunk is allocated, issue ASan error report on | ||||||||
566 | // available and quarantined chunks. Return true on success, false otherwise. | ||||||||
567 | bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, | ||||||||
568 | BufferedStackTrace *stack) { | ||||||||
569 | u8 old_chunk_state = CHUNK_ALLOCATED; | ||||||||
570 | // Flip the chunk_state atomically to avoid race on double-free. | ||||||||
571 | if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state, | ||||||||
572 | CHUNK_QUARANTINE, | ||||||||
573 | memory_order_acquire)) { | ||||||||
574 | ReportInvalidFree(ptr, old_chunk_state, stack); | ||||||||
575 | // It's not safe to push a chunk in quarantine on invalid free. | ||||||||
576 | return false; | ||||||||
577 | } | ||||||||
578 | CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
579 | return true; | ||||||||
580 | } | ||||||||
581 | |||||||||
582 | // Expects the chunk to already be marked as quarantined by using | ||||||||
583 | // AtomicallySetQuarantineFlagIfAllocated. | ||||||||
584 | void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { | ||||||||
585 | CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
586 | CHECK_GE(m->alloc_tid, 0);
587 | if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
588 | CHECK_EQ(m->free_tid, kInvalidTid);
589 | AsanThread *t = GetCurrentThread(); | ||||||||
590 | m->free_tid = t ? t->tid() : 0; | ||||||||
591 | m->free_context_id = StackDepotPut(*stack); | ||||||||
592 | |||||||||
593 | Flags &fl = *flags(); | ||||||||
594 | if (fl.max_free_fill_size > 0) { | ||||||||
595 | // We have to skip the chunk header, it contains free_context_id. | ||||||||
596 | uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size; | ||||||||
597 | if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. | ||||||||
598 | uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; | ||||||||
599 | size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); | ||||||||
600 | REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
601 | } | ||||||||
602 | } | ||||||||
603 | |||||||||
604 | // Poison the region. | ||||||||
605 | PoisonShadow(m->Beg(), | ||||||||
606 | RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
607 | kAsanHeapFreeMagic); | ||||||||
608 | |||||||||
609 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
610 | thread_stats.frees++; | ||||||||
611 | thread_stats.freed += m->UsedSize(); | ||||||||
612 | |||||||||
613 | // Push into quarantine. | ||||||||
614 | if (t) { | ||||||||
615 | AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); | ||||||||
616 | AllocatorCache *ac = GetAllocatorCache(ms); | ||||||||
617 | quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, | ||||||||
618 | m->UsedSize()); | ||||||||
619 | } else { | ||||||||
620 | SpinMutexLock l(&fallback_mutex); | ||||||||
621 | AllocatorCache *ac = &fallback_allocator_cache; | ||||||||
622 | quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), | ||||||||
623 | m, m->UsedSize()); | ||||||||
624 | } | ||||||||
625 | } | ||||||||
626 | |||||||||
627 | void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment, | ||||||||
628 | BufferedStackTrace *stack, AllocType alloc_type) { | ||||||||
629 | uptr p = reinterpret_cast<uptr>(ptr); | ||||||||
630 | if (p == 0) return; | ||||||||
631 | |||||||||
632 | uptr chunk_beg = p - kChunkHeaderSize; | ||||||||
633 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); | ||||||||
634 | |||||||||
635 | // On Windows, uninstrumented DLLs may allocate memory before ASan hooks | ||||||||
636 | // malloc. Don't report an invalid free in this case. | ||||||||
637 | if (SANITIZER_WINDOWS &&
638 | !get_allocator().PointerIsMine(ptr)) { | ||||||||
639 | if (!IsSystemHeapAddress(p)) | ||||||||
640 | ReportFreeNotMalloced(p, stack); | ||||||||
641 | return; | ||||||||
642 | } | ||||||||
643 | |||||||||
644 | ASAN_FREE_HOOK(ptr);
645 | |||||||||
646 | // Must mark the chunk as quarantined before any changes to its metadata. | ||||||||
647 | // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. | ||||||||
648 | if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; | ||||||||
649 | |||||||||
650 | if (m->alloc_type != alloc_type) { | ||||||||
651 | if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { | ||||||||
652 | ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, | ||||||||
653 | (AllocType)alloc_type); | ||||||||
654 | } | ||||||||
655 | } else { | ||||||||
656 | if (flags()->new_delete_type_mismatch && | ||||||||
657 | (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) && | ||||||||
658 | ((delete_size && delete_size != m->UsedSize()) || | ||||||||
659 | ComputeUserRequestedAlignmentLog(delete_alignment) != | ||||||||
660 | m->user_requested_alignment_log)) { | ||||||||
661 | ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack); | ||||||||
662 | } | ||||||||
663 | } | ||||||||
664 | |||||||||
665 | QuarantineChunk(m, ptr, stack); | ||||||||
666 | } | ||||||||
667 | |||||||||
668 | void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { | ||||||||
669 | CHECK(old_ptr && new_size);
670 | uptr p = reinterpret_cast<uptr>(old_ptr); | ||||||||
671 | uptr chunk_beg = p - kChunkHeaderSize; | ||||||||
672 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); | ||||||||
673 | |||||||||
674 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
675 | thread_stats.reallocs++; | ||||||||
676 | thread_stats.realloced += new_size; | ||||||||
677 | |||||||||
678 | void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); | ||||||||
679 | if (new_ptr) { | ||||||||
680 | u8 chunk_state = m->chunk_state; | ||||||||
681 | if (chunk_state != CHUNK_ALLOCATED) | ||||||||
682 | ReportInvalidFree(old_ptr, chunk_state, stack); | ||||||||
683 | CHECK_NE(REAL(memcpy), nullptr);
684 | uptr memcpy_size = Min(new_size, m->UsedSize()); | ||||||||
685 | // If realloc() races with free(), we may start copying freed memory. | ||||||||
686 | // However, we will report racy double-free later anyway. | ||||||||
687 | REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
688 | Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC); | ||||||||
689 | } | ||||||||
690 | return new_ptr; | ||||||||
691 | } | ||||||||
692 | |||||||||
693 | void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { | ||||||||
694 | if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
695 | if (AllocatorMayReturnNull()) | ||||||||
696 | return nullptr; | ||||||||
697 | ReportCallocOverflow(nmemb, size, stack); | ||||||||
698 | } | ||||||||
699 | void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); | ||||||||
700 | // If the memory comes from the secondary allocator no need to clear it | ||||||||
701 | // as it comes directly from mmap. | ||||||||
702 | if (ptr && allocator.FromPrimary(ptr)) | ||||||||
703 | REAL(memset)(ptr, 0, nmemb * size);
704 | return ptr; | ||||||||
705 | } | ||||||||
706 | |||||||||
707 | void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { | ||||||||
708 | if (chunk_state == CHUNK_QUARANTINE) | ||||||||
709 | ReportDoubleFree((uptr)ptr, stack); | ||||||||
710 | else | ||||||||
711 | ReportFreeNotMalloced((uptr)ptr, stack); | ||||||||
712 | } | ||||||||
713 | |||||||||
714 | void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { | ||||||||
715 | AllocatorCache *ac = GetAllocatorCache(ms); | ||||||||
716 | quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); | ||||||||
717 | allocator.SwallowCache(ac); | ||||||||
718 | } | ||||||||
719 | |||||||||
720 | // -------------------------- Chunk lookup ---------------------- | ||||||||
721 | |||||||||
722 | // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). | ||||||||
723 | AsanChunk *GetAsanChunk(void *alloc_beg) { | ||||||||
724 | if (!alloc_beg) return nullptr; | ||||||||
725 | if (!allocator.FromPrimary(alloc_beg)) { | ||||||||
726 | uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); | ||||||||
727 | AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); | ||||||||
728 | return m; | ||||||||
729 | } | ||||||||
730 | uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); | ||||||||
731 | if (alloc_magic[0] == kAllocBegMagic) | ||||||||
732 | return reinterpret_cast<AsanChunk *>(alloc_magic[1]); | ||||||||
733 | // FIXME: This is either valid small chunk with tiny redzone or invalid | ||||||||
734 | // chunk which is being allocated/deallocated. The latter case should
735 | // return nullptr like secondary allocator does. | ||||||||
736 | return reinterpret_cast<AsanChunk *>(alloc_beg); | ||||||||
737 | } | ||||||||
738 | |||||||||
739 | AsanChunk *GetAsanChunkDebug(void *alloc_beg) { | ||||||||
740 | if (!alloc_beg) return nullptr; | ||||||||
741 | if (!allocator.FromPrimary(alloc_beg)) { | ||||||||
742 | uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); | ||||||||
743 | AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); | ||||||||
744 | Printf("GetAsanChunkDebug1 alloc_beg %p meta %p m %p\n", alloc_beg, meta, m); | ||||||||
745 | return m; | ||||||||
746 | } | ||||||||
747 | uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); | ||||||||
748 | Printf( | ||||||||
749 | "GetAsanChunkDebug2 alloc_beg %p alloc_magic %p alloc_magic[0] %p " | ||||||||
750 | "alloc_magic[1] %p\n", | ||||||||
751 | alloc_beg, alloc_magic, alloc_magic[0], alloc_magic[1]); | ||||||||
752 | if (alloc_magic[0] == kAllocBegMagic) | ||||||||
753 | return reinterpret_cast<AsanChunk *>(alloc_magic[1]); | ||||||||
754 | return reinterpret_cast<AsanChunk *>(alloc_beg); | ||||||||
755 | } | ||||||||
756 | |||||||||
757 | |||||||||
758 | AsanChunk *GetAsanChunkByAddr(uptr p) { | ||||||||
759 | void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); | ||||||||
760 | return GetAsanChunk(alloc_beg); | ||||||||
761 | } | ||||||||
762 | |||||||||
763 | // Allocator must be locked when this function is called. | ||||||||
764 | AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { | ||||||||
765 | void *alloc_beg = | ||||||||
766 | allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); | ||||||||
767 | return GetAsanChunk(alloc_beg); | ||||||||
768 | } | ||||||||
769 | |||||||||
770 | AsanChunk *GetAsanChunkByAddrFastLockedDebug(uptr p) { | ||||||||
771 | void *alloc_beg = | ||||||||
772 | allocator.GetBlockBeginFastLockedDebug(reinterpret_cast<void *>(p)); | ||||||||
773 | Printf("GetAsanChunkByAddrFastLockedDebug p %p alloc_beg %p\n", p, alloc_beg); | ||||||||
774 | return GetAsanChunkDebug(alloc_beg); | ||||||||
775 | } | ||||||||
776 | |||||||||
777 | uptr AllocationSize(uptr p) { | ||||||||
778 | AsanChunk *m = GetAsanChunkByAddr(p); | ||||||||
779 | if (!m) return 0; | ||||||||
780 | if (m->chunk_state != CHUNK_ALLOCATED) return 0; | ||||||||
781 | if (m->Beg() != p) return 0; | ||||||||
782 | return m->UsedSize(); | ||||||||
783 | } | ||||||||
784 | |||||||||
785 | AsanChunkView FindHeapChunkByAddress(uptr addr) { | ||||||||
786 | AsanChunk *m1 = GetAsanChunkByAddr(addr); | ||||||||
787 | if (!m1) return AsanChunkView(m1); | ||||||||
788 | sptr offset = 0; | ||||||||
789 | if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { | ||||||||
790 | // The address is in the chunk's left redzone, so maybe it is actually | ||||||||
791 | // a right buffer overflow from the other chunk to the left. | ||||||||
792 | // Search a bit to the left to see if there is another chunk. | ||||||||
793 | AsanChunk *m2 = nullptr; | ||||||||
794 | for (uptr l = 1; l < GetPageSizeCached(); l++) { | ||||||||
795 | m2 = GetAsanChunkByAddr(addr - l); | ||||||||
796 | if (m2 == m1) continue; // Still the same chunk. | ||||||||
797 | break; | ||||||||
798 | } | ||||||||
799 | if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) | ||||||||
800 | m1 = ChooseChunk(addr, m2, m1); | ||||||||
801 | } | ||||||||
802 | return AsanChunkView(m1); | ||||||||
803 | } | ||||||||
804 | |||||||||
805 | void Purge(BufferedStackTrace *stack) { | ||||||||
806 | AsanThread *t = GetCurrentThread(); | ||||||||
807 | if (t) { | ||||||||
808 | AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); | ||||||||
809 | quarantine.DrainAndRecycle(GetQuarantineCache(ms), | ||||||||
810 | QuarantineCallback(GetAllocatorCache(ms), | ||||||||
811 | stack)); | ||||||||
812 | } | ||||||||
813 | { | ||||||||
814 | SpinMutexLock l(&fallback_mutex); | ||||||||
815 | quarantine.DrainAndRecycle(&fallback_quarantine_cache, | ||||||||
816 | QuarantineCallback(&fallback_allocator_cache, | ||||||||
817 | stack)); | ||||||||
818 | } | ||||||||
819 | |||||||||
820 | allocator.ForceReleaseToOS(); | ||||||||
821 | } | ||||||||
822 | |||||||||
823 | void PrintStats() { | ||||||||
824 | allocator.PrintStats(); | ||||||||
825 | quarantine.PrintStats(); | ||||||||
826 | } | ||||||||
827 | |||||||||
828 | void ForceLock() { | ||||||||
829 | allocator.ForceLock(); | ||||||||
830 | fallback_mutex.Lock(); | ||||||||
831 | } | ||||||||
832 | |||||||||
833 | void ForceUnlock() { | ||||||||
834 | fallback_mutex.Unlock(); | ||||||||
835 | allocator.ForceUnlock(); | ||||||||
836 | } | ||||||||
837 | }; | ||||||||
838 | |||||||||
839 | static Allocator instance(LINKER_INITIALIZED); | ||||||||
840 | |||||||||
841 | static AsanAllocator &get_allocator() { | ||||||||
842 | return instance.allocator; | ||||||||
843 | } | ||||||||
844 | |||||||||
845 | bool AsanChunkView::IsValid() const { | ||||||||
846 | return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; | ||||||||
847 | } | ||||||||
848 | bool AsanChunkView::IsAllocated() const { | ||||||||
849 | return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED; | ||||||||
850 | } | ||||||||
851 | bool AsanChunkView::IsQuarantined() const { | ||||||||
852 | return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE; | ||||||||
853 | } | ||||||||
854 | uptr AsanChunkView::Beg() const { return chunk_->Beg(); } | ||||||||
855 | uptr AsanChunkView::End() const { return Beg() + UsedSize(); } | ||||||||
856 | uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); } | ||||||||
857 | u32 AsanChunkView::UserRequestedAlignment() const { | ||||||||
858 | return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log); | ||||||||
859 | } | ||||||||
860 | uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; } | ||||||||
861 | uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; } | ||||||||
862 | AllocType AsanChunkView::GetAllocType() const { | ||||||||
863 | return (AllocType)chunk_->alloc_type; | ||||||||
864 | } | ||||||||
865 | |||||||||
866 | static StackTrace GetStackTraceFromId(u32 id) { | ||||||||
867 | CHECK(id)do { __sanitizer::u64 v1 = (__sanitizer::u64)((id)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/asan_allocator.cpp" , 867, "(" "(id)" ") " "!=" " (" "0" ")", v1, v2); } while (false ); | ||||||||
868 | StackTrace res = StackDepotGet(id); | ||||||||
869 | CHECK(res.trace)do { __sanitizer::u64 v1 = (__sanitizer::u64)((res.trace)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/asan_allocator.cpp" , 869, "(" "(res.trace)" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
870 | return res; | ||||||||
871 | } | ||||||||
872 | |||||||||
873 | u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; } | ||||||||
874 | u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; } | ||||||||
875 | |||||||||
876 | StackTrace AsanChunkView::GetAllocStack() const { | ||||||||
877 | return GetStackTraceFromId(GetAllocStackId()); | ||||||||
878 | } | ||||||||
879 | |||||||||
880 | StackTrace AsanChunkView::GetFreeStack() const { | ||||||||
881 | return GetStackTraceFromId(GetFreeStackId()); | ||||||||
882 | } | ||||||||
883 | |||||||||
884 | void InitializeAllocator(const AllocatorOptions &options) { | ||||||||
885 | instance.InitLinkerInitialized(options); | ||||||||
886 | } | ||||||||
887 | |||||||||
888 | void ReInitializeAllocator(const AllocatorOptions &options) { | ||||||||
889 | instance.ReInitialize(options); | ||||||||
890 | } | ||||||||
891 | |||||||||
892 | void GetAllocatorOptions(AllocatorOptions *options) { | ||||||||
893 | instance.GetOptions(options); | ||||||||
894 | } | ||||||||
895 | |||||||||
896 | AsanChunkView FindHeapChunkByAddress(uptr addr) { | ||||||||
897 | return instance.FindHeapChunkByAddress(addr); | ||||||||
898 | } | ||||||||
899 | AsanChunkView FindHeapChunkByAllocBeg(uptr addr) { | ||||||||
900 | return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr))); | ||||||||
901 | } | ||||||||
902 | |||||||||
903 | void AsanThreadLocalMallocStorage::CommitBack() { | ||||||||
904 | GET_STACK_TRACE_MALLOCBufferedStackTrace stack; if (GetMallocContextSize() <= 2) { stack.size = GetMallocContextSize(); if (GetMallocContextSize () > 0) { stack.top_frame_bp = (__sanitizer::uptr) __builtin_frame_address (0); stack.trace_buffer[0] = StackTrace::GetCurrentPc(); if ( GetMallocContextSize() > 1) stack.trace_buffer[1] = (__sanitizer ::uptr) __builtin_return_address(0); } } else { stack.Unwind( StackTrace::GetCurrentPc(), (__sanitizer::uptr) __builtin_frame_address (0), nullptr, common_flags()->fast_unwind_on_malloc, GetMallocContextSize ()); }; | ||||||||
905 | instance.CommitBack(this, &stack); | ||||||||
906 | } | ||||||||
907 | |||||||||
908 | void PrintInternalAllocatorStats() { | ||||||||
909 | instance.PrintStats(); | ||||||||
910 | } | ||||||||
911 | |||||||||
912 | void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { | ||||||||
913 | instance.Deallocate(ptr, 0, 0, stack, alloc_type); | ||||||||
914 | } | ||||||||
915 | |||||||||
916 | void asan_delete(void *ptr, uptr size, uptr alignment, | ||||||||
917 | BufferedStackTrace *stack, AllocType alloc_type) { | ||||||||
918 | instance.Deallocate(ptr, size, alignment, stack, alloc_type); | ||||||||
919 | } | ||||||||
920 | |||||||||
921 | void *asan_malloc(uptr size, BufferedStackTrace *stack) { | ||||||||
922 | return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); | ||||||||
923 | } | ||||||||
924 | |||||||||
925 | void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { | ||||||||
926 | return SetErrnoOnNull(instance.Calloc(nmemb, size, stack)); | ||||||||
927 | } | ||||||||
928 | |||||||||
929 | void *asan_reallocarray(void *p, uptr nmemb, uptr size, | ||||||||
930 | BufferedStackTrace *stack) { | ||||||||
931 | if (UNLIKELY(CheckForCallocOverflow(size, nmemb))__builtin_expect(!!(CheckForCallocOverflow(size, nmemb)), 0)) { | ||||||||
932 | errno(*__errno_location()) = errno_ENOMEM12; | ||||||||
933 | if (AllocatorMayReturnNull()) | ||||||||
934 | return nullptr; | ||||||||
935 | ReportReallocArrayOverflow(nmemb, size, stack); | ||||||||
936 | } | ||||||||
937 | return asan_realloc(p, nmemb * size, stack); | ||||||||
938 | } | ||||||||
939 | |||||||||
940 | void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { | ||||||||
941 | if (!p) | ||||||||
942 | return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); | ||||||||
943 | if (size == 0) { | ||||||||
944 | if (flags()->allocator_frees_and_returns_null_on_realloc_zero) { | ||||||||
945 | instance.Deallocate(p, 0, 0, stack, FROM_MALLOC); | ||||||||
946 | return nullptr; | ||||||||
947 | } | ||||||||
948 | // Allocate a size of 1 if we shouldn't free() on Realloc to 0 | ||||||||
949 | size = 1; | ||||||||
950 | } | ||||||||
951 | return SetErrnoOnNull(instance.Reallocate(p, size, stack)); | ||||||||
952 | } | ||||||||
953 | |||||||||
954 | void *asan_valloc(uptr size, BufferedStackTrace *stack) { | ||||||||
955 | return SetErrnoOnNull( | ||||||||
956 | instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true)); | ||||||||
957 | } | ||||||||
958 | |||||||||
959 | void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { | ||||||||
960 | uptr PageSize = GetPageSizeCached(); | ||||||||
961 | if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))__builtin_expect(!!(CheckForPvallocOverflow(size, PageSize)), 0)) { | ||||||||
962 | errno(*__errno_location()) = errno_ENOMEM12; | ||||||||
963 | if (AllocatorMayReturnNull()) | ||||||||
964 | return nullptr; | ||||||||
965 | ReportPvallocOverflow(size, stack); | ||||||||
966 | } | ||||||||
967 | // pvalloc(0) should allocate one page. | ||||||||
968 | size = size ? RoundUpTo(size, PageSize) : PageSize; | ||||||||
969 | return SetErrnoOnNull( | ||||||||
970 | instance.Allocate(size, PageSize, stack, FROM_MALLOC, true)); | ||||||||
971 | } | ||||||||
972 | |||||||||
973 | void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, | ||||||||
974 | AllocType alloc_type) { | ||||||||
975 | if (UNLIKELY(!IsPowerOfTwo(alignment))__builtin_expect(!!(!IsPowerOfTwo(alignment)), 0)) { | ||||||||
976 | errno(*__errno_location()) = errno_EINVAL22; | ||||||||
977 | if (AllocatorMayReturnNull()) | ||||||||
978 | return nullptr; | ||||||||
979 | ReportInvalidAllocationAlignment(alignment, stack); | ||||||||
980 | } | ||||||||
981 | return SetErrnoOnNull( | ||||||||
982 | instance.Allocate(size, alignment, stack, alloc_type, true)); | ||||||||
983 | } | ||||||||
984 | |||||||||
985 | void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { | ||||||||
986 | if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))__builtin_expect(!!(!CheckAlignedAllocAlignmentAndSize(alignment , size)), 0)) { | ||||||||
987 | errno(*__errno_location()) = errno_EINVAL22; | ||||||||
988 | if (AllocatorMayReturnNull()) | ||||||||
989 | return nullptr; | ||||||||
990 | ReportInvalidAlignedAllocAlignment(size, alignment, stack); | ||||||||
991 | } | ||||||||
992 | return SetErrnoOnNull( | ||||||||
993 | instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); | ||||||||
994 | } | ||||||||
995 | |||||||||
996 | int asan_posix_memalign(void **memptr, uptr alignment, uptr size, | ||||||||
997 | BufferedStackTrace *stack) { | ||||||||
998 | if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))__builtin_expect(!!(!CheckPosixMemalignAlignment(alignment)), 0)) { | ||||||||
999 | if (AllocatorMayReturnNull()) | ||||||||
1000 | return errno_EINVAL22; | ||||||||
1001 | ReportInvalidPosixMemalignAlignment(alignment, stack); | ||||||||
1002 | } | ||||||||
1003 | void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); | ||||||||
1004 | if (UNLIKELY(!ptr)__builtin_expect(!!(!ptr), 0)) | ||||||||
1005 | // OOM error is already taken care of by Allocate. | ||||||||
1006 | return errno_ENOMEM12; | ||||||||
1007 | CHECK(IsAligned((uptr)ptr, alignment))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsAligned((uptr )ptr, alignment))); __sanitizer::u64 v2 = (__sanitizer::u64)( 0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/asan_allocator.cpp" , 1007, "(" "(IsAligned((uptr)ptr, alignment))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
1008 | *memptr = ptr; | ||||||||
1009 | return 0; | ||||||||
1010 | } | ||||||||
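
A minimal user-side sketch of the contract asan_posix_memalign enforces above, assuming a glibc target built with -fsanitize=address and run with ASAN_OPTIONS=allocator_may_return_null=1 (so an invalid alignment is reported as EINVAL instead of aborting). Illustrative only, not part of the runtime:

    #include <cassert>
    #include <cerrno>
    #include <cstdint>
    #include <cstdlib>

    int main() {
      void *p = nullptr;
      // Valid request: power-of-two alignment that is a multiple of sizeof(void*).
      assert(posix_memalign(&p, 64, 100) == 0);
      assert((reinterpret_cast<uintptr_t>(p) & 63) == 0);  // 64-byte aligned, as CHECKed above
      free(p);
      // Invalid alignment: rejected with EINVAL (errno_EINVAL in the listing).
      assert(posix_memalign(&p, 3, 100) == EINVAL);
      return 0;
    }
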
1011 | |||||||||
1012 | uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { | ||||||||
1013 | if (!ptr) return 0; | ||||||||
1014 | uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr)); | ||||||||
1015 | if (flags()->check_malloc_usable_size && (usable_size == 0)) { | ||||||||
1016 | GET_STACK_TRACE_FATAL(pc, bp)BufferedStackTrace stack; stack.Unwind(pc, bp, nullptr, common_flags ()->fast_unwind_on_fatal); | ||||||||
1017 | ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); | ||||||||
1018 | } | ||||||||
1019 | return usable_size; | ||||||||
1020 | } | ||||||||
1021 | |||||||||
1022 | uptr asan_mz_size(const void *ptr) { | ||||||||
1023 | return instance.AllocationSize(reinterpret_cast<uptr>(ptr)); | ||||||||
1024 | } | ||||||||
1025 | |||||||||
1026 | void asan_mz_force_lock() { | ||||||||
1027 | instance.ForceLock(); | ||||||||
1028 | } | ||||||||
1029 | |||||||||
1030 | void asan_mz_force_unlock() { | ||||||||
1031 | instance.ForceUnlock(); | ||||||||
1032 | } | ||||||||
1033 | |||||||||
1034 | void AsanSoftRssLimitExceededCallback(bool limit_exceeded) { | ||||||||
1035 | instance.SetRssLimitExceeded(limit_exceeded); | ||||||||
1036 | } | ||||||||
1037 | |||||||||
1038 | } // namespace __asan | ||||||||
1039 | |||||||||
1040 | // --- Implementation of LSan-specific functions --- {{{1 | ||||||||
1041 | namespace __lsan { | ||||||||
1042 | void LockAllocator() { | ||||||||
1043 | __asan::get_allocator().ForceLock(); | ||||||||
1044 | } | ||||||||
1045 | |||||||||
1046 | void UnlockAllocator() { | ||||||||
1047 | __asan::get_allocator().ForceUnlock(); | ||||||||
1048 | } | ||||||||
1049 | |||||||||
1050 | void GetAllocatorGlobalRange(uptr *begin, uptr *end) { | ||||||||
1051 | *begin = (uptr)&__asan::get_allocator(); | ||||||||
1052 | *end = *begin + sizeof(__asan::get_allocator()); | ||||||||
1053 | } | ||||||||
1054 | |||||||||
1055 | uptr PointsIntoChunk(void* p) { | ||||||||
1056 | uptr addr = reinterpret_cast<uptr>(p); | ||||||||
1057 | __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); | ||||||||
1058 | if (!m) return 0; | ||||||||
1059 | uptr chunk = m->Beg(); | ||||||||
1060 | if (m->chunk_state != __asan::CHUNK_ALLOCATED) | ||||||||
1061 | return 0; | ||||||||
1062 | if (m->AddrIsInside(addr, /*locked_version=*/true)) | ||||||||
1063 | return chunk; | ||||||||
1064 | if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), | ||||||||
1065 | addr)) | ||||||||
1066 | return chunk; | ||||||||
1067 | return 0; | ||||||||
1068 | } | ||||||||
1069 | |||||||||
1070 | // Debug code. Delete once issue #1193 is chased down. | ||||||||
1071 | extern "C" SANITIZER_WEAK_ATTRIBUTE__attribute__((weak)) const char *__lsan_current_stage; | ||||||||
1072 | |||||||||
1073 | void GetUserBeginDebug(uptr chunk) { | ||||||||
1074 | Printf("GetUserBeginDebug1 chunk %p\n", chunk); | ||||||||
1075 | __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLockedDebug(chunk); | ||||||||
1076 | Printf("GetUserBeginDebug2 m %p\n", m); | ||||||||
1077 | } | ||||||||
1078 | |||||||||
1079 | uptr GetUserBegin(uptr chunk) { | ||||||||
1080 | __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); | ||||||||
1081 | if (!m) { | ||||||||
1082 | Printf( | ||||||||
1083 | "ASAN is about to crash with a CHECK failure.\n" | ||||||||
1084 | "The ASAN developers are trying to chase down this bug,\n" | ||||||||
1085 | "so if you've encountered this bug please let us know.\n" | ||||||||
1086 | "See also: https://github.com/google/sanitizers/issues/1193\n" | ||||||||
1087 | "Internal ref b/149237057\n" | ||||||||
1088 | "chunk: %p caller %p __lsan_current_stage %s\n", | ||||||||
1089 | chunk, GET_CALLER_PC()(__sanitizer::uptr) __builtin_return_address(0), __lsan_current_stage); | ||||||||
1090 | GetUserBeginDebug(chunk); | ||||||||
1091 | } | ||||||||
1092 | CHECK(m)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/asan_allocator.cpp" , 1092, "(" "(m)" ") " "!=" " (" "0" ")", v1, v2); } while (false ); | ||||||||
1093 | return m->Beg(); | ||||||||
1094 | } | ||||||||
1095 | |||||||||
1096 | LsanMetadata::LsanMetadata(uptr chunk) { | ||||||||
1097 | metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); | ||||||||
1098 | } | ||||||||
1099 | |||||||||
1100 | bool LsanMetadata::allocated() const { | ||||||||
1101 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1102 | return m->chunk_state == __asan::CHUNK_ALLOCATED; | ||||||||
1103 | } | ||||||||
1104 | |||||||||
1105 | ChunkTag LsanMetadata::tag() const { | ||||||||
1106 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1107 | return static_cast<ChunkTag>(m->lsan_tag); | ||||||||
1108 | } | ||||||||
1109 | |||||||||
1110 | void LsanMetadata::set_tag(ChunkTag value) { | ||||||||
1111 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1112 | m->lsan_tag = value; | ||||||||
1113 | } | ||||||||
1114 | |||||||||
1115 | uptr LsanMetadata::requested_size() const { | ||||||||
1116 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1117 | return m->UsedSize(/*locked_version=*/true); | ||||||||
1118 | } | ||||||||
1119 | |||||||||
1120 | u32 LsanMetadata::stack_trace_id() const { | ||||||||
1121 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1122 | return m->alloc_context_id; | ||||||||
1123 | } | ||||||||
1124 | |||||||||
1125 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | ||||||||
1126 | __asan::get_allocator().ForEachChunk(callback, arg); | ||||||||
1127 | } | ||||||||
1128 | |||||||||
1129 | IgnoreObjectResult IgnoreObjectLocked(const void *p) { | ||||||||
1130 | uptr addr = reinterpret_cast<uptr>(p); | ||||||||
1131 | __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); | ||||||||
1132 | if (!m) return kIgnoreObjectInvalid; | ||||||||
1133 | if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { | ||||||||
1134 | if (m->lsan_tag == kIgnored) | ||||||||
1135 | return kIgnoreObjectAlreadyIgnored; | ||||||||
1136 | m->lsan_tag = __lsan::kIgnored; | ||||||||
1137 | return kIgnoreObjectSuccess; | ||||||||
1138 | } else { | ||||||||
1139 | return kIgnoreObjectInvalid; | ||||||||
1140 | } | ||||||||
1141 | } | ||||||||
1142 | } // namespace __lsan | ||||||||
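
The IgnoreObjectLocked() path above backs the public __lsan_ignore_object() entry point. A short sketch of how a user would reach it, assuming <sanitizer/lsan_interface.h> is available and the program is built with -fsanitize=address (illustrative, not part of the runtime):

    #include <sanitizer/lsan_interface.h>
    #include <cstdlib>

    int main() {
      void *intentionally_leaked = malloc(64);
      // Tags the owning AsanChunk with kIgnored via IgnoreObjectLocked(),
      // so LeakSanitizer will not report this block at exit.
      __lsan_ignore_object(intentionally_leaked);
      return 0;
    }
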
1143 | |||||||||
1144 | // ---------------------- Interface ---------------- {{{1 | ||||||||
1145 | using namespace __asan; | ||||||||
1146 | |||||||||
1147 | // ASan allocator doesn't reserve extra bytes, so normally we would | ||||||||
1148 | // just return "size". We don't want to expose our redzone sizes, etc. here. | ||||||||
1149 | uptr __sanitizer_get_estimated_allocated_size(uptr size) { | ||||||||
1150 | return size; | ||||||||
1151 | } | ||||||||
1152 | |||||||||
1153 | int __sanitizer_get_ownership(const void *p) { | ||||||||
1154 | uptr ptr = reinterpret_cast<uptr>(p); | ||||||||
1155 | return instance.AllocationSize(ptr) > 0; | ||||||||
1156 | } | ||||||||
1157 | |||||||||
1158 | uptr __sanitizer_get_allocated_size(const void *p) { | ||||||||
1159 | if (!p) return 0; | ||||||||
1160 | uptr ptr = reinterpret_cast<uptr>(p); | ||||||||
1161 | uptr allocated_size = instance.AllocationSize(ptr); | ||||||||
1162 | // Die if p is not malloced or if it is already freed. | ||||||||
1163 | if (allocated_size == 0) { | ||||||||
1164 | GET_STACK_TRACE_FATAL_HEREBufferedStackTrace stack; if (kStackTraceMax <= 2) { stack .size = kStackTraceMax; if (kStackTraceMax > 0) { stack.top_frame_bp = (__sanitizer::uptr) __builtin_frame_address(0); stack.trace_buffer [0] = StackTrace::GetCurrentPc(); if (kStackTraceMax > 1) stack .trace_buffer[1] = (__sanitizer::uptr) __builtin_return_address (0); } } else { stack.Unwind(StackTrace::GetCurrentPc(), (__sanitizer ::uptr) __builtin_frame_address(0), nullptr, common_flags()-> fast_unwind_on_fatal, kStackTraceMax); }; | ||||||||
1165 | ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); | ||||||||
1166 | } | ||||||||
1167 | return allocated_size; | ||||||||
1168 | } | ||||||||
1169 | |||||||||
1170 | void __sanitizer_purge_allocator() { | ||||||||
1171 | GET_STACK_TRACE_MALLOCBufferedStackTrace stack; if (GetMallocContextSize() <= 2) { stack.size = GetMallocContextSize(); if (GetMallocContextSize () > 0) { stack.top_frame_bp = (__sanitizer::uptr) __builtin_frame_address (0); stack.trace_buffer[0] = StackTrace::GetCurrentPc(); if ( GetMallocContextSize() > 1) stack.trace_buffer[1] = (__sanitizer ::uptr) __builtin_return_address(0); } } else { stack.Unwind( StackTrace::GetCurrentPc(), (__sanitizer::uptr) __builtin_frame_address (0), nullptr, common_flags()->fast_unwind_on_malloc, GetMallocContextSize ()); }; | ||||||||
1172 | instance.Purge(&stack); | ||||||||
1173 | } | ||||||||
1174 | |||||||||
1175 | int __asan_update_allocation_context(void* addr) { | ||||||||
1176 | GET_STACK_TRACE_MALLOCBufferedStackTrace stack; if (GetMallocContextSize() <= 2) { stack.size = GetMallocContextSize(); if (GetMallocContextSize () > 0) { stack.top_frame_bp = (__sanitizer::uptr) __builtin_frame_address (0); stack.trace_buffer[0] = StackTrace::GetCurrentPc(); if ( GetMallocContextSize() > 1) stack.trace_buffer[1] = (__sanitizer ::uptr) __builtin_return_address(0); } } else { stack.Unwind( StackTrace::GetCurrentPc(), (__sanitizer::uptr) __builtin_frame_address (0), nullptr, common_flags()->fast_unwind_on_malloc, GetMallocContextSize ()); }; | ||||||||
1177 | return instance.UpdateAllocationStack((uptr)addr, &stack); | ||||||||
1178 | } | ||||||||
1179 | |||||||||
1180 | #if !SANITIZER_SUPPORTS_WEAK_HOOKS1 | ||||||||
1181 | // Provide default (no-op) implementation of malloc hooks. | ||||||||
1182 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,extern "C" __attribute__((visibility("default"))) __attribute__ ((weak)) void __sanitizer_malloc_hook(void *ptr, uptr size) | ||||||||
1183 | void *ptr, uptr size)extern "C" __attribute__((visibility("default"))) __attribute__ ((weak)) void __sanitizer_malloc_hook(void *ptr, uptr size) { | ||||||||
1184 | (void)ptr; | ||||||||
1185 | (void)size; | ||||||||
1186 | } | ||||||||
1187 | |||||||||
1188 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr)extern "C" __attribute__((visibility("default"))) __attribute__ ((weak)) void __sanitizer_free_hook(void *ptr) { | ||||||||
1189 | (void)ptr; | ||||||||
1190 | } | ||||||||
1191 | #endif |
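
A small sketch of how the __sanitizer_get_* interface functions defined above behave from the application side, assuming <sanitizer/allocator_interface.h> is available and the program is built with -fsanitize=address (illustrative only):

    #include <sanitizer/allocator_interface.h>
    #include <cassert>
    #include <cstdlib>

    int main() {
      void *p = malloc(100);
      assert(__sanitizer_get_ownership(p));                          // live chunk owned by ASan
      assert(__sanitizer_get_allocated_size(p) == 100);              // UsedSize() == requested size
      assert(__sanitizer_get_estimated_allocated_size(100) == 100);  // no extra bytes reserved
      free(p);
      // The chunk is now quarantined (CHUNK_QUARANTINE), so AllocationSize()
      // returns 0 and ownership is reported as false.
      assert(!__sanitizer_get_ownership(p));
      return 0;
    }
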
1 | //===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // This file is a part of ThreadSanitizer/AddressSanitizer runtime. | ||||||||
10 | // Not intended for direct inclusion. Include sanitizer_atomic.h. | ||||||||
11 | // | ||||||||
12 | //===----------------------------------------------------------------------===// | ||||||||
13 | |||||||||
14 | #ifndef SANITIZER_ATOMIC_CLANG_X86_H | ||||||||
15 | #define SANITIZER_ATOMIC_CLANG_X86_H | ||||||||
16 | |||||||||
17 | namespace __sanitizer { | ||||||||
18 | |||||||||
19 | INLINEinline void proc_yield(int cnt) { | ||||||||
20 | __asm__ __volatile__("" ::: "memory"); | ||||||||
21 | for (int i = 0; i < cnt; i++) | ||||||||
22 | __asm__ __volatile__("pause"); | ||||||||
23 | __asm__ __volatile__("" ::: "memory"); | ||||||||
24 | } | ||||||||
25 | |||||||||
26 | template<typename T> | ||||||||
27 | INLINEinline typename T::Type atomic_load( | ||||||||
28 | const volatile T *a, memory_order mo) { | ||||||||
29 | DCHECK(mo & (memory_order_relaxed | memory_order_consume | ||||||||
30 | | memory_order_acquire | memory_order_seq_cst)); | ||||||||
31 | DCHECK(!((uptr)a % sizeof(*a))); | ||||||||
32 | typename T::Type v; | ||||||||
33 | |||||||||
34 | if (sizeof(*a) < 8 || sizeof(void*) == 8) { | ||||||||
35 | // Assume that aligned loads are atomic. | ||||||||
36 | if (mo == memory_order_relaxed) { | ||||||||
37 | v = a->val_dont_use; | ||||||||
38 | } else if (mo == memory_order_consume) { | ||||||||
39 | // Assume that processor respects data dependencies | ||||||||
40 | // (and that compiler won't break them). | ||||||||
41 | __asm__ __volatile__("" ::: "memory"); | ||||||||
42 | v = a->val_dont_use; | ||||||||
43 | __asm__ __volatile__("" ::: "memory"); | ||||||||
44 | } else if (mo == memory_order_acquire) { | ||||||||
45 | __asm__ __volatile__("" ::: "memory"); | ||||||||
46 | v = a->val_dont_use; | ||||||||
47 | // On x86 loads are implicitly acquire. | ||||||||
48 | __asm__ __volatile__("" ::: "memory"); | ||||||||
49 | } else { // seq_cst | ||||||||
50 | // On x86 a plain MOV is enough for a seq_cst load; seq_cst stores add the fence. | ||||||||
51 | __asm__ __volatile__("" ::: "memory"); | ||||||||
52 | v = a->val_dont_use; | ||||||||
53 | __asm__ __volatile__("" ::: "memory"); | ||||||||
54 | } | ||||||||
55 | } else { | ||||||||
56 | // 64-bit load on 32-bit platform. | ||||||||
57 | __asm__ __volatile__( | ||||||||
58 | "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves | ||||||||
59 | "movq %%mm0, %0;" // (ptr could be read-only) | ||||||||
60 | "emms;" // Empty mmx state/Reset FP regs | ||||||||
61 | : "=m" (v) | ||||||||
62 | : "m" (a->val_dont_use) | ||||||||
63 | : // mark the mmx registers as clobbered | ||||||||
64 | #ifdef __MMX__1 | ||||||||
65 | "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", | ||||||||
66 | #endif // #ifdef __MMX__ | ||||||||
67 | "memory"); | ||||||||
68 | } | ||||||||
69 | return v; | ||||||||
70 | } | ||||||||
71 | |||||||||
72 | template<typename T> | ||||||||
73 | INLINEinline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { | ||||||||
74 | DCHECK(mo & (memory_order_relaxed | memory_order_release | ||||||||
75 | | memory_order_seq_cst)); | ||||||||
76 | DCHECK(!((uptr)a % sizeof(*a))); | ||||||||
77 | |||||||||
78 | if (sizeof(*a) < 8 || sizeof(void*) == 8) { | ||||||||
79 | // Assume that aligned stores are atomic. | ||||||||
80 | if (mo == memory_order_relaxed) { | ||||||||
81 | a->val_dont_use = v; | ||||||||
82 | } else if (mo == memory_order_release) { | ||||||||
83 | // On x86 stores are implicitly release. | ||||||||
84 | __asm__ __volatile__("" ::: "memory"); | ||||||||
85 | a->val_dont_use = v; | ||||||||
86 | __asm__ __volatile__("" ::: "memory"); | ||||||||
87 | } else { // seq_cst | ||||||||
88 | // On x86 stores are implicitly release. | ||||||||
89 | __asm__ __volatile__("" ::: "memory"); | ||||||||
90 | a->val_dont_use = v; | ||||||||
91 | __sync_synchronize(); | ||||||||
92 | } | ||||||||
93 | } else { | ||||||||
94 | // 64-bit store on 32-bit platform. | ||||||||
95 | __asm__ __volatile__( | ||||||||
96 | "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves | ||||||||
97 | "movq %%mm0, %0;" | ||||||||
98 | "emms;" // Empty mmx state/Reset FP regs | ||||||||
99 | : "=m" (a->val_dont_use) | ||||||||
100 | : "m" (v) | ||||||||
101 | : // mark the mmx registers as clobbered | ||||||||
102 | #ifdef __MMX__1 | ||||||||
103 | "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", | ||||||||
104 | #endif // #ifdef __MMX__ | ||||||||
105 | "memory"); | ||||||||
106 | if (mo == memory_order_seq_cst) | ||||||||
107 | __sync_synchronize(); | ||||||||
108 | } | ||||||||
109 | } | ||||||||
110 | |||||||||
111 | } // namespace __sanitizer | ||||||||
112 | |||||||||
113 | #endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H |
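
A sketch of the release/acquire pairing these primitives are meant to provide, written as it would look inside the runtime and assuming sanitizer_atomic.h and sanitizer_common's CHECK macros are available (illustrative, not taken from the source):

    namespace __sanitizer {

    static atomic_uint32_t flag;   // zero-initialized
    static u32 payload;

    void Publisher() {
      payload = 42;                                    // plain write
      atomic_store(&flag, 1u, memory_order_release);   // makes the payload visible
    }

    void Consumer() {
      // Once the acquire load observes 1, the payload write is guaranteed visible.
      while (atomic_load(&flag, memory_order_acquire) == 0)
        proc_yield(10);                                // PAUSE-based spin from above
      CHECK_EQ(payload, 42u);
    }

    }  // namespace __sanitizer
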
1 | //===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // Part of the Sanitizer Allocator. | ||||||||
10 | // | ||||||||
11 | //===----------------------------------------------------------------------===// | ||||||||
12 | #ifndef SANITIZER_ALLOCATOR_H | ||||||||
13 | #error This file must be included inside sanitizer_allocator.h | ||||||||
14 | #endif | ||||||||
15 | |||||||||
16 | // This class implements a complete memory allocator by using two | ||||||||
17 | // internal allocators: | ||||||||
18 | // PrimaryAllocator is efficient, but may not allocate some sizes (alignments). | ||||||||
19 | // When allocating 2^x bytes it should return 2^x aligned chunk. | ||||||||
20 | // PrimaryAllocator is used via a local AllocatorCache. | ||||||||
21 | // SecondaryAllocator can allocate anything, but is not efficient. | ||||||||
22 | template <class PrimaryAllocator, | ||||||||
23 | class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray> | ||||||||
24 | class CombinedAllocator { | ||||||||
25 | public: | ||||||||
26 | using AllocatorCache = typename PrimaryAllocator::AllocatorCache; | ||||||||
27 | using SecondaryAllocator = | ||||||||
28 | LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback, | ||||||||
29 | LargeMmapAllocatorPtrArray, | ||||||||
30 | typename PrimaryAllocator::AddressSpaceView>; | ||||||||
31 | |||||||||
32 | void InitLinkerInitialized(s32 release_to_os_interval_ms) { | ||||||||
33 | stats_.InitLinkerInitialized(); | ||||||||
34 | primary_.Init(release_to_os_interval_ms); | ||||||||
35 | secondary_.InitLinkerInitialized(); | ||||||||
36 | } | ||||||||
37 | |||||||||
38 | void Init(s32 release_to_os_interval_ms) { | ||||||||
39 | stats_.Init(); | ||||||||
40 | primary_.Init(release_to_os_interval_ms); | ||||||||
41 | secondary_.Init(); | ||||||||
42 | } | ||||||||
43 | |||||||||
44 | void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) { | ||||||||
45 | // Returning 0 on malloc(0) may break a lot of code. | ||||||||
46 | if (size == 0) | ||||||||
47 | size = 1; | ||||||||
48 | if (size + alignment < size) { | ||||||||
49 | Report("WARNING: %s: CombinedAllocator allocation overflow: " | ||||||||
50 | "0x%zx bytes with 0x%zx alignment requested\n", | ||||||||
51 | SanitizerToolName, size, alignment); | ||||||||
52 | return nullptr; | ||||||||
53 | } | ||||||||
54 | uptr original_size = size; | ||||||||
55 | // If alignment requirements are to be fulfilled by the frontend allocator | ||||||||
56 | // rather than by the primary or secondary, passing an alignment lower than | ||||||||
57 | // or equal to 8 will prevent any further rounding up, as well as the later | ||||||||
58 | // alignment check. | ||||||||
59 | if (alignment > 8) | ||||||||
60 | size = RoundUpTo(size, alignment); | ||||||||
61 | // The primary allocator should return a 2^x aligned allocation when | ||||||||
62 | // requested 2^x bytes, hence using the rounded up 'size' when being | ||||||||
63 | // serviced by the primary (this is no longer true when the primary is | ||||||||
64 | // using a non-fixed base address). The secondary takes care of the | ||||||||
65 | // alignment without such requirement, and allocating 'size' would use | ||||||||
66 | // extraneous memory, so we employ 'original_size'. | ||||||||
67 | void *res; | ||||||||
68 | if (primary_.CanAllocate(size, alignment)) | ||||||||
69 | res = cache->Allocate(&primary_, primary_.ClassID(size)); | ||||||||
70 | else | ||||||||
71 | res = secondary_.Allocate(&stats_, original_size, alignment); | ||||||||
72 | if (alignment > 8) | ||||||||
73 | CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((reinterpret_cast <uptr>(res) & (alignment - 1))); __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect(!!(!(v1 == v2 )), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h" , 73, "(" "(reinterpret_cast<uptr>(res) & (alignment - 1))" ") " "==" " (" "(0)" ")", v1, v2); } while (false); | ||||||||
74 | return res; | ||||||||
75 | } | ||||||||
76 | |||||||||
77 | s32 ReleaseToOSIntervalMs() const { | ||||||||
78 | return primary_.ReleaseToOSIntervalMs(); | ||||||||
79 | } | ||||||||
80 | |||||||||
81 | void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) { | ||||||||
82 | primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms); | ||||||||
83 | } | ||||||||
84 | |||||||||
85 | void ForceReleaseToOS() { | ||||||||
86 | primary_.ForceReleaseToOS(); | ||||||||
87 | } | ||||||||
88 | |||||||||
89 | void Deallocate(AllocatorCache *cache, void *p) { | ||||||||
90 | if (!p) return; | ||||||||
91 | if (primary_.PointerIsMine(p)) | ||||||||
92 | cache->Deallocate(&primary_, primary_.GetSizeClass(p), p); | ||||||||
93 | else | ||||||||
94 | secondary_.Deallocate(&stats_, p); | ||||||||
95 | } | ||||||||
96 | |||||||||
97 | void *Reallocate(AllocatorCache *cache, void *p, uptr new_size, | ||||||||
98 | uptr alignment) { | ||||||||
99 | if (!p) | ||||||||
100 | return Allocate(cache, new_size, alignment); | ||||||||
101 | if (!new_size) { | ||||||||
102 | Deallocate(cache, p); | ||||||||
103 | return nullptr; | ||||||||
104 | } | ||||||||
105 | CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine( p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h" , 105, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2 ); } while (false); | ||||||||
106 | uptr old_size = GetActuallyAllocatedSize(p); | ||||||||
107 | uptr memcpy_size = Min(new_size, old_size); | ||||||||
108 | void *new_p = Allocate(cache, new_size, alignment); | ||||||||
109 | if (new_p) | ||||||||
110 | internal_memcpy(new_p, p, memcpy_size); | ||||||||
111 | Deallocate(cache, p); | ||||||||
112 | return new_p; | ||||||||
113 | } | ||||||||
114 | |||||||||
115 | bool PointerIsMine(void *p) { | ||||||||
116 | if (primary_.PointerIsMine(p)) | ||||||||
117 | return true; | ||||||||
118 | return secondary_.PointerIsMine(p); | ||||||||
119 | } | ||||||||
120 | |||||||||
121 | bool FromPrimary(void *p) { | ||||||||
122 | return primary_.PointerIsMine(p); | ||||||||
123 | } | ||||||||
124 | |||||||||
125 | void *GetMetaData(const void *p) { | ||||||||
126 | if (primary_.PointerIsMine(p)) | ||||||||
127 | return primary_.GetMetaData(p); | ||||||||
128 | return secondary_.GetMetaData(p); | ||||||||
129 | } | ||||||||
130 | |||||||||
131 | void *GetBlockBegin(const void *p) { | ||||||||
132 | if (primary_.PointerIsMine(p)) | ||||||||
133 | return primary_.GetBlockBegin(p); | ||||||||
134 | return secondary_.GetBlockBegin(p); | ||||||||
135 | } | ||||||||
136 | |||||||||
137 | // This function does the same as GetBlockBegin, but is much faster. | ||||||||
138 | // Must be called with the allocator locked. | ||||||||
139 | void *GetBlockBeginFastLocked(void *p) { | ||||||||
140 | if (primary_.PointerIsMine(p)) | ||||||||
141 | return primary_.GetBlockBegin(p); | ||||||||
142 | return secondary_.GetBlockBeginFastLocked(p); | ||||||||
143 | } | ||||||||
144 | |||||||||
145 | void *GetBlockBeginFastLockedDebug(void *p) { | ||||||||
146 | if (primary_.PointerIsMine(p)) | ||||||||
147 | return primary_.GetBlockBeginDebug(p); | ||||||||
148 | return secondary_.GetBlockBeginFastLocked(p); | ||||||||
149 | } | ||||||||
150 | |||||||||
151 | |||||||||
152 | uptr GetActuallyAllocatedSize(void *p) { | ||||||||
153 | if (primary_.PointerIsMine(p)) | ||||||||
154 | return primary_.GetActuallyAllocatedSize(p); | ||||||||
155 | return secondary_.GetActuallyAllocatedSize(p); | ||||||||
156 | } | ||||||||
157 | |||||||||
158 | uptr TotalMemoryUsed() { | ||||||||
159 | return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed(); | ||||||||
160 | } | ||||||||
161 | |||||||||
162 | void TestOnlyUnmap() { primary_.TestOnlyUnmap(); } | ||||||||
163 | |||||||||
164 | void InitCache(AllocatorCache *cache) { | ||||||||
165 | cache->Init(&stats_); | ||||||||
166 | } | ||||||||
167 | |||||||||
168 | void DestroyCache(AllocatorCache *cache) { | ||||||||
169 | cache->Destroy(&primary_, &stats_); | ||||||||
170 | } | ||||||||
171 | |||||||||
172 | void SwallowCache(AllocatorCache *cache) { | ||||||||
173 | cache->Drain(&primary_); | ||||||||
174 | } | ||||||||
175 | |||||||||
176 | void GetStats(AllocatorStatCounters s) const { | ||||||||
177 | stats_.Get(s); | ||||||||
178 | } | ||||||||
179 | |||||||||
180 | void PrintStats() { | ||||||||
181 | primary_.PrintStats(); | ||||||||
182 | secondary_.PrintStats(); | ||||||||
183 | } | ||||||||
184 | |||||||||
185 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone | ||||||||
186 | // introspection API. | ||||||||
187 | void ForceLock() { | ||||||||
188 | primary_.ForceLock(); | ||||||||
189 | secondary_.ForceLock(); | ||||||||
190 | } | ||||||||
191 | |||||||||
192 | void ForceUnlock() { | ||||||||
193 | secondary_.ForceUnlock(); | ||||||||
194 | primary_.ForceUnlock(); | ||||||||
195 | } | ||||||||
196 | |||||||||
197 | // Iterate over all existing chunks. | ||||||||
198 | // The allocator must be locked when calling this function. | ||||||||
199 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | ||||||||
200 | primary_.ForEachChunk(callback, arg); | ||||||||
201 | secondary_.ForEachChunk(callback, arg); | ||||||||
202 | } | ||||||||
203 | |||||||||
204 | private: | ||||||||
205 | PrimaryAllocator primary_; | ||||||||
206 | SecondaryAllocator secondary_; | ||||||||
207 | AllocatorGlobalStats stats_; | ||||||||
208 | }; |
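
A self-contained sketch (standard C++, hypothetical constant) of the dispatch rule Allocate() above implements: requests that are small enough and not over-aligned go to the size-classed primary, everything else falls through to the mmap-based secondary. kMaxPrimarySize stands in for SizeClassMap::kMaxSize:

    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t kMaxPrimarySize = 1 << 17;  // stand-in for SizeClassMap::kMaxSize

    bool GoesToPrimary(uint64_t size, uint64_t alignment) {
      if (alignment > 8)                                   // same rule as Allocate()
        size = (size + alignment - 1) & ~(alignment - 1);  // RoundUpTo(size, alignment)
      return size <= kMaxPrimarySize && alignment <= kMaxPrimarySize;  // CanAllocate()
    }

    int main() {
      printf("%d\n", GoesToPrimary(100, 8));        // 1: small request -> primary
      printf("%d\n", GoesToPrimary(1 << 20, 8));    // 0: large request -> secondary
      printf("%d\n", GoesToPrimary(100, 1 << 20));  // 0: huge alignment -> secondary
      return 0;
    }
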
1 | //===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Part of the Sanitizer Allocator. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | #ifndef SANITIZER_ALLOCATOR_H |
13 | #error This file must be included inside sanitizer_allocator.h |
14 | #endif |
15 | |
16 | template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache; |
17 | |
18 | // SizeClassAllocator64 -- allocator for 64-bit address space. |
19 | // The template parameter Params is a class containing the actual parameters. |
20 | // |
21 | // Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg. |
22 | // If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap. |
23 | // Otherwise SpaceBeg=kSpaceBeg (fixed address). |
24 | // kSpaceSize is a power of two. |
25 | // At the beginning the entire space is mprotect-ed, then small parts of it |
26 | // are mapped on demand. |
27 | // |
28 | // Region: a part of Space dedicated to a single size class. |
29 | // There are kNumClasses Regions of equal size. |
30 | // |
31 | // UserChunk: a piece of memory returned to user. |
32 | // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk. |
33 | |
34 | // FreeArray is an array of free-d chunks (stored as 4-byte offsets). |
35 | // |
36 | // A Region looks like this: |
37 | // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray |
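
A worked sketch of the layout described above, using hypothetical values for SpaceBeg, kRegionSize, the size class and kMetadataSize (the real values come from Params and SizeClassMap); it mirrors GetRegionBeginBySizeClass/GetBlockBegin/GetMetaData below in simplified form, ignoring the FreeArray area at the end of the region:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kSpaceBeg     = 0x600000000000ull;  // assumed, not the real value
      const uint64_t kRegionSize   = 0x4000000000ull;    // assumed (kSpaceSize / kNumClassesRounded)
      const uint64_t kMetadataSize = 16;                 // assumed
      const uint64_t class_id = 7, chunk_size = 128;     // assumed size class

      uint64_t region_beg = kSpaceBeg + class_id * kRegionSize;  // GetRegionBeginBySizeClass
      uint64_t p = region_beg + 5 * chunk_size + 40;             // user pointer inside chunk #5
      uint64_t chunk_idx = (p - region_beg) / chunk_size;        // GetChunkIdx, simplified
      uint64_t chunk_beg = region_beg + chunk_idx * chunk_size;  // what GetBlockBegin returns
      // Metadata grows downward from the end of the region (simplified: the real
      // code reserves FreeArray space there first).
      uint64_t meta = region_beg + kRegionSize - (chunk_idx + 1) * kMetadataSize;
      printf("chunk #%llu: user %#llx meta %#llx\n",
             (unsigned long long)chunk_idx, (unsigned long long)chunk_beg,
             (unsigned long long)meta);
      return 0;
    }
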
38 | |
39 | struct SizeClassAllocator64FlagMasks { // Bit masks. |
40 | enum { |
41 | kRandomShuffleChunks = 1, |
42 | }; |
43 | }; |
44 | |
45 | template <class Params> |
46 | class SizeClassAllocator64 { |
47 | public: |
48 | using AddressSpaceView = typename Params::AddressSpaceView; |
49 | static const uptr kSpaceBeg = Params::kSpaceBeg; |
50 | static const uptr kSpaceSize = Params::kSpaceSize; |
51 | static const uptr kMetadataSize = Params::kMetadataSize; |
52 | typedef typename Params::SizeClassMap SizeClassMap; |
53 | typedef typename Params::MapUnmapCallback MapUnmapCallback; |
54 | |
55 | static const bool kRandomShuffleChunks = |
56 | Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks; |
57 | |
58 | typedef SizeClassAllocator64<Params> ThisT; |
59 | typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache; |
60 | |
61 | // When we know the size class (the region base) we can represent a pointer |
62 | // as a 4-byte integer (offset from the region start shifted right by 4). |
63 | typedef u32 CompactPtrT; |
64 | static const uptr kCompactPtrScale = 4; |
65 | CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const { |
66 | return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale); |
67 | } |
68 | uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const { |
69 | return base + (static_cast<uptr>(ptr32) << kCompactPtrScale); |
70 | } |
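
A quick round-trip of the compact pointer encoding above, with a hypothetical region base: offsets are stored shifted right by kCompactPtrScale (4), so a 32-bit value covers 2^36 bytes of region and the transform is lossless for 16-byte-aligned chunks:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kCompactPtrScale = 4;
      uint64_t base = 0x640000000000ull;     // assumed region base
      uint64_t ptr  = base + 0x12340ull;     // a 16-byte-aligned chunk inside the region
      uint32_t compact = (uint32_t)((ptr - base) >> kCompactPtrScale);   // PointerToCompactPtr
      uint64_t back    = base + ((uint64_t)compact << kCompactPtrScale); // CompactPtrToPointer
      assert(back == ptr);
      return 0;
    }
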
71 | |
72 | void Init(s32 release_to_os_interval_ms) { |
73 | uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); |
74 | if (kUsingConstantSpaceBeg) { |
75 | CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsAligned(kSpaceBeg , SizeClassMap::kMaxSize))); __sanitizer::u64 v2 = (__sanitizer ::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer ::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 75, "(" "(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize))" ") " "!=" " (" "0" ")", v1, v2); } while (false); |
76 | CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,do { __sanitizer::u64 v1 = (__sanitizer::u64)((kSpaceBeg)); __sanitizer ::u64 v2 = (__sanitizer::u64)((address_range.Init(TotalSpaceSize , PrimaryAllocatorName, kSpaceBeg))); if (__builtin_expect(!! (!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 77, "(" "(kSpaceBeg)" ") " "==" " (" "(address_range.Init(TotalSpaceSize, PrimaryAllocatorName, kSpaceBeg))" ")", v1, v2); } while (false) |
77 | PrimaryAllocatorName, kSpaceBeg))do { __sanitizer::u64 v1 = (__sanitizer::u64)((kSpaceBeg)); __sanitizer ::u64 v2 = (__sanitizer::u64)((address_range.Init(TotalSpaceSize , PrimaryAllocatorName, kSpaceBeg))); if (__builtin_expect(!! (!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 77, "(" "(kSpaceBeg)" ") " "==" " (" "(address_range.Init(TotalSpaceSize, PrimaryAllocatorName, kSpaceBeg))" ")", v1, v2); } while (false); |
78 | } else { |
79 | // Combined allocator expects that an 2^N allocation is always aligned to |
80 | // 2^N. For this to work, the start of the space needs to be aligned as |
81 | // high as the largest size class (which also needs to be a power of 2). |
82 | NonConstSpaceBeg = address_range.InitAligned( |
83 | TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName); |
84 | CHECK_NE(NonConstSpaceBeg, ~(uptr)0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((NonConstSpaceBeg )); __sanitizer::u64 v2 = (__sanitizer::u64)((~(uptr)0)); if ( __builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 84, "(" "(NonConstSpaceBeg)" ") " "!=" " (" "(~(uptr)0)" ")" , v1, v2); } while (false); |
85 | } |
86 | SetReleaseToOSIntervalMs(release_to_os_interval_ms); |
87 | MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(), |
88 | "SizeClassAllocator: region info"); |
89 | // Check that the RegionInfo array is aligned on the CacheLine size. |
90 | DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0); |
91 | } |
92 | |
93 | s32 ReleaseToOSIntervalMs() const { |
94 | return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed); |
95 | } |
96 | |
97 | void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) { |
98 | atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms, |
99 | memory_order_relaxed); |
100 | } |
101 | |
102 | void ForceReleaseToOS() { |
103 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { |
104 | BlockingMutexLock l(&GetRegionInfo(class_id)->mutex); |
105 | MaybeReleaseToOS(class_id, true /*force*/); |
106 | } |
107 | } |
108 | |
109 | static bool CanAllocate(uptr size, uptr alignment) { |
110 | return size <= SizeClassMap::kMaxSize && |
111 | alignment <= SizeClassMap::kMaxSize; |
112 | } |
113 | |
114 | NOINLINE__attribute__((noinline)) void ReturnToAllocator(AllocatorStats *stat, uptr class_id, |
115 | const CompactPtrT *chunks, uptr n_chunks) { |
116 | RegionInfo *region = GetRegionInfo(class_id); |
117 | uptr region_beg = GetRegionBeginBySizeClass(class_id); |
118 | CompactPtrT *free_array = GetFreeArray(region_beg); |
119 | |
120 | BlockingMutexLock l(®ion->mutex); |
121 | uptr old_num_chunks = region->num_freed_chunks; |
122 | uptr new_num_freed_chunks = old_num_chunks + n_chunks; |
124 | // Failure to allocate free array space while releasing memory is |
125 | // non-recoverable. |
125 | if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,__builtin_expect(!!(!EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks)), 0) |
126 | new_num_freed_chunks))__builtin_expect(!!(!EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks)), 0)) { |
127 | Report("FATAL: Internal error: %s's allocator exhausted the free list " |
128 | "space for size class %zd (%zd bytes).\n", SanitizerToolName, |
129 | class_id, ClassIdToSize(class_id)); |
130 | Die(); |
131 | } |
132 | for (uptr i = 0; i < n_chunks; i++) |
133 | free_array[old_num_chunks + i] = chunks[i]; |
134 | region->num_freed_chunks = new_num_freed_chunks; |
135 | region->stats.n_freed += n_chunks; |
136 | |
137 | MaybeReleaseToOS(class_id, false /*force*/); |
138 | } |
139 | |
140 | NOINLINE__attribute__((noinline)) bool GetFromAllocator(AllocatorStats *stat, uptr class_id, |
141 | CompactPtrT *chunks, uptr n_chunks) { |
142 | RegionInfo *region = GetRegionInfo(class_id); |
143 | uptr region_beg = GetRegionBeginBySizeClass(class_id); |
144 | CompactPtrT *free_array = GetFreeArray(region_beg); |
145 | |
146 | BlockingMutexLock l(®ion->mutex); |
147 | if (UNLIKELY(region->num_freed_chunks < n_chunks)__builtin_expect(!!(region->num_freed_chunks < n_chunks ), 0)) { |
148 | if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,__builtin_expect(!!(!PopulateFreeArray(stat, class_id, region , n_chunks - region->num_freed_chunks)), 0) |
149 | n_chunks - region->num_freed_chunks))__builtin_expect(!!(!PopulateFreeArray(stat, class_id, region , n_chunks - region->num_freed_chunks)), 0)) |
150 | return false; |
151 | CHECK_GE(region->num_freed_chunks, n_chunks)do { __sanitizer::u64 v1 = (__sanitizer::u64)((region->num_freed_chunks )); __sanitizer::u64 v2 = (__sanitizer::u64)((n_chunks)); if ( __builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 151, "(" "(region->num_freed_chunks)" ") " ">=" " (" "(n_chunks)" ")", v1, v2); } while (false); |
152 | } |
153 | region->num_freed_chunks -= n_chunks; |
154 | uptr base_idx = region->num_freed_chunks; |
155 | for (uptr i = 0; i < n_chunks; i++) |
156 | chunks[i] = free_array[base_idx + i]; |
157 | region->stats.n_allocated += n_chunks; |
158 | return true; |
159 | } |
160 | |
161 | bool PointerIsMine(const void *p) const { |
162 | uptr P = reinterpret_cast<uptr>(p); |
163 | if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) |
164 | return P / kSpaceSize == kSpaceBeg / kSpaceSize; |
165 | return P >= SpaceBeg() && P < SpaceEnd(); |
166 | } |
167 | |
168 | uptr GetRegionBegin(const void *p) { |
169 | if (kUsingConstantSpaceBeg) |
170 | return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1); |
171 | uptr space_beg = SpaceBeg(); |
172 | return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) + |
173 | space_beg; |
174 | } |
175 | |
176 | uptr GetRegionBeginBySizeClass(uptr class_id) const { |
177 | return SpaceBeg() + kRegionSize * class_id; |
178 | } |
179 | |
180 | uptr GetSizeClass(const void *p) { |
181 | if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) |
182 | return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded; |
183 | return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) % |
184 | kNumClassesRounded; |
185 | } |
186 | |
187 | void *GetBlockBegin(const void *p) { |
188 | uptr class_id = GetSizeClass(p); |
189 | uptr size = ClassIdToSize(class_id); |
190 | if (!size) return nullptr; |
191 | uptr chunk_idx = GetChunkIdx((uptr)p, size); |
192 | uptr reg_beg = GetRegionBegin(p); |
193 | uptr beg = chunk_idx * size; |
194 | uptr next_beg = beg + size; |
195 | if (class_id >= kNumClasses) return nullptr; |
196 | const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id)); |
197 | if (region->mapped_user >= next_beg) |
198 | return reinterpret_cast<void*>(reg_beg + beg); |
199 | return nullptr; |
200 | } |
201 | |
202 | void *GetBlockBeginDebug(const void *p) { |
203 | uptr class_id = GetSizeClass(p); |
204 | uptr size = ClassIdToSize(class_id); |
205 | Printf("GetBlockBeginDebug1 p %p class_id %p size %p\n", p, class_id, size); |
206 | if (!size) return nullptr; |
207 | uptr chunk_idx = GetChunkIdx((uptr)p, size); |
208 | uptr reg_beg = GetRegionBegin(p); |
209 | uptr beg = chunk_idx * size; |
210 | uptr next_beg = beg + size; |
211 | Printf( |
212 | "GetBlockBeginDebug2 chunk_idx %p reg_beg %p beg %p next_beg %p " |
213 | "kNumClasses %p\n", |
214 | chunk_idx, reg_beg, beg, next_beg, kNumClasses); |
215 | if (class_id >= kNumClasses) return nullptr; |
216 | const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id)); |
217 | Printf("GetBlockBeginDebug3 region %p region->mapped_user %p\n", region, |
218 | region->mapped_user); |
219 | if (region->mapped_user >= next_beg) |
220 | return reinterpret_cast<void*>(reg_beg + beg); |
221 | return nullptr; |
222 | } |
223 | |
224 | |
225 | uptr GetActuallyAllocatedSize(void *p) { |
226 | CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine( p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 226, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2 ); } while (false); |
227 | return ClassIdToSize(GetSizeClass(p)); |
228 | } |
229 | |
230 | static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } |
231 | |
232 | void *GetMetaData(const void *p) { |
233 | CHECK(kMetadataSize)do { __sanitizer::u64 v1 = (__sanitizer::u64)((kMetadataSize) ); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 233, "(" "(kMetadataSize)" ") " "!=" " (" "0" ")", v1, v2); } while (false); |
234 | uptr class_id = GetSizeClass(p); |
235 | uptr size = ClassIdToSize(class_id); |
236 | uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size); |
237 | uptr region_beg = GetRegionBeginBySizeClass(class_id); |
238 | return reinterpret_cast<void *>(GetMetadataEnd(region_beg) - |
239 | (1 + chunk_idx) * kMetadataSize); |
240 | } |
241 | |
242 | uptr TotalMemoryUsed() { |
243 | uptr res = 0; |
244 | for (uptr i = 0; i < kNumClasses; i++) |
245 | res += GetRegionInfo(i)->allocated_user; |
246 | return res; |
247 | } |
248 | |
249 | // Test-only. |
250 | void TestOnlyUnmap() { |
251 | UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size()); |
252 | } |
253 | |
254 | static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats, |
255 | uptr stats_size) { |
256 | for (uptr class_id = 0; class_id < stats_size; class_id++) |
257 | if (stats[class_id] == start) |
258 | stats[class_id] = rss; |
259 | } |
260 | |
261 | void PrintStats(uptr class_id, uptr rss) { |
262 | RegionInfo *region = GetRegionInfo(class_id); |
263 | if (region->mapped_user == 0) return; |
264 | uptr in_use = region->stats.n_allocated - region->stats.n_freed; |
265 | uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id); |
266 | Printf( |
267 | "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd " |
268 | "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd " |
269 | "last released: %6zdK region: 0x%zx\n", |
270 | region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id), |
271 | region->mapped_user >> 10, region->stats.n_allocated, |
272 | region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks, |
273 | rss >> 10, region->rtoi.num_releases, |
274 | region->rtoi.last_released_bytes >> 10, |
275 | SpaceBeg() + kRegionSize * class_id); |
276 | } |
277 | |
278 | void PrintStats() { |
279 | uptr rss_stats[kNumClasses]; |
280 | for (uptr class_id = 0; class_id < kNumClasses; class_id++) |
281 | rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id; |
282 | GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses); |
283 | |
284 | uptr total_mapped = 0; |
285 | uptr total_rss = 0; |
286 | uptr n_allocated = 0; |
287 | uptr n_freed = 0; |
288 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { |
289 | RegionInfo *region = GetRegionInfo(class_id); |
290 | if (region->mapped_user != 0) { |
291 | total_mapped += region->mapped_user; |
292 | total_rss += rss_stats[class_id]; |
293 | } |
294 | n_allocated += region->stats.n_allocated; |
295 | n_freed += region->stats.n_freed; |
296 | } |
297 | |
298 | Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in " |
299 | "%zd allocations; remains %zd\n", total_mapped >> 20, |
300 | total_rss >> 20, n_allocated, n_allocated - n_freed); |
301 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) |
302 | PrintStats(class_id, rss_stats[class_id]); |
303 | } |
304 | |
305 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone |
306 | // introspection API. |
307 | void ForceLock() { |
308 | for (uptr i = 0; i < kNumClasses; i++) { |
309 | GetRegionInfo(i)->mutex.Lock(); |
310 | } |
311 | } |
312 | |
313 | void ForceUnlock() { |
314 | for (int i = (int)kNumClasses - 1; i >= 0; i--) { |
315 | GetRegionInfo(i)->mutex.Unlock(); |
316 | } |
317 | } |
318 | |
319 | // Iterate over all existing chunks. |
320 | // The allocator must be locked when calling this function. |
321 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { |
322 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { |
323 | RegionInfo *region = GetRegionInfo(class_id); |
324 | uptr chunk_size = ClassIdToSize(class_id); |
325 | uptr region_beg = SpaceBeg() + class_id * kRegionSize; |
326 | uptr region_allocated_user_size = |
327 | AddressSpaceView::Load(region)->allocated_user; |
328 | for (uptr chunk = region_beg; |
329 | chunk < region_beg + region_allocated_user_size; |
330 | chunk += chunk_size) { |
331 | // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); |
332 | callback(chunk, arg); |
333 | } |
334 | } |
335 | } |
336 | |
337 | static uptr ClassIdToSize(uptr class_id) { |
338 | return SizeClassMap::Size(class_id); |
339 | } |
340 | |
341 | static uptr AdditionalSize() { |
342 | return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded, |
343 | GetPageSizeCached()); |
344 | } |
345 | |
346 | typedef SizeClassMap SizeClassMapT; |
347 | static const uptr kNumClasses = SizeClassMap::kNumClasses; |
348 | static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; |
349 | |
350 | // A packed array of counters. Each counter occupies 2^n bits, enough to store |
351 | // the counter's max_value. The ctor will try to allocate the required buffer |
352 | // via mapper->MapPackedCounterArrayBuffer, and the caller is expected to check |
353 | // whether the initialization succeeded via the IsAllocated() result. |
354 | // For performance reasons, none of the accessors check the validity of their |
355 | // arguments; it is assumed that the index is always in the [0, n) range and |
356 | // the value is never incremented past max_value. |
357 | template<class MemoryMapperT> |
358 | class PackedCounterArray { |
359 | public: |
360 | PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper) |
361 | : n(num_counters), memory_mapper(mapper) { |
362 | CHECK_GT(num_counters, 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((num_counters)) ; __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect (!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 362, "(" "(num_counters)" ") " ">" " (" "(0)" ")", v1, v2 ); } while (false); |
363 | CHECK_GT(max_value, 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((max_value)); __sanitizer ::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect(!!(! (v1 > v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 363, "(" "(max_value)" ") " ">" " (" "(0)" ")", v1, v2); } while (false); |
364 | constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL; |
365 | // Rounding the counter storage size up to a power of two allows using bit |
366 | // shifts to calculate a particular counter's index and offset. |
367 | uptr counter_size_bits = |
368 | RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1); |
369 | CHECK_LE(counter_size_bits, kMaxCounterBits)do { __sanitizer::u64 v1 = (__sanitizer::u64)((counter_size_bits )); __sanitizer::u64 v2 = (__sanitizer::u64)((kMaxCounterBits )); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer:: CheckFailed("/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h" , 369, "(" "(counter_size_bits)" ") " "<=" " (" "(kMaxCounterBits)" ")", v1, v2); } while (false); |
370 | counter_size_bits_log = Log2(counter_size_bits); |
371 | counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits); |
372 | |
373 | uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log; |
374 | CHECK_GT(packing_ratio, 0); |
375 | packing_ratio_log = Log2(packing_ratio); |
376 | bit_offset_mask = packing_ratio - 1; |
377 | |
378 | buffer_size = |
379 | (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) * |
380 | sizeof(*buffer); |
381 | buffer = reinterpret_cast<u64*>( |
382 | memory_mapper->MapPackedCounterArrayBuffer(buffer_size)); |
383 | } |
384 | ~PackedCounterArray() { |
385 | if (buffer) { |
386 | memory_mapper->UnmapPackedCounterArrayBuffer( |
387 | reinterpret_cast<uptr>(buffer), buffer_size); |
388 | } |
389 | } |
390 | |
391 | bool IsAllocated() const { |
392 | return !!buffer; |
393 | } |
394 | |
395 | u64 GetCount() const { |
396 | return n; |
397 | } |
398 | |
399 | uptr Get(uptr i) const { |
400 | DCHECK_LT(i, n); |
401 | uptr index = i >> packing_ratio_log; |
402 | uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log; |
403 | return (buffer[index] >> bit_offset) & counter_mask; |
404 | } |
405 | |
406 | void Inc(uptr i) const { |
407 | DCHECK_LT(Get(i), counter_mask); |
408 | uptr index = i >> packing_ratio_log; |
409 | uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log; |
410 | buffer[index] += 1ULL << bit_offset; |
411 | } |
412 | |
413 | void IncRange(uptr from, uptr to) const { |
414 | DCHECK_LE(from, to); |
415 | for (uptr i = from; i <= to; i++) |
416 | Inc(i); |
417 | } |
418 | |
419 | private: |
420 | const u64 n; |
421 | u64 counter_size_bits_log; |
422 | u64 counter_mask; |
423 | u64 packing_ratio_log; |
424 | u64 bit_offset_mask; |
425 | |
426 | MemoryMapperT* const memory_mapper; |
427 | u64 buffer_size; |
428 | u64* buffer; |
429 | }; |
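
A minimal, standalone sketch of the same packing arithmetic (not part of the source), using plain <cstdint> types and hypothetical numbers: with max_value = 5 a counter needs 3 bits, which is rounded up to 4, so 16 counters fit in each 64-bit word.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Hypothetical parameters: 100 counters, each able to hold values up to 5.
  const uint64_t n = 100;
  const uint64_t counter_size_bits = 4;                              // 3 bits for 5, rounded up to a power of two
  const uint64_t counter_size_bits_log = 2;                          // Log2(4)
  const uint64_t counter_mask = ~0ULL >> (64 - counter_size_bits);   // 0xF
  const uint64_t packing_ratio_log = 4;                              // Log2(64 / 4): 16 counters per u64
  const uint64_t bit_offset_mask = (1ULL << packing_ratio_log) - 1;  // 15

  std::vector<uint64_t> buffer((n + bit_offset_mask) >> packing_ratio_log, 0);

  auto inc = [&](uint64_t i) {
    buffer[i >> packing_ratio_log] +=
        1ULL << ((i & bit_offset_mask) << counter_size_bits_log);
  };
  auto get = [&](uint64_t i) {
    return (buffer[i >> packing_ratio_log] >>
            ((i & bit_offset_mask) << counter_size_bits_log)) & counter_mask;
  };

  inc(17); inc(17); inc(18);
  assert(get(17) == 2 && get(18) == 1 && get(16) == 0);
  return 0;
}
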
430 | |
431 | template<class MemoryMapperT> |
432 | class FreePagesRangeTracker { |
433 | public: |
434 | explicit FreePagesRangeTracker(MemoryMapperT* mapper) |
435 | : memory_mapper(mapper), |
436 | page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)), |
437 | in_the_range(false), current_page(0), current_range_start_page(0) {} |
438 | |
439 | void NextPage(bool freed) { |
440 | if (freed) { |
441 | if (!in_the_range) { |
442 | current_range_start_page = current_page; |
443 | in_the_range = true; |
444 | } |
445 | } else { |
446 | CloseOpenedRange(); |
447 | } |
448 | current_page++; |
449 | } |
450 | |
451 | void Done() { |
452 | CloseOpenedRange(); |
453 | } |
454 | |
455 | private: |
456 | void CloseOpenedRange() { |
457 | if (in_the_range) { |
458 | memory_mapper->ReleasePageRangeToOS( |
459 | current_range_start_page << page_size_scaled_log, |
460 | current_page << page_size_scaled_log); |
461 | in_the_range = false; |
462 | } |
463 | } |
464 | |
465 | MemoryMapperT* const memory_mapper; |
466 | const uptr page_size_scaled_log; |
467 | bool in_the_range; |
468 | uptr current_page; |
469 | uptr current_range_start_page; |
470 | }; |
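
A small sketch (not from the source) of how a FreePagesRangeTracker-style walk coalesces a per-page "fully freed" sequence into half-open [from, to) ranges. The page-size scaling is omitted, and RecordingMapper is a hypothetical stand-in that only records the ranges it is given.

#include <cstdio>
#include <utility>
#include <vector>

struct RecordingMapper {
  std::vector<std::pair<unsigned, unsigned>> ranges;
  void ReleasePageRangeToOS(unsigned from, unsigned to) { ranges.push_back({from, to}); }
};

int main() {
  RecordingMapper mapper;
  bool freed[] = {true, true, false, true, false, false, true, true};
  bool in_range = false;
  unsigned current_page = 0, range_start = 0;
  for (bool f : freed) {
    if (f) {
      if (!in_range) { range_start = current_page; in_range = true; }
    } else if (in_range) {                     // NextPage(false) closes an open range
      mapper.ReleasePageRangeToOS(range_start, current_page);
      in_range = false;
    }
    current_page++;
  }
  if (in_range)                                // Done() closes the trailing range
    mapper.ReleasePageRangeToOS(range_start, current_page);

  for (auto &r : mapper.ranges)
    printf("release pages [%u, %u)\n", r.first, r.second);  // [0,2), [3,4), [6,8)
  return 0;
}
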
471 | |
472 | // Iterates over free_array to identify memory pages that contain only freed |
473 | // chunks and returns those pages to the OS. |
474 | // allocated_pages_count is the total number of pages allocated for the |
475 | // current bucket. |
476 | template<class MemoryMapperT> |
477 | static void ReleaseFreeMemoryToOS(CompactPtrT *free_array, |
478 | uptr free_array_count, uptr chunk_size, |
479 | uptr allocated_pages_count, |
480 | MemoryMapperT *memory_mapper) { |
481 | const uptr page_size = GetPageSizeCached(); |
482 | |
483 | // Figure out the number of chunks per page and whether we can take a fast |
484 | // path (the number of chunks per page is the same for all pages). |
485 | uptr full_pages_chunk_count_max; |
486 | bool same_chunk_count_per_page; |
487 | if (chunk_size <= page_size && page_size % chunk_size == 0) { |
488 | // Same number of chunks per page, no cross overs. |
489 | full_pages_chunk_count_max = page_size / chunk_size; |
490 | same_chunk_count_per_page = true; |
491 | } else if (chunk_size <= page_size && page_size % chunk_size != 0 && |
492 | chunk_size % (page_size % chunk_size) == 0) { |
493 | // Some chunks are crossing page boundaries, which means that the page |
494 | // contains one or two partial chunks, but all pages contain the same |
495 | // number of chunks. |
496 | full_pages_chunk_count_max = page_size / chunk_size + 1; |
497 | same_chunk_count_per_page = true; |
498 | } else if (chunk_size <= page_size) { |
499 | // Some chunks are crossing page boundaries, which means that the page |
500 | // contains one or two partial chunks. |
501 | full_pages_chunk_count_max = page_size / chunk_size + 2; |
502 | same_chunk_count_per_page = false; |
503 | } else if (chunk_size > page_size && chunk_size % page_size == 0) { |
504 | // One chunk covers multiple pages, no cross overs. |
505 | full_pages_chunk_count_max = 1; |
506 | same_chunk_count_per_page = true; |
507 | } else if (chunk_size > page_size) { |
508 | // One chunk covers multiple pages and some chunks cross page |
509 | // boundaries. Some pages contain one chunk, some contain two. |
510 | full_pages_chunk_count_max = 2; |
511 | same_chunk_count_per_page = false; |
512 | } else { |
513 | UNREACHABLE("All chunk_size/page_size ratios must be handled."); |
514 | } |
515 | |
516 | PackedCounterArray<MemoryMapperT> counters(allocated_pages_count, |
517 | full_pages_chunk_count_max, |
518 | memory_mapper); |
519 | if (!counters.IsAllocated()) |
520 | return; |
521 | |
522 | const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale; |
523 | const uptr page_size_scaled = page_size >> kCompactPtrScale; |
524 | const uptr page_size_scaled_log = Log2(page_size_scaled); |
525 | |
526 | // Iterate over free chunks and count how many free chunks affect each |
527 | // allocated page. |
528 | if (chunk_size <= page_size && page_size % chunk_size == 0) { |
529 | // Each chunk affects one page only. |
530 | for (uptr i = 0; i < free_array_count; i++) |
531 | counters.Inc(free_array[i] >> page_size_scaled_log); |
532 | } else { |
533 | // In all other cases chunks might affect more than one page. |
534 | for (uptr i = 0; i < free_array_count; i++) { |
535 | counters.IncRange( |
536 | free_array[i] >> page_size_scaled_log, |
537 | (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log); |
538 | } |
539 | } |
540 | |
541 | // Iterate over pages detecting ranges of pages with chunk counters equal |
542 | // to the expected number of chunks for the particular page. |
543 | FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper); |
544 | if (same_chunk_count_per_page) { |
545 | // Fast path, every page has the same number of chunks affecting it. |
546 | for (uptr i = 0; i < counters.GetCount(); i++) |
547 | range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max); |
548 | } else { |
549 | // Slow path: go through the pages keeping count of how many chunks affect |
550 | // each page. |
551 | const uptr pn = |
552 | chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1; |
553 | const uptr pnc = pn * chunk_size_scaled; |
554 | // The idea is to increment the current page pointer by the first chunk |
555 | // size, middle portion size (the portion of the page covered by chunks |
556 | // except the first and the last one) and then the last chunk size, adding |
557 | // up the number of chunks on the current page and checking on every step |
558 | // whether the page boundary was crossed. |
559 | uptr prev_page_boundary = 0; |
560 | uptr current_boundary = 0; |
561 | for (uptr i = 0; i < counters.GetCount(); i++) { |
562 | uptr page_boundary = prev_page_boundary + page_size_scaled; |
563 | uptr chunks_per_page = pn; |
564 | if (current_boundary < page_boundary) { |
565 | if (current_boundary > prev_page_boundary) |
566 | chunks_per_page++; |
567 | current_boundary += pnc; |
568 | if (current_boundary < page_boundary) { |
569 | chunks_per_page++; |
570 | current_boundary += chunk_size_scaled; |
571 | } |
572 | } |
573 | prev_page_boundary = page_boundary; |
574 | |
575 | range_tracker.NextPage(counters.Get(i) == chunks_per_page); |
576 | } |
577 | } |
578 | range_tracker.Done(); |
579 | } |
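
A sketch (not from the source) that evaluates the chunk_size/page_size classification above for a few hypothetical chunk sizes against a 4 KiB page, printing the maximum number of chunks that can touch a page and whether that count is the same for every page.

#include <cstdio>

int main() {
  const unsigned long page_size = 4096;
  const unsigned long sizes[] = {256, 48, 96, 8192, 6144};  // hypothetical size classes
  for (unsigned long chunk_size : sizes) {
    unsigned long max_chunks;
    bool uniform;
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      max_chunks = page_size / chunk_size;           // 256 -> 16 chunks, every page alike
      uniform = true;
    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
               chunk_size % (page_size % chunk_size) == 0) {
      max_chunks = page_size / chunk_size + 1;       // 48 -> 86, still the same on every page
      uniform = true;
    } else if (chunk_size <= page_size) {
      max_chunks = page_size / chunk_size + 2;       // 96 -> 44, varies per page
      uniform = false;
    } else if (chunk_size % page_size == 0) {
      max_chunks = 1;                                // 8192 -> exactly one chunk per page
      uniform = true;
    } else {
      max_chunks = 2;                                // 6144 -> one or two chunks per page
      uniform = false;
    }
    printf("chunk %5lu: up to %lu chunk(s) per page, uniform=%d\n",
           chunk_size, max_chunks, uniform);
  }
  return 0;
}
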
580 | |
581 | private: |
582 | friend class MemoryMapper; |
583 | |
584 | ReservedAddressRange address_range; |
585 | |
586 | static const uptr kRegionSize = kSpaceSize / kNumClassesRounded; |
587 | // FreeArray is the array of freed chunks (stored as 4-byte offsets). |
588 | // In the worst case it may require kRegionSize/SizeClassMap::kMinSize |
589 | // elements, but in reality this will not happen. For simplicity we |
590 | // dedicate 1/8 of the region's virtual space to FreeArray. |
591 | static const uptr kFreeArraySize = kRegionSize / 8; |
592 | |
593 | static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0; |
594 | uptr NonConstSpaceBeg; |
595 | uptr SpaceBeg() const { |
596 | return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg; |
597 | } |
598 | uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; } |
599 | // kRegionSize must be >= 2^32. |
600 | COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2))); |
601 | // kRegionSize must be <= 2^36, see CompactPtrT. |
602 | COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4))); |
603 | // Call mmap for user memory with at least this size. |
604 | static const uptr kUserMapSize = 1 << 16; |
605 | // Call mmap for metadata memory with at least this size. |
606 | static const uptr kMetaMapSize = 1 << 16; |
607 | // Call mmap for free array memory with at least this size. |
608 | static const uptr kFreeArrayMapSize = 1 << 16; |
609 | |
610 | atomic_sint32_t release_to_os_interval_ms_; |
611 | |
612 | struct Stats { |
613 | uptr n_allocated; |
614 | uptr n_freed; |
615 | }; |
616 | |
617 | struct ReleaseToOsInfo { |
618 | uptr n_freed_at_last_release; |
619 | uptr num_releases; |
620 | u64 last_release_at_ns; |
621 | u64 last_released_bytes; |
622 | }; |
623 | |
624 | struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo { |
625 | BlockingMutex mutex; |
626 | uptr num_freed_chunks; // Number of elements in the freearray. |
627 | uptr mapped_free_array; // Bytes mapped for freearray. |
628 | uptr allocated_user; // Bytes allocated for user memory. |
629 | uptr allocated_meta; // Bytes allocated for metadata. |
630 | uptr mapped_user; // Bytes mapped for user memory. |
631 | uptr mapped_meta; // Bytes mapped for metadata. |
632 | u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks. |
633 | bool exhausted; // Whether region is out of space for new chunks. |
634 | Stats stats; |
635 | ReleaseToOsInfo rtoi; |
636 | }; |
637 | COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0); |
638 | |
639 | RegionInfo *GetRegionInfo(uptr class_id) const { |
640 | DCHECK_LT(class_id, kNumClasses); |
641 | RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd()); |
642 | return ®ions[class_id]; |
643 | } |
644 | |
645 | uptr GetMetadataEnd(uptr region_beg) const { |
646 | return region_beg + kRegionSize - kFreeArraySize; |
647 | } |
648 | |
649 | uptr GetChunkIdx(uptr chunk, uptr size) const { |
650 | if (!kUsingConstantSpaceBeg) |
651 | chunk -= SpaceBeg(); |
652 | |
653 | uptr offset = chunk % kRegionSize; |
654 | // Here we divide by a non-constant. This is costly. |
655 | // size always fits into 32-bits. If the offset fits too, use 32-bit div. |
656 | if (offset >> (SANITIZER_WORDSIZE / 2)) |
657 | return offset / size; |
658 | return (u32)offset / (u32)size; |
659 | } |
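
A sketch (not from the source) of the chunk-index computation with hypothetical numbers; the point of the branch above is that a 32-bit divide is cheaper than a 64-bit one and is safe whenever the offset fits in 32 bits.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kRegionSize = 1ULL << 32;       // minimum allowed by the static check above
  const uint64_t region_beg = 3 * kRegionSize;   // hypothetical region base
  const uint64_t size = 176;                     // hypothetical chunk size
  const uint64_t chunk = region_beg + 1234 * size;

  uint64_t offset = (chunk - region_beg) % kRegionSize;
  uint64_t idx = (offset >> 32) ? offset / size                        // 64-bit division
                                : (uint32_t)offset / (uint32_t)size;   // cheaper 32-bit division
  printf("chunk index = %llu\n", (unsigned long long)idx);             // prints 1234
  return 0;
}
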
660 | |
661 | CompactPtrT *GetFreeArray(uptr region_beg) const { |
662 | return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg)); |
663 | } |
664 | |
665 | bool MapWithCallback(uptr beg, uptr size, const char *name) { |
666 | uptr mapped = address_range.Map(beg, size, name); |
667 | if (UNLIKELY(!mapped)) |
668 | return false; |
669 | CHECK_EQ(beg, mapped); |
670 | MapUnmapCallback().OnMap(beg, size); |
671 | return true; |
672 | } |
673 | |
674 | void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) { |
675 | CHECK_EQ(beg, address_range.MapOrDie(beg, size, name)); |
676 | MapUnmapCallback().OnMap(beg, size); |
677 | } |
678 | |
679 | void UnmapWithCallbackOrDie(uptr beg, uptr size) { |
680 | MapUnmapCallback().OnUnmap(beg, size); |
681 | address_range.Unmap(beg, size); |
682 | } |
683 | |
684 | bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg, |
685 | uptr num_freed_chunks) { |
686 | uptr needed_space = num_freed_chunks * sizeof(CompactPtrT); |
687 | if (region->mapped_free_array < needed_space) { |
688 | uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize); |
689 | CHECK_LE(new_mapped_free_array, kFreeArraySize); |
690 | uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) + |
691 | region->mapped_free_array; |
692 | uptr new_map_size = new_mapped_free_array - region->mapped_free_array; |
693 | if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size, |
694 | "SizeClassAllocator: freearray"))) |
695 | return false; |
696 | region->mapped_free_array = new_mapped_free_array; |
697 | } |
698 | return true; |
699 | } |
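
A sketch (not from the source) of the free-array growth above, assuming 4-byte compact pointers: the mapping is extended in kFreeArrayMapSize (64 KiB) steps, and only when the needed space exceeds what is already mapped.

#include <cstdint>
#include <cstdio>

static uint64_t RoundUp(uint64_t x, uint64_t boundary) {
  return (x + boundary - 1) / boundary * boundary;
}

int main() {
  const uint64_t kFreeArrayMapSize = 1 << 16;    // 64 KiB, as in the source
  uint64_t mapped_free_array = 0;
  for (uint64_t num_freed_chunks : {1000u, 9000u, 20000u}) {
    uint64_t needed_space = num_freed_chunks * 4;      // 4-byte CompactPtrT entries
    if (mapped_free_array < needed_space)
      mapped_free_array = RoundUp(needed_space, kFreeArrayMapSize);
    printf("chunks=%5llu -> mapped %llu KiB\n",
           (unsigned long long)num_freed_chunks,
           (unsigned long long)(mapped_free_array >> 10));
  }
  // Prints 64 KiB, 64 KiB (no remap needed), then 128 KiB.
  return 0;
}
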
700 | |
701 | // Check whether this size class is exhausted. |
702 | bool IsRegionExhausted(RegionInfo *region, uptr class_id, |
703 | uptr additional_map_size) { |
704 | if (LIKELY(region->mapped_user + region->mapped_meta + |
705 | additional_map_size <= kRegionSize - kFreeArraySize)) |
706 | return false; |
707 | if (!region->exhausted) { |
708 | region->exhausted = true; |
709 | Printf("%s: Out of memory. ", SanitizerToolName); |
710 | Printf("The process has exhausted %zuMB for size class %zu.\n", |
711 | kRegionSize >> 20, ClassIdToSize(class_id)); |
712 | } |
713 | return true; |
714 | } |
715 | |
716 | NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id, |
717 | RegionInfo *region, uptr requested_count) { |
718 | // region->mutex is held. |
719 | const uptr region_beg = GetRegionBeginBySizeClass(class_id); |
720 | const uptr size = ClassIdToSize(class_id); |
721 | |
722 | const uptr total_user_bytes = |
723 | region->allocated_user + requested_count * size; |
724 | // Map more space for chunks, if necessary. |
725 | if (LIKELY(total_user_bytes > region->mapped_user)) { |
726 | if (UNLIKELY(region->mapped_user == 0)) { |
727 | if (!kUsingConstantSpaceBeg && kRandomShuffleChunks) |
728 | // The random state is initialized from ASLR. |
729 | region->rand_state = static_cast<u32>(region_beg >> 12); |
730 | // Postpone the first release to OS attempt for ReleaseToOSIntervalMs, |
731 | // preventing just allocated memory from being released sooner than |
732 | // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls |
733 | // for short lived processes. |
734 | // Do it only when the feature is turned on, to avoid a potentially |
735 | // extraneous syscall. |
736 | if (ReleaseToOSIntervalMs() >= 0) |
737 | region->rtoi.last_release_at_ns = MonotonicNanoTime(); |
738 | } |
739 | // Do the mmap for the user memory. |
740 | const uptr user_map_size = |
741 | RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize); |
742 | if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size))) |
743 | return false; |
744 | if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user, |
745 | user_map_size, |
746 | "SizeClassAllocator: region data"))) |
747 | return false; |
748 | stat->Add(AllocatorStatMapped, user_map_size); |
749 | region->mapped_user += user_map_size; |
750 | } |
751 | const uptr new_chunks_count = |
752 | (region->mapped_user - region->allocated_user) / size; |
753 | |
754 | if (kMetadataSize) { |
755 | // Calculate the required space for metadata. |
756 | const uptr total_meta_bytes = |
757 | region->allocated_meta + new_chunks_count * kMetadataSize; |
758 | const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ? |
759 | RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0; |
760 | // Map more space for metadata, if necessary. |
761 | if (meta_map_size) { |
762 | if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size))) |
763 | return false; |
764 | if (UNLIKELY(!MapWithCallback( |
765 | GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size, |
766 | meta_map_size, "SizeClassAllocator: region metadata"))) |
767 | return false; |
768 | region->mapped_meta += meta_map_size; |
769 | } |
770 | } |
771 | |
772 | // If necessary, allocate more space for the free array and populate it with |
773 | // newly allocated chunks. |
774 | const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count; |
775 | if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks))) |
776 | return false; |
777 | CompactPtrT *free_array = GetFreeArray(region_beg); |
778 | for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count; |
779 | i++, chunk += size) |
780 | free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk); |
781 | if (kRandomShuffleChunks) |
782 | RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count, |
783 | ®ion->rand_state); |
784 | |
785 | // All necessary memory is mapped and now it is safe to advance all |
786 | // 'allocated_*' counters. |
787 | region->num_freed_chunks += new_chunks_count; |
788 | region->allocated_user += new_chunks_count * size; |
789 | CHECK_LE(region->allocated_user, region->mapped_user); |
790 | region->allocated_meta += new_chunks_count * kMetadataSize; |
791 | CHECK_LE(region->allocated_meta, region->mapped_meta); |
792 | region->exhausted = false; |
793 | |
794 | // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent |
795 | // MaybeReleaseToOS from releasing just allocated pages or protect these |
796 | // not yet used chunks some other way. |
797 | |
798 | return true; |
799 | } |
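
A sketch (not from the source) of the reverse-order population of the free array performed above: new chunk offsets are written from the back of the array, so (before the optional shuffle) the lowest-address chunk sits at the end and is handed out first. All values are hypothetical.

#include <cstdio>
#include <vector>

int main() {
  const unsigned size = 128;                  // hypothetical chunk size
  const unsigned allocated_user = 1024;       // user bytes already carved out of the region
  const unsigned new_chunks_count = 4;        // chunks that just became available
  std::vector<unsigned> free_array = {0, 0};  // two pre-existing entries
  const unsigned num_freed_chunks = free_array.size();

  const unsigned total = num_freed_chunks + new_chunks_count;
  free_array.resize(total);
  unsigned chunk = allocated_user;
  for (unsigned i = 0; i < new_chunks_count; i++, chunk += size)
    free_array[total - 1 - i] = chunk;        // offsets land in reverse address order

  for (unsigned off : free_array) printf("%u ", off);  // 0 0 1408 1280 1152 1024
  printf("\n");
  return 0;
}
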
800 | |
801 | class MemoryMapper { |
802 | public: |
803 | MemoryMapper(const ThisT& base_allocator, uptr class_id) |
804 | : allocator(base_allocator), |
805 | region_base(base_allocator.GetRegionBeginBySizeClass(class_id)), |
806 | released_ranges_count(0), |
807 | released_bytes(0) { |
808 | } |
809 | |
810 | uptr GetReleasedRangesCount() const { |
811 | return released_ranges_count; |
812 | } |
813 | |
814 | uptr GetReleasedBytes() const { |
815 | return released_bytes; |
816 | } |
817 | |
818 | uptr MapPackedCounterArrayBuffer(uptr buffer_size) { |
819 | // TODO(alekseyshl): The idea to explore is to check if we have enough |
820 | // space between num_freed_chunks*sizeof(CompactPtrT) and |
821 | // mapped_free_array to fit buffer_size bytes and use that space instead |
822 | // of mapping a temporary one. |
823 | return reinterpret_cast<uptr>( |
824 | MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters")); |
825 | } |
826 | |
827 | void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) { |
828 | UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size); |
829 | } |
830 | |
831 | // Releases [from, to) range of pages back to OS. |
832 | void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) { |
833 | const uptr from_page = allocator.CompactPtrToPointer(region_base, from); |
834 | const uptr to_page = allocator.CompactPtrToPointer(region_base, to); |
835 | ReleaseMemoryPagesToOS(from_page, to_page); |
836 | released_ranges_count++; |
837 | released_bytes += to_page - from_page; |
838 | } |
839 | |
840 | private: |
841 | const ThisT& allocator; |
842 | const uptr region_base; |
843 | uptr released_ranges_count; |
844 | uptr released_bytes; |
845 | }; |
846 | |
847 | // Attempts to release RAM occupied by freed chunks back to OS. The region is |
848 | // expected to be locked. |
849 | void MaybeReleaseToOS(uptr class_id, bool force) { |
850 | RegionInfo *region = GetRegionInfo(class_id); |
851 | const uptr chunk_size = ClassIdToSize(class_id); |
852 | const uptr page_size = GetPageSizeCached(); |
853 | |
854 | uptr n = region->num_freed_chunks; |
855 | if (n * chunk_size < page_size) |
856 | return; // No chance to release anything. |
857 | if ((region->stats.n_freed - |
858 | region->rtoi.n_freed_at_last_release) * chunk_size < page_size) { |
859 | return; // Nothing new to release. |
860 | } |
861 | |
862 | if (!force) { |
863 | s32 interval_ms = ReleaseToOSIntervalMs(); |
864 | if (interval_ms < 0) |
865 | return; |
866 | |
867 | if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > |
868 | MonotonicNanoTime()) { |
869 | return; // Memory was returned recently. |
870 | } |
871 | } |
872 | |
873 | MemoryMapper memory_mapper(*this, class_id); |
874 | |
875 | ReleaseFreeMemoryToOS<MemoryMapper>( |
876 | GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size, |
877 | RoundUpTo(region->allocated_user, page_size) / page_size, |
878 | &memory_mapper); |
879 | |
880 | if (memory_mapper.GetReleasedRangesCount() > 0) { |
881 | region->rtoi.n_freed_at_last_release = region->stats.n_freed; |
882 | region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount(); |
883 | region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes(); |
884 | } |
885 | region->rtoi.last_release_at_ns = MonotonicNanoTime(); |
886 | } |
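
A sketch (not from the source) of the early-out checks above with concrete, hypothetical numbers for one size class: too little freed memory, nothing new since the last release, and a release attempted before the interval has elapsed.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t page_size = 4096;
  const uint64_t chunk_size = 256;

  uint64_t num_freed_chunks = 10;             // 10 * 256 = 2560 bytes, less than a page
  if (num_freed_chunks * chunk_size < page_size)
    printf("skip: not even one page worth of free chunks\n");

  uint64_t n_freed = 500, n_freed_at_last_release = 490;   // only 10 new frees since last time
  if ((n_freed - n_freed_at_last_release) * chunk_size < page_size)
    printf("skip: nothing new to release since the last attempt\n");

  int32_t interval_ms = 5000;
  uint64_t last_release_at_ns = 1000000000ULL;             // released 1 s after start
  uint64_t now_ns = 3000000000ULL;                         // it is now 3 s after start
  if (last_release_at_ns + (uint64_t)interval_ms * 1000000ULL > now_ns)
    printf("skip: released %llu ms ago, interval is %d ms\n",
           (unsigned long long)((now_ns - last_release_at_ns) / 1000000), interval_ms);
  return 0;
}
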
887 | }; |