// Static-analyzer report context (from the original review page):
//   File: projects/compiler-rt/lib/asan/asan_allocator.cpp
//   Warning: line 493, column 46 — Array access (from variable 'alloc_beg')
//   results in a null pointer dereference.
1 | //===-- asan_allocator.cpp ------------------------------------------------===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // This file is a part of AddressSanitizer, an address sanity checker. | ||||||||
10 | // | ||||||||
11 | // Implementation of ASan's memory allocator, 2-nd version. | ||||||||
12 | // This variant uses the allocator from sanitizer_common, i.e. the one shared | ||||||||
13 | // with ThreadSanitizer and MemorySanitizer. | ||||||||
14 | // | ||||||||
15 | //===----------------------------------------------------------------------===// | ||||||||
16 | |||||||||
17 | #include "asan_allocator.h" | ||||||||
18 | #include "asan_mapping.h" | ||||||||
19 | #include "asan_poisoning.h" | ||||||||
20 | #include "asan_report.h" | ||||||||
21 | #include "asan_stack.h" | ||||||||
22 | #include "asan_thread.h" | ||||||||
23 | #include "sanitizer_common/sanitizer_allocator_checks.h" | ||||||||
24 | #include "sanitizer_common/sanitizer_allocator_interface.h" | ||||||||
25 | #include "sanitizer_common/sanitizer_errno.h" | ||||||||
26 | #include "sanitizer_common/sanitizer_flags.h" | ||||||||
27 | #include "sanitizer_common/sanitizer_internal_defs.h" | ||||||||
28 | #include "sanitizer_common/sanitizer_list.h" | ||||||||
29 | #include "sanitizer_common/sanitizer_stackdepot.h" | ||||||||
30 | #include "sanitizer_common/sanitizer_quarantine.h" | ||||||||
31 | #include "lsan/lsan_common.h" | ||||||||
32 | |||||||||
33 | namespace __asan { | ||||||||
34 | |||||||||
35 | // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. | ||||||||
36 | // We use adaptive redzones: for larger allocation larger redzones are used. | ||||||||
37 | static u32 RZLog2Size(u32 rz_log) { | ||||||||
38 | CHECK_LT(rz_log, 8)do { __sanitizer::u64 v1 = (__sanitizer::u64)((rz_log)); __sanitizer ::u64 v2 = (__sanitizer::u64)((8)); if (__builtin_expect(!!(! (v1 < v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 38, "(" "(rz_log)" ") " "<" " (" "(8)" ")", v1, v2); } while (false); | ||||||||
39 | return 16 << rz_log; | ||||||||
40 | } | ||||||||
41 | |||||||||
42 | static u32 RZSize2Log(u32 rz_size) { | ||||||||
43 | CHECK_GE(rz_size, 16)do { __sanitizer::u64 v1 = (__sanitizer::u64)((rz_size)); __sanitizer ::u64 v2 = (__sanitizer::u64)((16)); if (__builtin_expect(!!( !(v1 >= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 43, "(" "(rz_size)" ") " ">=" " (" "(16)" ")", v1, v2); } while (false); | ||||||||
44 | CHECK_LE(rz_size, 2048)do { __sanitizer::u64 v1 = (__sanitizer::u64)((rz_size)); __sanitizer ::u64 v2 = (__sanitizer::u64)((2048)); if (__builtin_expect(! !(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 44, "(" "(rz_size)" ") " "<=" " (" "(2048)" ")", v1, v2) ; } while (false); | ||||||||
45 | CHECK(IsPowerOfTwo(rz_size))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsPowerOfTwo(rz_size ))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 45, "(" "(IsPowerOfTwo(rz_size))" ") " "!=" " (" "0" ")", v1 , v2); } while (false); | ||||||||
46 | u32 res = Log2(rz_size) - 4; | ||||||||
47 | CHECK_EQ(rz_size, RZLog2Size(res))do { __sanitizer::u64 v1 = (__sanitizer::u64)((rz_size)); __sanitizer ::u64 v2 = (__sanitizer::u64)((RZLog2Size(res))); if (__builtin_expect (!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 47, "(" "(rz_size)" ") " "==" " (" "(RZLog2Size(res))" ")", v1, v2); } while (false); | ||||||||
48 | return res; | ||||||||
49 | } | ||||||||
50 | |||||||||
51 | static AsanAllocator &get_allocator(); | ||||||||
52 | |||||||||
53 | // The memory chunk allocated from the underlying allocator looks like this: | ||||||||
54 | // L L L L L L H H U U U U U U R R | ||||||||
55 | // L -- left redzone words (0 or more bytes) | ||||||||
56 | // H -- ChunkHeader (16 bytes), which is also a part of the left redzone. | ||||||||
57 | // U -- user memory. | ||||||||
58 | // R -- right redzone (0 or more bytes) | ||||||||
59 | // ChunkBase consists of ChunkHeader and other bytes that overlap with user | ||||||||
60 | // memory. | ||||||||
61 | |||||||||
62 | // If the left redzone is greater than the ChunkHeader size we store a magic | ||||||||
63 | // value in the first uptr word of the memory block and store the address of | ||||||||
64 | // ChunkBase in the next uptr. | ||||||||
65 | // M B L L L L L L L L L H H U U U U U U | ||||||||
66 | // | ^ | ||||||||
67 | // ---------------------| | ||||||||
68 | // M -- magic value kAllocBegMagic | ||||||||
69 | // B -- address of ChunkHeader pointing to the first 'H' | ||||||||
70 | static const uptr kAllocBegMagic = 0xCC6E96B9; | ||||||||
71 | |||||||||
72 | struct ChunkHeader { | ||||||||
73 | // 1-st 8 bytes. | ||||||||
74 | u32 chunk_state : 8; // Must be first. | ||||||||
75 | u32 alloc_tid : 24; | ||||||||
76 | |||||||||
77 | u32 free_tid : 24; | ||||||||
78 | u32 from_memalign : 1; | ||||||||
79 | u32 alloc_type : 2; | ||||||||
80 | u32 rz_log : 3; | ||||||||
81 | u32 lsan_tag : 2; | ||||||||
82 | // 2-nd 8 bytes | ||||||||
83 | // This field is used for small sizes. For large sizes it is equal to | ||||||||
84 | // SizeClassMap::kMaxSize and the actual size is stored in the | ||||||||
85 | // SecondaryAllocator's metadata. | ||||||||
86 | u32 user_requested_size : 29; | ||||||||
87 | // align < 8 -> 0 | ||||||||
88 | // else -> log2(min(align, 512)) - 2 | ||||||||
89 | u32 user_requested_alignment_log : 3; | ||||||||
90 | u32 alloc_context_id; | ||||||||
91 | }; | ||||||||
92 | |||||||||
93 | struct ChunkBase : ChunkHeader { | ||||||||
94 | // Header2, intersects with user memory. | ||||||||
95 | u32 free_context_id; | ||||||||
96 | }; | ||||||||
97 | |||||||||
98 | static const uptr kChunkHeaderSize = sizeof(ChunkHeader); | ||||||||
99 | static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; | ||||||||
100 | COMPILER_CHECK(kChunkHeaderSize == 16)typedef char assertion_failed__100[2*(int)(kChunkHeaderSize == 16)-1]; | ||||||||
101 | COMPILER_CHECK(kChunkHeader2Size <= 16)typedef char assertion_failed__101[2*(int)(kChunkHeader2Size <= 16)-1]; | ||||||||
102 | |||||||||
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED = 2,
  CHUNK_QUARANTINE = 3
};
112 | |||||||||
113 | struct AsanChunk: ChunkBase { | ||||||||
114 | uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } | ||||||||
115 | uptr UsedSize(bool locked_version = false) { | ||||||||
116 | if (user_requested_size != SizeClassMap::kMaxSize) | ||||||||
117 | return user_requested_size; | ||||||||
118 | return *reinterpret_cast<uptr *>( | ||||||||
119 | get_allocator().GetMetaData(AllocBeg(locked_version))); | ||||||||
120 | } | ||||||||
121 | void *AllocBeg(bool locked_version = false) { | ||||||||
122 | if (from_memalign) { | ||||||||
123 | if (locked_version) | ||||||||
124 | return get_allocator().GetBlockBeginFastLocked( | ||||||||
125 | reinterpret_cast<void *>(this)); | ||||||||
126 | return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this)); | ||||||||
127 | } | ||||||||
128 | return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); | ||||||||
129 | } | ||||||||
130 | bool AddrIsInside(uptr addr, bool locked_version = false) { | ||||||||
131 | return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); | ||||||||
132 | } | ||||||||
133 | }; | ||||||||
134 | |||||||||
135 | struct QuarantineCallback { | ||||||||
136 | QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) | ||||||||
137 | : cache_(cache), | ||||||||
138 | stack_(stack) { | ||||||||
139 | } | ||||||||
140 | |||||||||
141 | void Recycle(AsanChunk *m) { | ||||||||
142 | CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m->chunk_state )); __sanitizer::u64 v2 = (__sanitizer::u64)((CHUNK_QUARANTINE )); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 142, "(" "(m->chunk_state)" ") " "==" " (" "(CHUNK_QUARANTINE)" ")", v1, v2); } while (false); | ||||||||
143 | atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); | ||||||||
144 | CHECK_NE(m->alloc_tid, kInvalidTid)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m->alloc_tid )); __sanitizer::u64 v2 = (__sanitizer::u64)((kInvalidTid)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 144, "(" "(m->alloc_tid)" ") " "!=" " (" "(kInvalidTid)" ")", v1, v2); } while (false); | ||||||||
145 | CHECK_NE(m->free_tid, kInvalidTid)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m->free_tid )); __sanitizer::u64 v2 = (__sanitizer::u64)((kInvalidTid)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 145, "(" "(m->free_tid)" ") " "!=" " (" "(kInvalidTid)" ")" , v1, v2); } while (false); | ||||||||
146 | PoisonShadow(m->Beg(), | ||||||||
147 | RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)), | ||||||||
148 | kAsanHeapLeftRedzoneMagic); | ||||||||
149 | void *p = reinterpret_cast<void *>(m->AllocBeg()); | ||||||||
150 | if (p != m) { | ||||||||
151 | uptr *alloc_magic = reinterpret_cast<uptr *>(p); | ||||||||
152 | CHECK_EQ(alloc_magic[0], kAllocBegMagic)do { __sanitizer::u64 v1 = (__sanitizer::u64)((alloc_magic[0] )); __sanitizer::u64 v2 = (__sanitizer::u64)((kAllocBegMagic) ); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 152, "(" "(alloc_magic[0])" ") " "==" " (" "(kAllocBegMagic)" ")", v1, v2); } while (false); | ||||||||
153 | // Clear the magic value, as allocator internals may overwrite the | ||||||||
154 | // contents of deallocated chunk, confusing GetAsanChunk lookup. | ||||||||
155 | alloc_magic[0] = 0; | ||||||||
156 | CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m))do { __sanitizer::u64 v1 = (__sanitizer::u64)((alloc_magic[1] )); __sanitizer::u64 v2 = (__sanitizer::u64)((reinterpret_cast <uptr>(m))); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer ::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 156, "(" "(alloc_magic[1])" ") " "==" " (" "(reinterpret_cast<uptr>(m))" ")", v1, v2); } while (false); | ||||||||
157 | } | ||||||||
158 | |||||||||
159 | // Statistics. | ||||||||
160 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
161 | thread_stats.real_frees++; | ||||||||
162 | thread_stats.really_freed += m->UsedSize(); | ||||||||
163 | |||||||||
164 | get_allocator().Deallocate(cache_, p); | ||||||||
165 | } | ||||||||
166 | |||||||||
167 | void *Allocate(uptr size) { | ||||||||
168 | void *res = get_allocator().Allocate(cache_, size, 1); | ||||||||
169 | // TODO(alekseys): Consider making quarantine OOM-friendly. | ||||||||
170 | if (UNLIKELY(!res)__builtin_expect(!!(!res), 0)) | ||||||||
171 | ReportOutOfMemory(size, stack_); | ||||||||
172 | return res; | ||||||||
173 | } | ||||||||
174 | |||||||||
175 | void Deallocate(void *p) { | ||||||||
176 | get_allocator().Deallocate(cache_, p); | ||||||||
177 | } | ||||||||
178 | |||||||||
179 | private: | ||||||||
180 | AllocatorCache* const cache_; | ||||||||
181 | BufferedStackTrace* const stack_; | ||||||||
182 | }; | ||||||||
183 | |||||||||
184 | typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; | ||||||||
185 | typedef AsanQuarantine::Cache QuarantineCache; | ||||||||
186 | |||||||||
187 | void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { | ||||||||
188 | PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); | ||||||||
189 | // Statistics. | ||||||||
190 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
191 | thread_stats.mmaps++; | ||||||||
192 | thread_stats.mmaped += size; | ||||||||
193 | } | ||||||||
194 | void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { | ||||||||
195 | PoisonShadow(p, size, 0); | ||||||||
196 | // We are about to unmap a chunk of user memory. | ||||||||
197 | // Mark the corresponding shadow memory as not needed. | ||||||||
198 | FlushUnneededASanShadowMemory(p, size); | ||||||||
199 | // Statistics. | ||||||||
200 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
201 | thread_stats.munmaps++; | ||||||||
202 | thread_stats.munmaped += size; | ||||||||
203 | } | ||||||||
204 | |||||||||
205 | // We can not use THREADLOCAL because it is not supported on some of the | ||||||||
206 | // platforms we care about (OSX 10.6, Android). | ||||||||
207 | // static THREADLOCAL AllocatorCache cache; | ||||||||
208 | AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { | ||||||||
209 | CHECK(ms)do { __sanitizer::u64 v1 = (__sanitizer::u64)((ms)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 209, "(" "(ms)" ") " "!=" " (" "0" ")", v1, v2); } while (false ); | ||||||||
210 | return &ms->allocator_cache; | ||||||||
211 | } | ||||||||
212 | |||||||||
213 | QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { | ||||||||
214 | CHECK(ms)do { __sanitizer::u64 v1 = (__sanitizer::u64)((ms)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 214, "(" "(ms)" ") " "!=" " (" "0" ")", v1, v2); } while (false ); | ||||||||
215 | CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache))do { __sanitizer::u64 v1 = (__sanitizer::u64)((sizeof(QuarantineCache ))); __sanitizer::u64 v2 = (__sanitizer::u64)((sizeof(ms-> quarantine_cache))); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 215, "(" "(sizeof(QuarantineCache))" ") " "<=" " (" "(sizeof(ms->quarantine_cache))" ")", v1, v2); } while (false); | ||||||||
216 | return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); | ||||||||
217 | } | ||||||||
218 | |||||||||
219 | void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { | ||||||||
220 | quarantine_size_mb = f->quarantine_size_mb; | ||||||||
221 | thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; | ||||||||
222 | min_redzone = f->redzone; | ||||||||
223 | max_redzone = f->max_redzone; | ||||||||
224 | may_return_null = cf->allocator_may_return_null; | ||||||||
225 | alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; | ||||||||
226 | release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; | ||||||||
227 | } | ||||||||
228 | |||||||||
229 | void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { | ||||||||
230 | f->quarantine_size_mb = quarantine_size_mb; | ||||||||
231 | f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; | ||||||||
232 | f->redzone = min_redzone; | ||||||||
233 | f->max_redzone = max_redzone; | ||||||||
234 | cf->allocator_may_return_null = may_return_null; | ||||||||
235 | f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; | ||||||||
236 | cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms; | ||||||||
237 | } | ||||||||
238 | |||||||||
239 | struct Allocator { | ||||||||
240 | static const uptr kMaxAllowedMallocSize = | ||||||||
241 | FIRST_32_SECOND_64(3UL << 30, 1ULL << 40)(3UL << 30); | ||||||||
242 | |||||||||
243 | AsanAllocator allocator; | ||||||||
244 | AsanQuarantine quarantine; | ||||||||
245 | StaticSpinMutex fallback_mutex; | ||||||||
246 | AllocatorCache fallback_allocator_cache; | ||||||||
247 | QuarantineCache fallback_quarantine_cache; | ||||||||
248 | |||||||||
249 | atomic_uint8_t rss_limit_exceeded; | ||||||||
250 | |||||||||
251 | // ------------------- Options -------------------------- | ||||||||
252 | atomic_uint16_t min_redzone; | ||||||||
253 | atomic_uint16_t max_redzone; | ||||||||
254 | atomic_uint8_t alloc_dealloc_mismatch; | ||||||||
255 | |||||||||
256 | // ------------------- Initialization ------------------------ | ||||||||
257 | explicit Allocator(LinkerInitialized) | ||||||||
258 | : quarantine(LINKER_INITIALIZED), | ||||||||
259 | fallback_quarantine_cache(LINKER_INITIALIZED) {} | ||||||||
260 | |||||||||
261 | void CheckOptions(const AllocatorOptions &options) const { | ||||||||
262 | CHECK_GE(options.min_redzone, 16)do { __sanitizer::u64 v1 = (__sanitizer::u64)((options.min_redzone )); __sanitizer::u64 v2 = (__sanitizer::u64)((16)); if (__builtin_expect (!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 262, "(" "(options.min_redzone)" ") " ">=" " (" "(16)" ")" , v1, v2); } while (false); | ||||||||
263 | CHECK_GE(options.max_redzone, options.min_redzone)do { __sanitizer::u64 v1 = (__sanitizer::u64)((options.max_redzone )); __sanitizer::u64 v2 = (__sanitizer::u64)((options.min_redzone )); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer:: CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 263, "(" "(options.max_redzone)" ") " ">=" " (" "(options.min_redzone)" ")", v1, v2); } while (false); | ||||||||
264 | CHECK_LE(options.max_redzone, 2048)do { __sanitizer::u64 v1 = (__sanitizer::u64)((options.max_redzone )); __sanitizer::u64 v2 = (__sanitizer::u64)((2048)); if (__builtin_expect (!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 264, "(" "(options.max_redzone)" ") " "<=" " (" "(2048)" ")", v1, v2); } while (false); | ||||||||
265 | CHECK(IsPowerOfTwo(options.min_redzone))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsPowerOfTwo(options .min_redzone))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 265, "(" "(IsPowerOfTwo(options.min_redzone))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
266 | CHECK(IsPowerOfTwo(options.max_redzone))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsPowerOfTwo(options .max_redzone))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 266, "(" "(IsPowerOfTwo(options.max_redzone))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
267 | } | ||||||||
268 | |||||||||
269 | void SharedInitCode(const AllocatorOptions &options) { | ||||||||
270 | CheckOptions(options); | ||||||||
271 | quarantine.Init((uptr)options.quarantine_size_mb << 20, | ||||||||
272 | (uptr)options.thread_local_quarantine_size_kb << 10); | ||||||||
273 | atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, | ||||||||
274 | memory_order_release); | ||||||||
275 | atomic_store(&min_redzone, options.min_redzone, memory_order_release); | ||||||||
276 | atomic_store(&max_redzone, options.max_redzone, memory_order_release); | ||||||||
277 | } | ||||||||
278 | |||||||||
279 | void InitLinkerInitialized(const AllocatorOptions &options) { | ||||||||
280 | SetAllocatorMayReturnNull(options.may_return_null); | ||||||||
281 | allocator.InitLinkerInitialized(options.release_to_os_interval_ms); | ||||||||
282 | SharedInitCode(options); | ||||||||
283 | } | ||||||||
284 | |||||||||
285 | bool RssLimitExceeded() { | ||||||||
286 | return atomic_load(&rss_limit_exceeded, memory_order_relaxed); | ||||||||
287 | } | ||||||||
288 | |||||||||
289 | void SetRssLimitExceeded(bool limit_exceeded) { | ||||||||
290 | atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); | ||||||||
291 | } | ||||||||
292 | |||||||||
293 | void RePoisonChunk(uptr chunk) { | ||||||||
294 | // This could be a user-facing chunk (with redzones), or some internal | ||||||||
295 | // housekeeping chunk, like TransferBatch. Start by assuming the former. | ||||||||
296 | AsanChunk *ac = GetAsanChunk((void *)chunk); | ||||||||
297 | uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac); | ||||||||
298 | uptr beg = ac->Beg(); | ||||||||
299 | uptr end = ac->Beg() + ac->UsedSize(true); | ||||||||
300 | uptr chunk_end = chunk + allocated_size; | ||||||||
301 | if (chunk < beg && beg < end && end <= chunk_end && | ||||||||
302 | ac->chunk_state == CHUNK_ALLOCATED) { | ||||||||
303 | // Looks like a valid AsanChunk in use, poison redzones only. | ||||||||
304 | PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); | ||||||||
305 | uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)); | ||||||||
306 | FastPoisonShadowPartialRightRedzone( | ||||||||
307 | end_aligned_down, end - end_aligned_down, | ||||||||
308 | chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); | ||||||||
309 | } else { | ||||||||
310 | // This is either not an AsanChunk or freed or quarantined AsanChunk. | ||||||||
311 | // In either case, poison everything. | ||||||||
312 | PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); | ||||||||
313 | } | ||||||||
314 | } | ||||||||
315 | |||||||||
316 | void ReInitialize(const AllocatorOptions &options) { | ||||||||
317 | SetAllocatorMayReturnNull(options.may_return_null); | ||||||||
318 | allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); | ||||||||
319 | SharedInitCode(options); | ||||||||
320 | |||||||||
321 | // Poison all existing allocation's redzones. | ||||||||
322 | if (CanPoisonMemory()) { | ||||||||
323 | allocator.ForceLock(); | ||||||||
324 | allocator.ForEachChunk( | ||||||||
325 | [](uptr chunk, void *alloc) { | ||||||||
326 | ((Allocator *)alloc)->RePoisonChunk(chunk); | ||||||||
327 | }, | ||||||||
328 | this); | ||||||||
329 | allocator.ForceUnlock(); | ||||||||
330 | } | ||||||||
331 | } | ||||||||
332 | |||||||||
333 | void GetOptions(AllocatorOptions *options) const { | ||||||||
334 | options->quarantine_size_mb = quarantine.GetSize() >> 20; | ||||||||
335 | options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10; | ||||||||
336 | options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); | ||||||||
337 | options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); | ||||||||
338 | options->may_return_null = AllocatorMayReturnNull(); | ||||||||
339 | options->alloc_dealloc_mismatch = | ||||||||
340 | atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); | ||||||||
341 | options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs(); | ||||||||
342 | } | ||||||||
343 | |||||||||
344 | // -------------------- Helper methods. ------------------------- | ||||||||
345 | uptr ComputeRZLog(uptr user_requested_size) { | ||||||||
346 | u32 rz_log = | ||||||||
347 | user_requested_size <= 64 - 16 ? 0 : | ||||||||
348 | user_requested_size <= 128 - 32 ? 1 : | ||||||||
349 | user_requested_size <= 512 - 64 ? 2 : | ||||||||
350 | user_requested_size <= 4096 - 128 ? 3 : | ||||||||
351 | user_requested_size <= (1 << 14) - 256 ? 4 : | ||||||||
352 | user_requested_size <= (1 << 15) - 512 ? 5 : | ||||||||
353 | user_requested_size <= (1 << 16) - 1024 ? 6 : 7; | ||||||||
354 | u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); | ||||||||
355 | u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); | ||||||||
356 | return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); | ||||||||
357 | } | ||||||||
358 | |||||||||
359 | static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) { | ||||||||
360 | if (user_requested_alignment < 8) | ||||||||
361 | return 0; | ||||||||
362 | if (user_requested_alignment > 512) | ||||||||
363 | user_requested_alignment = 512; | ||||||||
364 | return Log2(user_requested_alignment) - 2; | ||||||||
365 | } | ||||||||
366 | |||||||||
367 | static uptr ComputeUserAlignment(uptr user_requested_alignment_log) { | ||||||||
368 | if (user_requested_alignment_log == 0) | ||||||||
369 | return 0; | ||||||||
370 | return 1LL << (user_requested_alignment_log + 2); | ||||||||
371 | } | ||||||||
372 | |||||||||
373 | // We have an address between two chunks, and we want to report just one. | ||||||||
374 | AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, | ||||||||
375 | AsanChunk *right_chunk) { | ||||||||
376 | // Prefer an allocated chunk over freed chunk and freed chunk | ||||||||
377 | // over available chunk. | ||||||||
378 | if (left_chunk->chunk_state != right_chunk->chunk_state) { | ||||||||
379 | if (left_chunk->chunk_state == CHUNK_ALLOCATED) | ||||||||
380 | return left_chunk; | ||||||||
381 | if (right_chunk->chunk_state == CHUNK_ALLOCATED) | ||||||||
382 | return right_chunk; | ||||||||
383 | if (left_chunk->chunk_state == CHUNK_QUARANTINE) | ||||||||
384 | return left_chunk; | ||||||||
385 | if (right_chunk->chunk_state == CHUNK_QUARANTINE) | ||||||||
386 | return right_chunk; | ||||||||
387 | } | ||||||||
388 | // Same chunk_state: choose based on offset. | ||||||||
389 | sptr l_offset = 0, r_offset = 0; | ||||||||
390 | CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset))do { __sanitizer::u64 v1 = (__sanitizer::u64)((AsanChunkView( left_chunk).AddrIsAtRight(addr, 1, &l_offset))); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 390, "(" "(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
391 | CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset))do { __sanitizer::u64 v1 = (__sanitizer::u64)((AsanChunkView( right_chunk).AddrIsAtLeft(addr, 1, &r_offset))); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 391, "(" "(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
392 | if (l_offset < r_offset) | ||||||||
393 | return left_chunk; | ||||||||
394 | return right_chunk; | ||||||||
395 | } | ||||||||
396 | |||||||||
397 | // -------------------- Allocation/Deallocation routines --------------- | ||||||||
398 | void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, | ||||||||
399 | AllocType alloc_type, bool can_fill) { | ||||||||
400 | if (UNLIKELY(!asan_inited)__builtin_expect(!!(!asan_inited), 0)) | ||||||||
| |||||||||
401 | AsanInitFromRtl(); | ||||||||
402 | if (RssLimitExceeded()) { | ||||||||
403 | if (AllocatorMayReturnNull()) | ||||||||
404 | return nullptr; | ||||||||
405 | ReportRssLimitExceeded(stack); | ||||||||
406 | } | ||||||||
407 | Flags &fl = *flags(); | ||||||||
408 | CHECK(stack)do { __sanitizer::u64 v1 = (__sanitizer::u64)((stack)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 408, "(" "(stack)" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
409 | const uptr min_alignment = SHADOW_GRANULARITY(1ULL << kDefaultShadowScale); | ||||||||
410 | const uptr user_requested_alignment_log = | ||||||||
411 | ComputeUserRequestedAlignmentLog(alignment); | ||||||||
412 | if (alignment
| ||||||||
413 | alignment = min_alignment; | ||||||||
414 | if (size == 0) { | ||||||||
415 | // We'd be happy to avoid allocating memory for zero-size requests, but | ||||||||
416 | // some programs/tests depend on this behavior and assume that malloc | ||||||||
417 | // would not return NULL even for zero-size allocations. Moreover, it | ||||||||
418 | // looks like operator new should never return NULL, and results of | ||||||||
419 | // consecutive "new" calls must be different even if the allocated size | ||||||||
420 | // is zero. | ||||||||
421 | size = 1; | ||||||||
422 | } | ||||||||
423 | CHECK(IsPowerOfTwo(alignment))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsPowerOfTwo(alignment ))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 423, "(" "(IsPowerOfTwo(alignment))" ") " "!=" " (" "0" ")" , v1, v2); } while (false); | ||||||||
424 | uptr rz_log = ComputeRZLog(size); | ||||||||
425 | uptr rz_size = RZLog2Size(rz_log); | ||||||||
426 | uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); | ||||||||
427 | uptr needed_size = rounded_size + rz_size; | ||||||||
428 | if (alignment > min_alignment) | ||||||||
429 | needed_size += alignment; | ||||||||
430 | bool using_primary_allocator = true; | ||||||||
431 | // If we are allocating from the secondary allocator, there will be no | ||||||||
432 | // automatic right redzone, so add the right redzone manually. | ||||||||
433 | if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { | ||||||||
434 | needed_size += rz_size; | ||||||||
435 | using_primary_allocator = false; | ||||||||
436 | } | ||||||||
437 | CHECK(IsAligned(needed_size, min_alignment))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsAligned(needed_size , min_alignment))); __sanitizer::u64 v2 = (__sanitizer::u64)( 0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 437, "(" "(IsAligned(needed_size, min_alignment))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
438 | if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { | ||||||||
439 | if (AllocatorMayReturnNull()) { | ||||||||
440 | Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", | ||||||||
441 | (void*)size); | ||||||||
442 | return nullptr; | ||||||||
443 | } | ||||||||
444 | ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize, | ||||||||
445 | stack); | ||||||||
446 | } | ||||||||
447 | |||||||||
448 | AsanThread *t = GetCurrentThread(); | ||||||||
449 | void *allocated; | ||||||||
450 | if (t) { | ||||||||
451 | AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); | ||||||||
452 | allocated = allocator.Allocate(cache, needed_size, 8); | ||||||||
453 | } else { | ||||||||
454 | SpinMutexLock l(&fallback_mutex); | ||||||||
455 | AllocatorCache *cache = &fallback_allocator_cache; | ||||||||
456 | allocated = allocator.Allocate(cache, needed_size, 8); | ||||||||
457 | } | ||||||||
458 | if (UNLIKELY(!allocated)__builtin_expect(!!(!allocated), 0)) { | ||||||||
459 | SetAllocatorOutOfMemory(); | ||||||||
460 | if (AllocatorMayReturnNull()) | ||||||||
461 | return nullptr; | ||||||||
462 | ReportOutOfMemory(size, stack); | ||||||||
463 | } | ||||||||
464 | |||||||||
465 | if (*(u8 *)MEM_TO_SHADOW((uptr)allocated)((((uptr)allocated) >> kDefaultShadowScale) + (kDefaultShadowOffset32 )) == 0 && CanPoisonMemory()) { | ||||||||
466 | // Heap poisoning is enabled, but the allocator provides an unpoisoned | ||||||||
467 | // chunk. This is possible if CanPoisonMemory() was false for some | ||||||||
468 | // time, for example, due to flags()->start_disabled. | ||||||||
469 | // Anyway, poison the block before using it for anything else. | ||||||||
470 | uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); | ||||||||
471 | PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); | ||||||||
472 | } | ||||||||
473 | |||||||||
474 | uptr alloc_beg = reinterpret_cast<uptr>(allocated); | ||||||||
475 | uptr alloc_end = alloc_beg + needed_size; | ||||||||
476 | uptr beg_plus_redzone = alloc_beg + rz_size; | ||||||||
477 | uptr user_beg = beg_plus_redzone; | ||||||||
478 | if (!IsAligned(user_beg, alignment)) | ||||||||
479 | user_beg = RoundUpTo(user_beg, alignment); | ||||||||
480 | uptr user_end = user_beg + size; | ||||||||
481 | CHECK_LE(user_end, alloc_end)do { __sanitizer::u64 v1 = (__sanitizer::u64)((user_end)); __sanitizer ::u64 v2 = (__sanitizer::u64)((alloc_end)); if (__builtin_expect (!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 481, "(" "(user_end)" ") " "<=" " (" "(alloc_end)" ")", v1 , v2); } while (false); | ||||||||
482 | uptr chunk_beg = user_beg - kChunkHeaderSize; | ||||||||
483 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); | ||||||||
484 | m->alloc_type = alloc_type; | ||||||||
485 | m->rz_log = rz_log; | ||||||||
486 | u32 alloc_tid = t
| ||||||||
487 | m->alloc_tid = alloc_tid; | ||||||||
488 | CHECK_EQ(alloc_tid, m->alloc_tid)do { __sanitizer::u64 v1 = (__sanitizer::u64)((alloc_tid)); __sanitizer ::u64 v2 = (__sanitizer::u64)((m->alloc_tid)); if (__builtin_expect (!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 488, "(" "(alloc_tid)" ") " "==" " (" "(m->alloc_tid)" ")" , v1, v2); } while (false); // Does alloc_tid fit into the bitfield? | ||||||||
489 | m->free_tid = kInvalidTid; | ||||||||
490 | m->from_memalign = user_beg != beg_plus_redzone; | ||||||||
491 | if (alloc_beg != chunk_beg) { | ||||||||
492 | CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg)do { __sanitizer::u64 v1 = (__sanitizer::u64)((alloc_beg+ 2 * sizeof(uptr))); __sanitizer::u64 v2 = (__sanitizer::u64)((chunk_beg )); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer:: CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 492, "(" "(alloc_beg+ 2 * sizeof(uptr))" ") " "<=" " (" "(chunk_beg)" ")", v1, v2); } while (false); | ||||||||
493 | reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic; | ||||||||
| |||||||||
494 | reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg; | ||||||||
495 | } | ||||||||
496 | if (using_primary_allocator) { | ||||||||
497 | CHECK(size)do { __sanitizer::u64 v1 = (__sanitizer::u64)((size)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 497, "(" "(size)" ") " "!=" " (" "0" ")", v1, v2); } while ( false); | ||||||||
498 | m->user_requested_size = size; | ||||||||
499 | CHECK(allocator.FromPrimary(allocated))do { __sanitizer::u64 v1 = (__sanitizer::u64)((allocator.FromPrimary (allocated))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 499, "(" "(allocator.FromPrimary(allocated))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
500 | } else { | ||||||||
501 | CHECK(!allocator.FromPrimary(allocated))do { __sanitizer::u64 v1 = (__sanitizer::u64)((!allocator.FromPrimary (allocated))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed ("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 501, "(" "(!allocator.FromPrimary(allocated))" ") " "!=" " (" "0" ")", v1, v2); } while (false); | ||||||||
502 | m->user_requested_size = SizeClassMap::kMaxSize; | ||||||||
503 | uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); | ||||||||
504 | meta[0] = size; | ||||||||
505 | meta[1] = chunk_beg; | ||||||||
506 | } | ||||||||
507 | m->user_requested_alignment_log = user_requested_alignment_log; | ||||||||
508 | |||||||||
509 | m->alloc_context_id = StackDepotPut(*stack); | ||||||||
510 | |||||||||
511 | uptr size_rounded_down_to_granularity = | ||||||||
512 | RoundDownTo(size, SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)); | ||||||||
513 | // Unpoison the bulk of the memory region. | ||||||||
514 | if (size_rounded_down_to_granularity) | ||||||||
515 | PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); | ||||||||
516 | // Deal with the end of the region if size is not aligned to granularity. | ||||||||
517 | if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { | ||||||||
518 | u8 *shadow = | ||||||||
519 | (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); | ||||||||
520 | *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY(1ULL << kDefaultShadowScale) - 1)) : 0; | ||||||||
521 | } | ||||||||
522 | |||||||||
523 | AsanStats &thread_stats = GetCurrentThreadStats(); | ||||||||
524 | thread_stats.mallocs++; | ||||||||
525 | thread_stats.malloced += size; | ||||||||
526 | thread_stats.malloced_redzones += needed_size - size; | ||||||||
527 | if (needed_size > SizeClassMap::kMaxSize) | ||||||||
528 | thread_stats.malloc_large++; | ||||||||
529 | else | ||||||||
530 | thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; | ||||||||
531 | |||||||||
532 | void *res = reinterpret_cast<void *>(user_beg); | ||||||||
533 | if (can_fill && fl.max_malloc_fill_size) { | ||||||||
534 | uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); | ||||||||
535 | REAL(memset)__interception::real_memset(res, fl.malloc_fill_byte, fill_size); | ||||||||
536 | } | ||||||||
537 | #if CAN_SANITIZE_LEAKS1 | ||||||||
538 | m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored | ||||||||
539 | : __lsan::kDirectlyLeaked; | ||||||||
540 | #endif | ||||||||
541 | // Must be the last mutation of metadata in this function. | ||||||||
542 | atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); | ||||||||
543 | ASAN_MALLOC_HOOK(res, size)do { if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook (res, size); RunMallocHooks(res, size); } while (false); | ||||||||
544 | return res; | ||||||||
545 | } | ||||||||
546 | |||||||||
547 | // Set quarantine flag if chunk is allocated, issue ASan error report on | ||||||||
548 | // available and quarantined chunks. Return true on success, false otherwise. | ||||||||
// Atomically transition a chunk from CHUNK_ALLOCATED to CHUNK_QUARANTINE.
// Returns true on success; on any other initial state (double free, wild
// free) reports the error and returns false so the caller must not touch
// the chunk's metadata.
bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                            BufferedStackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  // On failure, old_chunk_state is overwritten with the actual state,
  // which ReportInvalidFree uses to pick the right diagnostic.
  if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                      CHUNK_QUARANTINE,
                                      memory_order_acquire)) {
    ReportInvalidFree(ptr, old_chunk_state, stack);
    // It's not safe to push a chunk in quarantine on invalid free.
    return false;
  }
  // A successful CAS can only have started from the expected value.
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state)do { __sanitizer::u64 v1 = (__sanitizer::u64)((CHUNK_ALLOCATED)); __sanitizer::u64 v2 = (__sanitizer::u64)((old_chunk_state)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 560, "(" "(CHUNK_ALLOCATED)" ") " "==" " (" "(old_chunk_state)" ")", v1, v2); } while (false);
  return true;
}
563 | |||||||||
564 | // Expects the chunk to already be marked as quarantined by using | ||||||||
565 | // AtomicallySetQuarantineFlagIfAllocated. | ||||||||
// Records the free site, scribbles over the freed user memory, poisons the
// shadow, and pushes the chunk into the quarantine (which defers actual
// deallocation to catch use-after-free). Expects the chunk to already be
// marked as quarantined by AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m->chunk_state)); __sanitizer::u64 v2 = (__sanitizer::u64)((CHUNK_QUARANTINE)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 567, "(" "(m->chunk_state)" ") " "==" " (" "(CHUNK_QUARANTINE)" ")", v1, v2); } while (false);
  CHECK_GE(m->alloc_tid, 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m->alloc_tid)); __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 568, "(" "(m->alloc_tid)" ") " ">=" " (" "(0)" ")", v1, v2); } while (false);
  if (SANITIZER_WORDSIZE32 == 64) // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m->free_tid)); __sanitizer::u64 v2 = (__sanitizer::u64)((kInvalidTid)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 570, "(" "(m->free_tid)" ") " "==" " (" "(kInvalidTid)" ")", v1, v2); } while (false);
  AsanThread *t = GetCurrentThread();
  // Thread 0 is credited when freeing happens before the thread registry
  // knows about the current thread.
  m->free_tid = t ? t->tid() : 0;
  m->free_context_id = StackDepotPut(*stack);

  Flags &fl = *flags();
  if (fl.max_free_fill_size > 0) {
    // We have to skip the chunk header, it contains free_context_id.
    uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
    if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
      uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
      size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
      REAL(memset)__interception::real_memset((void *)scribble_start, fl.free_fill_byte, size_to_fill);
    }
  }

  // Poison the region so any later access reports heap-use-after-free.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine. Use the thread-local cache when a thread exists,
  // otherwise fall back to the global cache under fallback_mutex.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                   m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                   m, m->UsedSize());
  }
}
608 | |||||||||
// Common free path for free()/delete/delete[]. delete_size/delete_alignment
// are the values passed to sized/aligned operator delete (0 when unknown)
// and are checked against the allocation when new_delete_type_mismatch is on.
void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                BufferedStackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  // free(nullptr) is a no-op.
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
  // malloc. Don't report an invalid free in this case.
  if (SANITIZER_WINDOWS0 &&
      !get_allocator().PointerIsMine(ptr)) {
    if (!IsSystemHeapAddress(p))
      ReportFreeNotMalloced(p, stack);
    return;
  }

  ASAN_FREE_HOOK(ptr)do { if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); RunFreeHooks(ptr); } while (false);

  // Must mark the chunk as quarantined before any changes to its metadata.
  // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
  if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

  // malloc/free vs. new/delete vs. new[]/delete[] mismatch check.
  if (m->alloc_type != alloc_type) {
    if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
      ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                              (AllocType)alloc_type);
    }
  } else {
    // Types match; optionally verify sized/aligned delete arguments.
    if (flags()->new_delete_type_mismatch &&
        (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
        ((delete_size && delete_size != m->UsedSize()) ||
         ComputeUserRequestedAlignmentLog(delete_alignment) !=
             m->user_requested_alignment_log)) {
      ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
    }
  }

  QuarantineChunk(m, ptr, stack);
}
649 | |||||||||
// realloc() core: allocate a new block, copy min(old, new) bytes, then free
// the old block. Callers guarantee old_ptr != nullptr and new_size != 0
// (asan_realloc handles those cases first).
void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
  CHECK(old_ptr && new_size)do { __sanitizer::u64 v1 = (__sanitizer::u64)((old_ptr && new_size)); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 651, "(" "(old_ptr && new_size)" ") " "!=" " (" "0" ")", v1, v2); } while (false);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    // The old chunk must still be live; anything else is an invalid free
    // (e.g. realloc of an already-freed pointer).
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), nullptr)do { __sanitizer::u64 v1 = (__sanitizer::u64)((__interception::real_memcpy)); __sanitizer::u64 v2 = (__sanitizer::u64)((nullptr)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 665, "(" "(__interception::real_memcpy)" ") " "!=" " (" "(nullptr)" ")", v1, v2); } while (false);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)__interception::real_memcpy(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
  }
  // On allocation failure the old block is left untouched, per realloc().
  return new_ptr;
}
674 | |||||||||
// calloc() core: overflow-checked nmemb*size allocation, zero-filled.
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))__builtin_expect(!!(CheckForCallocOverflow(size, nmemb)), 0)) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  // can_fill=false: the block is zeroed below, not pattern-filled.
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)__interception::real_memset(ptr, 0, nmemb * size);
  return ptr;
}
688 | |||||||||
689 | void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { | ||||||||
690 | if (chunk_state == CHUNK_QUARANTINE) | ||||||||
691 | ReportDoubleFree((uptr)ptr, stack); | ||||||||
692 | else | ||||||||
693 | ReportFreeNotMalloced((uptr)ptr, stack); | ||||||||
694 | } | ||||||||
695 | |||||||||
// Called on thread teardown: drain the thread's quarantine cache first
// (it may return blocks to the allocator cache), then return the allocator
// cache's memory to the global allocator.
void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
  AllocatorCache *ac = GetAllocatorCache(ms);
  quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
  allocator.SwallowCache(ac);
}
701 | |||||||||
702 | // -------------------------- Chunk lookup ---------------------- | ||||||||
703 | |||||||||
704 | // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). | ||||||||
705 | AsanChunk *GetAsanChunk(void *alloc_beg) { | ||||||||
706 | if (!alloc_beg) return nullptr; | ||||||||
707 | if (!allocator.FromPrimary(alloc_beg)) { | ||||||||
708 | uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); | ||||||||
709 | AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); | ||||||||
710 | return m; | ||||||||
711 | } | ||||||||
712 | uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); | ||||||||
713 | if (alloc_magic[0] == kAllocBegMagic) | ||||||||
714 | return reinterpret_cast<AsanChunk *>(alloc_magic[1]); | ||||||||
715 | return reinterpret_cast<AsanChunk *>(alloc_beg); | ||||||||
716 | } | ||||||||
717 | |||||||||
718 | AsanChunk *GetAsanChunkByAddr(uptr p) { | ||||||||
719 | void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); | ||||||||
720 | return GetAsanChunk(alloc_beg); | ||||||||
721 | } | ||||||||
722 | |||||||||
723 | // Allocator must be locked when this function is called. | ||||||||
724 | AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { | ||||||||
725 | void *alloc_beg = | ||||||||
726 | allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); | ||||||||
727 | return GetAsanChunk(alloc_beg); | ||||||||
728 | } | ||||||||
729 | |||||||||
730 | uptr AllocationSize(uptr p) { | ||||||||
731 | AsanChunk *m = GetAsanChunkByAddr(p); | ||||||||
732 | if (!m) return 0; | ||||||||
733 | if (m->chunk_state != CHUNK_ALLOCATED) return 0; | ||||||||
734 | if (m->Beg() != p) return 0; | ||||||||
735 | return m->UsedSize(); | ||||||||
736 | } | ||||||||
737 | |||||||||
// Find the chunk best describing addr for error reporting. If addr falls in
// a chunk's left redzone, the more likely explanation may be an overflow
// off the END of the previous chunk, so search left for a neighbor and let
// ChooseChunk pick the better candidate.
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = nullptr;
    // Scan at most one page leftwards; stop at the first distinct chunk.
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}
757 | |||||||||
// Release as much memory back to the OS as possible: drain the current
// thread's quarantine, then the global fallback quarantine, then ask the
// allocator to return free pages.
void Purge(BufferedStackTrace *stack) {
  AsanThread *t = GetCurrentThread();
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                               QuarantineCallback(GetAllocatorCache(ms),
                                                  stack));
  }
  {
    // Scoped so fallback_mutex is released before ForceReleaseToOS.
    SpinMutexLock l(&fallback_mutex);
    quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                               QuarantineCallback(&fallback_allocator_cache,
                                                  stack));
  }

  allocator.ForceReleaseToOS();
}
775 | |||||||||
// Dump allocator and quarantine statistics (debugging aid).
void PrintStats() {
  allocator.PrintStats();
  quarantine.PrintStats();
}
780 | |||||||||
// Acquire all allocator locks (e.g. before fork). Lock order — allocator
// first, then fallback_mutex — must mirror ForceUnlock's reverse order.
void ForceLock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}
785 | |||||||||
// Release locks taken by ForceLock, in reverse acquisition order.
void ForceUnlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}
790 | }; | ||||||||
791 | |||||||||
// The process-wide allocator singleton; linker-initialized so it is usable
// before static constructors run.
static Allocator instance(LINKER_INITIALIZED);
793 | |||||||||
// Accessor for the singleton's underlying sanitizer_common allocator.
static AsanAllocator &get_allocator() {
  return instance.allocator;
}
797 | |||||||||
798 | bool AsanChunkView::IsValid() const { | ||||||||
799 | return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; | ||||||||
800 | } | ||||||||
801 | bool AsanChunkView::IsAllocated() const { | ||||||||
802 | return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED; | ||||||||
803 | } | ||||||||
804 | bool AsanChunkView::IsQuarantined() const { | ||||||||
805 | return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE; | ||||||||
806 | } | ||||||||
// First byte of the chunk's user payload.
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
// One past the last byte of the user payload.
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
// Size the user originally requested for this chunk.
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
// Decode the alignment the user requested (stored as a log2 in the header).
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
// Id of the thread that allocated this chunk.
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
// Id of the thread that freed this chunk (kInvalidTid if still live).
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
// How the chunk was allocated (malloc / new / new[] / memalign).
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}
818 | |||||||||
// Fetch a stack trace previously interned in the stack depot; the id must
// be a valid depot handle and the depot must still hold its frames.
static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id)do { __sanitizer::u64 v1 = (__sanitizer::u64)((id)); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 820, "(" "(id)" ") " "!=" " (" "0" ")", v1, v2); } while (false);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace)do { __sanitizer::u64 v1 = (__sanitizer::u64)((res.trace)); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 822, "(" "(res.trace)" ") " "!=" " (" "0" ")", v1, v2); } while (false);
  return res;
}
825 | |||||||||
// Stack-depot id of the allocation stack trace.
u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
// Stack-depot id of the deallocation stack trace.
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
828 | |||||||||
829 | StackTrace AsanChunkView::GetAllocStack() const { | ||||||||
830 | return GetStackTraceFromId(GetAllocStackId()); | ||||||||
831 | } | ||||||||
832 | |||||||||
833 | StackTrace AsanChunkView::GetFreeStack() const { | ||||||||
834 | return GetStackTraceFromId(GetFreeStackId()); | ||||||||
835 | } | ||||||||
836 | |||||||||
// One-time allocator setup with runtime options; delegates to the singleton.
void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}
840 | |||||||||
// Re-apply allocator options after a flags change.
void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}
844 | |||||||||
// Read back the allocator's current option values into *options.
void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}
848 | |||||||||
// Public wrapper: find the chunk best describing addr (see the Allocator
// method of the same name for the redzone-disambiguation logic).
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
// Like FindHeapChunkByAddress, but addr must already be a block start.
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}
855 | |||||||||
// Return this (dying) thread's cached memory to the global allocator,
// capturing the current stack for any quarantine recycling reports.
void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOCBufferedStackTrace stack; if (GetMallocContextSize() <= 2) { stack.size = GetMallocContextSize(); if (GetMallocContextSize() > 0) { stack.top_frame_bp = (__sanitizer::uptr) __builtin_frame_address(0); stack.trace_buffer[0] = StackTrace::GetCurrentPc(); if (GetMallocContextSize() > 1) stack.trace_buffer[1] = (__sanitizer::uptr) __builtin_return_address(0); } } else { stack.Unwind(StackTrace::GetCurrentPc(), (__sanitizer::uptr) __builtin_frame_address(0), nullptr, common_flags()->fast_unwind_on_malloc, GetMallocContextSize()); };
  instance.CommitBack(this, &stack);
}
860 | |||||||||
// Debugging aid: dump allocator + quarantine statistics to stderr.
void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
864 | |||||||||
// free() interceptor entry point; size/alignment are unknown (0).
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}
868 | |||||||||
// operator delete entry point; carries the sized/aligned delete arguments
// so Deallocate can check them against the allocation.
void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}
873 | |||||||||
874 | void *asan_malloc(uptr size, BufferedStackTrace *stack) { | ||||||||
875 | return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); | ||||||||
876 | } | ||||||||
877 | |||||||||
// calloc() entry point; sets errno to ENOMEM when returning nullptr.
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
881 | |||||||||
// reallocarray() entry point: realloc with an overflow-checked nmemb*size.
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))__builtin_expect(!!(CheckForCallocOverflow(size, nmemb)), 0)) {
    errno(*__errno_location()) = errno_ENOMEM12;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  // Product cannot overflow past this point.
  return asan_realloc(p, nmemb * size, stack);
}
892 | |||||||||
// realloc() entry point, implementing the standard edge cases:
// realloc(nullptr, n) == malloc(n); realloc(p, 0) either frees and returns
// nullptr or degrades to a 1-byte allocation, depending on the flag.
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
906 | |||||||||
907 | void *asan_valloc(uptr size, BufferedStackTrace *stack) { | ||||||||
908 | return SetErrnoOnNull( | ||||||||
909 | instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true)); | ||||||||
910 | } | ||||||||
911 | |||||||||
// pvalloc() entry point: size rounded up to a whole number of pages,
// page-aligned; overflow in the rounding is reported or returns nullptr.
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))__builtin_expect(!!(CheckForPvallocOverflow(size, PageSize)), 0)) {
    errno(*__errno_location()) = errno_ENOMEM12;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
925 | |||||||||
// memalign() entry point: alignment must be a power of two, otherwise
// EINVAL (or a fatal report when allocator_may_return_null=0).
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))__builtin_expect(!!(!IsPowerOfTwo(alignment)), 0)) {
    errno(*__errno_location()) = errno_EINVAL22;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}
937 | |||||||||
// C11 aligned_alloc() entry point: validates the alignment/size contract
// (power-of-two alignment, size a multiple of alignment) before allocating.
void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))__builtin_expect(!!(!CheckAlignedAllocAlignmentAndSize(alignment, size)), 0)) {
    errno(*__errno_location()) = errno_EINVAL22;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
948 | |||||||||
// posix_memalign() entry point. Returns 0 on success, EINVAL for a bad
// alignment, ENOMEM on allocation failure; *memptr is written only on
// success, per POSIX.
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))__builtin_expect(!!(!CheckPosixMemalignAlignment(alignment)), 0)) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL22;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr)__builtin_expect(!!(!ptr), 0))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM12;
  CHECK(IsAligned((uptr)ptr, alignment))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsAligned((uptr)ptr, alignment))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp", 960, "(" "(IsAligned((uptr)ptr, alignment))" ") " "!=" " (" "0" ")", v1, v2); } while (false);
  *memptr = ptr;
  return 0;
}
964 | |||||||||
965 | uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { | ||||||||
966 | if (!ptr) return 0; | ||||||||
967 | uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr)); | ||||||||
968 | if (flags()->check_malloc_usable_size && (usable_size == 0)) { | ||||||||
969 | GET_STACK_TRACE_FATAL(pc, bp)BufferedStackTrace stack; stack.Unwind(pc, bp, nullptr, common_flags ()->fast_unwind_on_fatal); | ||||||||
970 | ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); | ||||||||
971 | } | ||||||||
972 | return usable_size; | ||||||||
973 | } | ||||||||
974 | |||||||||
975 | uptr asan_mz_size(const void *ptr) { | ||||||||
976 | return instance.AllocationSize(reinterpret_cast<uptr>(ptr)); | ||||||||
977 | } | ||||||||
978 | |||||||||
979 | void asan_mz_force_lock() { | ||||||||
980 | instance.ForceLock(); | ||||||||
981 | } | ||||||||
982 | |||||||||
983 | void asan_mz_force_unlock() { | ||||||||
984 | instance.ForceUnlock(); | ||||||||
985 | } | ||||||||
986 | |||||||||
987 | void AsanSoftRssLimitExceededCallback(bool limit_exceeded) { | ||||||||
988 | instance.SetRssLimitExceeded(limit_exceeded); | ||||||||
989 | } | ||||||||
990 | |||||||||
991 | } // namespace __asan | ||||||||
992 | |||||||||
993 | // --- Implementation of LSan-specific functions --- {{{1 | ||||||||
994 | namespace __lsan { | ||||||||
995 | void LockAllocator() { | ||||||||
996 | __asan::get_allocator().ForceLock(); | ||||||||
997 | } | ||||||||
998 | |||||||||
999 | void UnlockAllocator() { | ||||||||
1000 | __asan::get_allocator().ForceUnlock(); | ||||||||
1001 | } | ||||||||
1002 | |||||||||
1003 | void GetAllocatorGlobalRange(uptr *begin, uptr *end) { | ||||||||
1004 | *begin = (uptr)&__asan::get_allocator(); | ||||||||
1005 | *end = *begin + sizeof(__asan::get_allocator()); | ||||||||
1006 | } | ||||||||
1007 | |||||||||
1008 | uptr PointsIntoChunk(void* p) { | ||||||||
1009 | uptr addr = reinterpret_cast<uptr>(p); | ||||||||
1010 | __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); | ||||||||
1011 | if (!m) return 0; | ||||||||
1012 | uptr chunk = m->Beg(); | ||||||||
1013 | if (m->chunk_state != __asan::CHUNK_ALLOCATED) | ||||||||
1014 | return 0; | ||||||||
1015 | if (m->AddrIsInside(addr, /*locked_version=*/true)) | ||||||||
1016 | return chunk; | ||||||||
1017 | if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), | ||||||||
1018 | addr)) | ||||||||
1019 | return chunk; | ||||||||
1020 | return 0; | ||||||||
1021 | } | ||||||||
1022 | |||||||||
1023 | uptr GetUserBegin(uptr chunk) { | ||||||||
1024 | __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); | ||||||||
1025 | CHECK(m)do { __sanitizer::u64 v1 = (__sanitizer::u64)((m)); __sanitizer ::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/asan_allocator.cpp" , 1025, "(" "(m)" ") " "!=" " (" "0" ")", v1, v2); } while (false ); | ||||||||
1026 | return m->Beg(); | ||||||||
1027 | } | ||||||||
1028 | |||||||||
1029 | LsanMetadata::LsanMetadata(uptr chunk) { | ||||||||
1030 | metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); | ||||||||
1031 | } | ||||||||
1032 | |||||||||
1033 | bool LsanMetadata::allocated() const { | ||||||||
1034 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1035 | return m->chunk_state == __asan::CHUNK_ALLOCATED; | ||||||||
1036 | } | ||||||||
1037 | |||||||||
1038 | ChunkTag LsanMetadata::tag() const { | ||||||||
1039 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1040 | return static_cast<ChunkTag>(m->lsan_tag); | ||||||||
1041 | } | ||||||||
1042 | |||||||||
1043 | void LsanMetadata::set_tag(ChunkTag value) { | ||||||||
1044 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1045 | m->lsan_tag = value; | ||||||||
1046 | } | ||||||||
1047 | |||||||||
1048 | uptr LsanMetadata::requested_size() const { | ||||||||
1049 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1050 | return m->UsedSize(/*locked_version=*/true); | ||||||||
1051 | } | ||||||||
1052 | |||||||||
1053 | u32 LsanMetadata::stack_trace_id() const { | ||||||||
1054 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); | ||||||||
1055 | return m->alloc_context_id; | ||||||||
1056 | } | ||||||||
1057 | |||||||||
1058 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | ||||||||
1059 | __asan::get_allocator().ForEachChunk(callback, arg); | ||||||||
1060 | } | ||||||||
1061 | |||||||||
1062 | IgnoreObjectResult IgnoreObjectLocked(const void *p) { | ||||||||
1063 | uptr addr = reinterpret_cast<uptr>(p); | ||||||||
1064 | __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); | ||||||||
1065 | if (!m) return kIgnoreObjectInvalid; | ||||||||
1066 | if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { | ||||||||
1067 | if (m->lsan_tag == kIgnored) | ||||||||
1068 | return kIgnoreObjectAlreadyIgnored; | ||||||||
1069 | m->lsan_tag = __lsan::kIgnored; | ||||||||
1070 | return kIgnoreObjectSuccess; | ||||||||
1071 | } else { | ||||||||
1072 | return kIgnoreObjectInvalid; | ||||||||
1073 | } | ||||||||
1074 | } | ||||||||
1075 | } // namespace __lsan | ||||||||
1076 | |||||||||
1077 | // ---------------------- Interface ---------------- {{{1 | ||||||||
1078 | using namespace __asan; | ||||||||
1079 | |||||||||
1080 | // ASan allocator doesn't reserve extra bytes, so normally we would | ||||||||
1081 | // just return "size". We don't want to expose our redzone sizes, etc here. | ||||||||
1082 | uptr __sanitizer_get_estimated_allocated_size(uptr size) { | ||||||||
1083 | return size; | ||||||||
1084 | } | ||||||||
1085 | |||||||||
1086 | int __sanitizer_get_ownership(const void *p) { | ||||||||
1087 | uptr ptr = reinterpret_cast<uptr>(p); | ||||||||
1088 | return instance.AllocationSize(ptr) > 0; | ||||||||
1089 | } | ||||||||
1090 | |||||||||
1091 | uptr __sanitizer_get_allocated_size(const void *p) { | ||||||||
1092 | if (!p) return 0; | ||||||||
1093 | uptr ptr = reinterpret_cast<uptr>(p); | ||||||||
1094 | uptr allocated_size = instance.AllocationSize(ptr); | ||||||||
1095 | // Die if p is not malloced or if it is already freed. | ||||||||
1096 | if (allocated_size == 0) { | ||||||||
1097 | GET_STACK_TRACE_FATAL_HEREBufferedStackTrace stack; if (kStackTraceMax <= 2) { stack .size = kStackTraceMax; if (kStackTraceMax > 0) { stack.top_frame_bp = (__sanitizer::uptr) __builtin_frame_address(0); stack.trace_buffer [0] = StackTrace::GetCurrentPc(); if (kStackTraceMax > 1) stack .trace_buffer[1] = (__sanitizer::uptr) __builtin_return_address (0); } } else { stack.Unwind(StackTrace::GetCurrentPc(), (__sanitizer ::uptr) __builtin_frame_address(0), nullptr, common_flags()-> fast_unwind_on_fatal, kStackTraceMax); }; | ||||||||
1098 | ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); | ||||||||
1099 | } | ||||||||
1100 | return allocated_size; | ||||||||
1101 | } | ||||||||
1102 | |||||||||
1103 | void __sanitizer_purge_allocator() { | ||||||||
1104 | GET_STACK_TRACE_MALLOCBufferedStackTrace stack; if (GetMallocContextSize() <= 2) { stack.size = GetMallocContextSize(); if (GetMallocContextSize () > 0) { stack.top_frame_bp = (__sanitizer::uptr) __builtin_frame_address (0); stack.trace_buffer[0] = StackTrace::GetCurrentPc(); if ( GetMallocContextSize() > 1) stack.trace_buffer[1] = (__sanitizer ::uptr) __builtin_return_address(0); } } else { stack.Unwind( StackTrace::GetCurrentPc(), (__sanitizer::uptr) __builtin_frame_address (0), nullptr, common_flags()->fast_unwind_on_malloc, GetMallocContextSize ()); }; | ||||||||
1105 | instance.Purge(&stack); | ||||||||
1106 | } | ||||||||
1107 | |||||||||
1108 | #if !SANITIZER_SUPPORTS_WEAK_HOOKS1 | ||||||||
1109 | // Provide default (no-op) implementation of malloc hooks. | ||||||||
1110 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,extern "C" __attribute__((visibility("default"))) __attribute__ ((weak)) void __sanitizer_malloc_hook(void *ptr, uptr size) | ||||||||
1111 | void *ptr, uptr size)extern "C" __attribute__((visibility("default"))) __attribute__ ((weak)) void __sanitizer_malloc_hook(void *ptr, uptr size) { | ||||||||
1112 | (void)ptr; | ||||||||
1113 | (void)size; | ||||||||
1114 | } | ||||||||
1115 | |||||||||
1116 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr)extern "C" __attribute__((visibility("default"))) __attribute__ ((weak)) void __sanitizer_free_hook(void *ptr) { | ||||||||
1117 | (void)ptr; | ||||||||
1118 | } | ||||||||
1119 | #endif |
1 | //===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // This file is a part of ThreadSanitizer/AddressSanitizer runtime. | ||||||||
10 | // Not intended for direct inclusion. Include sanitizer_atomic.h. | ||||||||
11 | // | ||||||||
12 | //===----------------------------------------------------------------------===// | ||||||||
13 | |||||||||
14 | #ifndef SANITIZER_ATOMIC_CLANG_X86_H | ||||||||
15 | #define SANITIZER_ATOMIC_CLANG_X86_H | ||||||||
16 | |||||||||
17 | namespace __sanitizer { | ||||||||
18 | |||||||||
19 | INLINEinline void proc_yield(int cnt) { | ||||||||
20 | __asm__ __volatile__("" ::: "memory"); | ||||||||
21 | for (int i = 0; i < cnt; i++) | ||||||||
22 | __asm__ __volatile__("pause"); | ||||||||
23 | __asm__ __volatile__("" ::: "memory"); | ||||||||
24 | } | ||||||||
25 | |||||||||
26 | template<typename T> | ||||||||
27 | INLINEinline typename T::Type atomic_load( | ||||||||
28 | const volatile T *a, memory_order mo) { | ||||||||
29 | DCHECK(mo & (memory_order_relaxed | memory_order_consume | ||||||||
30 | | memory_order_acquire | memory_order_seq_cst)); | ||||||||
31 | DCHECK(!((uptr)a % sizeof(*a))); | ||||||||
32 | typename T::Type v; | ||||||||
33 | |||||||||
34 | if (sizeof(*a) < 8 || sizeof(void*) == 8) { | ||||||||
35 | // Assume that aligned loads are atomic. | ||||||||
36 | if (mo
| ||||||||
37 | v = a->val_dont_use; | ||||||||
38 | } else if (mo == memory_order_consume) { | ||||||||
39 | // Assume that processor respects data dependencies | ||||||||
40 | // (and that compiler won't break them). | ||||||||
41 | __asm__ __volatile__("" ::: "memory"); | ||||||||
42 | v = a->val_dont_use; | ||||||||
43 | __asm__ __volatile__("" ::: "memory"); | ||||||||
44 | } else if (mo == memory_order_acquire) { | ||||||||
45 | __asm__ __volatile__("" ::: "memory"); | ||||||||
46 | v = a->val_dont_use; | ||||||||
47 | // On x86 loads are implicitly acquire. | ||||||||
48 | __asm__ __volatile__("" ::: "memory"); | ||||||||
49 | } else { // seq_cst | ||||||||
50 | // On x86 plain MOV is enough for seq_cst store. | ||||||||
51 | __asm__ __volatile__("" ::: "memory"); | ||||||||
52 | v = a->val_dont_use; | ||||||||
53 | __asm__ __volatile__("" ::: "memory"); | ||||||||
54 | } | ||||||||
55 | } else { | ||||||||
56 | // 64-bit load on 32-bit platform. | ||||||||
57 | __asm__ __volatile__( | ||||||||
58 | "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves | ||||||||
59 | "movq %%mm0, %0;" // (ptr could be read-only) | ||||||||
60 | "emms;" // Empty mmx state/Reset FP regs | ||||||||
61 | : "=m" (v) | ||||||||
62 | : "m" (a->val_dont_use) | ||||||||
63 | : // mark the mmx registers as clobbered | ||||||||
64 | #ifdef __MMX__ | ||||||||
65 | "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", | ||||||||
66 | #endif // #ifdef __MMX__ | ||||||||
67 | "memory"); | ||||||||
68 | } | ||||||||
69 | return v; | ||||||||
70 | } | ||||||||
71 | |||||||||
72 | template<typename T> | ||||||||
73 | INLINEinline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { | ||||||||
74 | DCHECK(mo & (memory_order_relaxed | memory_order_release | ||||||||
75 | | memory_order_seq_cst)); | ||||||||
76 | DCHECK(!((uptr)a % sizeof(*a))); | ||||||||
77 | |||||||||
78 | if (sizeof(*a) < 8 || sizeof(void*) == 8) { | ||||||||
79 | // Assume that aligned loads are atomic. | ||||||||
80 | if (mo == memory_order_relaxed) { | ||||||||
81 | a->val_dont_use = v; | ||||||||
82 | } else if (mo == memory_order_release) { | ||||||||
83 | // On x86 stores are implicitly release. | ||||||||
84 | __asm__ __volatile__("" ::: "memory"); | ||||||||
85 | a->val_dont_use = v; | ||||||||
86 | __asm__ __volatile__("" ::: "memory"); | ||||||||
87 | } else { // seq_cst | ||||||||
88 | // On x86 stores are implicitly release. | ||||||||
89 | __asm__ __volatile__("" ::: "memory"); | ||||||||
90 | a->val_dont_use = v; | ||||||||
91 | __sync_synchronize(); | ||||||||
92 | } | ||||||||
93 | } else { | ||||||||
94 | // 64-bit store on 32-bit platform. | ||||||||
95 | __asm__ __volatile__( | ||||||||
96 | "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves | ||||||||
97 | "movq %%mm0, %0;" | ||||||||
98 | "emms;" // Empty mmx state/Reset FP regs | ||||||||
99 | : "=m" (a->val_dont_use) | ||||||||
100 | : "m" (v) | ||||||||
101 | : // mark the mmx registers as clobbered | ||||||||
102 | #ifdef __MMX__ | ||||||||
103 | "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", | ||||||||
104 | #endif // #ifdef __MMX__ | ||||||||
105 | "memory"); | ||||||||
106 | if (mo == memory_order_seq_cst) | ||||||||
107 | __sync_synchronize(); | ||||||||
108 | } | ||||||||
109 | } | ||||||||
110 | |||||||||
111 | } // namespace __sanitizer | ||||||||
112 | |||||||||
113 | #endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H |
1 | //===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // Part of the Sanitizer Allocator. | ||||||||
10 | // | ||||||||
11 | //===----------------------------------------------------------------------===// | ||||||||
12 | #ifndef SANITIZER_ALLOCATOR_H | ||||||||
13 | #error This file must be included inside sanitizer_allocator.h | ||||||||
14 | #endif | ||||||||
15 | |||||||||
16 | // This class implements a complete memory allocator by using two | ||||||||
17 | // internal allocators: | ||||||||
18 | // PrimaryAllocator is efficient, but may not allocate some sizes (alignments). | ||||||||
19 | // When allocating 2^x bytes it should return 2^x aligned chunk. | ||||||||
20 | // PrimaryAllocator is used via a local AllocatorCache. | ||||||||
21 | // SecondaryAllocator can allocate anything, but is not efficient. | ||||||||
22 | template <class PrimaryAllocator, | ||||||||
23 | class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray> | ||||||||
24 | class CombinedAllocator { | ||||||||
25 | public: | ||||||||
26 | using AllocatorCache = typename PrimaryAllocator::AllocatorCache; | ||||||||
27 | using SecondaryAllocator = | ||||||||
28 | LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback, | ||||||||
29 | LargeMmapAllocatorPtrArray, | ||||||||
30 | typename PrimaryAllocator::AddressSpaceView>; | ||||||||
31 | |||||||||
32 | void InitLinkerInitialized(s32 release_to_os_interval_ms) { | ||||||||
33 | stats_.InitLinkerInitialized(); | ||||||||
34 | primary_.Init(release_to_os_interval_ms); | ||||||||
35 | secondary_.InitLinkerInitialized(); | ||||||||
36 | } | ||||||||
37 | |||||||||
38 | void Init(s32 release_to_os_interval_ms) { | ||||||||
39 | stats_.Init(); | ||||||||
40 | primary_.Init(release_to_os_interval_ms); | ||||||||
41 | secondary_.Init(); | ||||||||
42 | } | ||||||||
43 | |||||||||
44 | void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) { | ||||||||
45 | // Returning 0 on malloc(0) may break a lot of code. | ||||||||
46 | if (size == 0) | ||||||||
47 | size = 1; | ||||||||
48 | if (size + alignment < size) { | ||||||||
49 | Report("WARNING: %s: CombinedAllocator allocation overflow: " | ||||||||
50 | "0x%zx bytes with 0x%zx alignment requested\n", | ||||||||
51 | SanitizerToolName, size, alignment); | ||||||||
52 | return nullptr; | ||||||||
53 | } | ||||||||
54 | uptr original_size = size; | ||||||||
55 | // If alignment requirements are to be fulfilled by the frontend allocator | ||||||||
56 | // rather than by the primary or secondary, passing an alignment lower than | ||||||||
57 | // or equal to 8 will prevent any further rounding up, as well as the later | ||||||||
58 | // alignment check. | ||||||||
59 | if (alignment
| ||||||||
60 | size = RoundUpTo(size, alignment); | ||||||||
61 | // The primary allocator should return a 2^x aligned allocation when | ||||||||
62 | // requested 2^x bytes, hence using the rounded up 'size' when being | ||||||||
63 | // serviced by the primary (this is no longer true when the primary is | ||||||||
64 | // using a non-fixed base address). The secondary takes care of the | ||||||||
65 | // alignment without such requirement, and allocating 'size' would use | ||||||||
66 | // extraneous memory, so we employ 'original_size'. | ||||||||
67 | void *res; | ||||||||
68 | if (primary_.CanAllocate(size, alignment)) | ||||||||
69 | res = cache->Allocate(&primary_, primary_.ClassID(size)); | ||||||||
70 | else | ||||||||
71 | res = secondary_.Allocate(&stats_, original_size, alignment); | ||||||||
72 | if (alignment
| ||||||||
73 | CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((reinterpret_cast <uptr>(res) & (alignment - 1))); __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect(!!(!(v1 == v2 )), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h" , 73, "(" "(reinterpret_cast<uptr>(res) & (alignment - 1))" ") " "==" " (" "(0)" ")", v1, v2); } while (false); | ||||||||
74 | return res; | ||||||||
75 | } | ||||||||
76 | |||||||||
77 | s32 ReleaseToOSIntervalMs() const { | ||||||||
78 | return primary_.ReleaseToOSIntervalMs(); | ||||||||
79 | } | ||||||||
80 | |||||||||
81 | void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) { | ||||||||
82 | primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms); | ||||||||
83 | } | ||||||||
84 | |||||||||
85 | void ForceReleaseToOS() { | ||||||||
86 | primary_.ForceReleaseToOS(); | ||||||||
87 | } | ||||||||
88 | |||||||||
89 | void Deallocate(AllocatorCache *cache, void *p) { | ||||||||
90 | if (!p) return; | ||||||||
91 | if (primary_.PointerIsMine(p)) | ||||||||
92 | cache->Deallocate(&primary_, primary_.GetSizeClass(p), p); | ||||||||
93 | else | ||||||||
94 | secondary_.Deallocate(&stats_, p); | ||||||||
95 | } | ||||||||
96 | |||||||||
97 | void *Reallocate(AllocatorCache *cache, void *p, uptr new_size, | ||||||||
98 | uptr alignment) { | ||||||||
99 | if (!p) | ||||||||
100 | return Allocate(cache, new_size, alignment); | ||||||||
101 | if (!new_size) { | ||||||||
102 | Deallocate(cache, p); | ||||||||
103 | return nullptr; | ||||||||
104 | } | ||||||||
105 | CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine( p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h" , 105, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2 ); } while (false); | ||||||||
106 | uptr old_size = GetActuallyAllocatedSize(p); | ||||||||
107 | uptr memcpy_size = Min(new_size, old_size); | ||||||||
108 | void *new_p = Allocate(cache, new_size, alignment); | ||||||||
109 | if (new_p) | ||||||||
110 | internal_memcpy(new_p, p, memcpy_size); | ||||||||
111 | Deallocate(cache, p); | ||||||||
112 | return new_p; | ||||||||
113 | } | ||||||||
114 | |||||||||
115 | bool PointerIsMine(void *p) { | ||||||||
116 | if (primary_.PointerIsMine(p)) | ||||||||
117 | return true; | ||||||||
118 | return secondary_.PointerIsMine(p); | ||||||||
119 | } | ||||||||
120 | |||||||||
121 | bool FromPrimary(void *p) { | ||||||||
122 | return primary_.PointerIsMine(p); | ||||||||
123 | } | ||||||||
124 | |||||||||
125 | void *GetMetaData(const void *p) { | ||||||||
126 | if (primary_.PointerIsMine(p)) | ||||||||
127 | return primary_.GetMetaData(p); | ||||||||
128 | return secondary_.GetMetaData(p); | ||||||||
129 | } | ||||||||
130 | |||||||||
131 | void *GetBlockBegin(const void *p) { | ||||||||
132 | if (primary_.PointerIsMine(p)) | ||||||||
133 | return primary_.GetBlockBegin(p); | ||||||||
134 | return secondary_.GetBlockBegin(p); | ||||||||
135 | } | ||||||||
136 | |||||||||
137 | // This function does the same as GetBlockBegin, but is much faster. | ||||||||
138 | // Must be called with the allocator locked. | ||||||||
139 | void *GetBlockBeginFastLocked(void *p) { | ||||||||
140 | if (primary_.PointerIsMine(p)) | ||||||||
141 | return primary_.GetBlockBegin(p); | ||||||||
142 | return secondary_.GetBlockBeginFastLocked(p); | ||||||||
143 | } | ||||||||
144 | |||||||||
145 | uptr GetActuallyAllocatedSize(void *p) { | ||||||||
146 | if (primary_.PointerIsMine(p)) | ||||||||
147 | return primary_.GetActuallyAllocatedSize(p); | ||||||||
148 | return secondary_.GetActuallyAllocatedSize(p); | ||||||||
149 | } | ||||||||
150 | |||||||||
151 | uptr TotalMemoryUsed() { | ||||||||
152 | return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed(); | ||||||||
153 | } | ||||||||
154 | |||||||||
155 | void TestOnlyUnmap() { primary_.TestOnlyUnmap(); } | ||||||||
156 | |||||||||
157 | void InitCache(AllocatorCache *cache) { | ||||||||
158 | cache->Init(&stats_); | ||||||||
159 | } | ||||||||
160 | |||||||||
161 | void DestroyCache(AllocatorCache *cache) { | ||||||||
162 | cache->Destroy(&primary_, &stats_); | ||||||||
163 | } | ||||||||
164 | |||||||||
165 | void SwallowCache(AllocatorCache *cache) { | ||||||||
166 | cache->Drain(&primary_); | ||||||||
167 | } | ||||||||
168 | |||||||||
169 | void GetStats(AllocatorStatCounters s) const { | ||||||||
170 | stats_.Get(s); | ||||||||
171 | } | ||||||||
172 | |||||||||
173 | void PrintStats() { | ||||||||
174 | primary_.PrintStats(); | ||||||||
175 | secondary_.PrintStats(); | ||||||||
176 | } | ||||||||
177 | |||||||||
178 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone | ||||||||
179 | // introspection API. | ||||||||
180 | void ForceLock() { | ||||||||
181 | primary_.ForceLock(); | ||||||||
182 | secondary_.ForceLock(); | ||||||||
183 | } | ||||||||
184 | |||||||||
185 | void ForceUnlock() { | ||||||||
186 | secondary_.ForceUnlock(); | ||||||||
187 | primary_.ForceUnlock(); | ||||||||
188 | } | ||||||||
189 | |||||||||
190 | // Iterate over all existing chunks. | ||||||||
191 | // The allocator must be locked when calling this function. | ||||||||
192 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | ||||||||
193 | primary_.ForEachChunk(callback, arg); | ||||||||
194 | secondary_.ForEachChunk(callback, arg); | ||||||||
195 | } | ||||||||
196 | |||||||||
197 | private: | ||||||||
198 | PrimaryAllocator primary_; | ||||||||
199 | SecondaryAllocator secondary_; | ||||||||
200 | AllocatorGlobalStats stats_; | ||||||||
201 | }; |
1 | //===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Part of the Sanitizer Allocator. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | #ifndef SANITIZER_ALLOCATOR_H |
13 | #error This file must be included inside sanitizer_allocator.h |
14 | #endif |
15 | |
16 | template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache; |
17 | |
18 | // SizeClassAllocator32 -- allocator for 32-bit address space. |
19 | // This allocator can theoretically be used on 64-bit arch, but there it is less |
20 | // efficient than SizeClassAllocator64. |
21 | // |
22 | // [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can |
23 | // be returned by MmapOrDie(). |
24 | // |
25 | // Region: |
26 | // a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize, |
27 | // kRegionSize). |
28 | // Since the regions are aligned by kRegionSize, there are exactly |
29 | // kNumPossibleRegions possible regions in the address space and so we keep |
30 | // a ByteMap possible_regions to store the size classes of each Region. |
31 | // 0 size class means the region is not used by the allocator. |
32 | // |
33 | // One Region is used to allocate chunks of a single size class. |
34 | // A Region looks like this: |
35 | // UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1 |
36 | // |
37 | // In order to avoid false sharing the objects of this class should be |
38 | // chache-line aligned. |
39 | |
// Bit masks for the kFlags template parameter of SizeClassAllocator32.
struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,           // shuffle chunks within a region
    kUseSeparateSizeClassForBatch = 2,  // keep TransferBatch in its own class
  };
};
46 | |
47 | template <class Params> |
48 | class SizeClassAllocator32 { |
49 | private: |
50 | static const u64 kTwoLevelByteMapSize1 = |
51 | (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12; |
52 | static const u64 kMinFirstMapSizeTwoLevelByteMap = 4; |
53 | |
54 | public: |
55 | using AddressSpaceView = typename Params::AddressSpaceView; |
56 | static const uptr kSpaceBeg = Params::kSpaceBeg; |
57 | static const u64 kSpaceSize = Params::kSpaceSize; |
58 | static const uptr kMetadataSize = Params::kMetadataSize; |
59 | typedef typename Params::SizeClassMap SizeClassMap; |
60 | static const uptr kRegionSizeLog = Params::kRegionSizeLog; |
61 | typedef typename Params::MapUnmapCallback MapUnmapCallback; |
62 | using ByteMap = typename conditional< |
63 | (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap), |
64 | FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog), |
65 | AddressSpaceView>, |
66 | TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type; |
67 | |
68 | COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||typedef char assertion_failed__69[2*(int)(!0 || (kSpaceSize & (kSpaceSize - 1)) == 0)-1] |
69 | (kSpaceSize & (kSpaceSize - 1)) == 0)typedef char assertion_failed__69[2*(int)(!0 || (kSpaceSize & (kSpaceSize - 1)) == 0)-1]; |
70 | |
71 | static const bool kRandomShuffleChunks = Params::kFlags & |
72 | SizeClassAllocator32FlagMasks::kRandomShuffleChunks; |
73 | static const bool kUseSeparateSizeClassForBatch = Params::kFlags & |
74 | SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch; |
75 | |
76 | struct TransferBatch { |
77 | static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2; |
78 | void SetFromArray(void *batch[], uptr count) { |
79 | DCHECK_LE(count, kMaxNumCached); |
80 | count_ = count; |
81 | for (uptr i = 0; i < count; i++) |
82 | batch_[i] = batch[i]; |
83 | } |
84 | uptr Count() const { return count_; } |
85 | void Clear() { count_ = 0; } |
86 | void Add(void *ptr) { |
87 | batch_[count_++] = ptr; |
88 | DCHECK_LE(count_, kMaxNumCached); |
89 | } |
90 | void CopyToArray(void *to_batch[]) const { |
91 | for (uptr i = 0, n = Count(); i < n; i++) |
92 | to_batch[i] = batch_[i]; |
93 | } |
94 | |
95 | // How much memory do we need for a batch containing n elements. |
96 | static uptr AllocationSizeRequiredForNElements(uptr n) { |
97 | return sizeof(uptr) * 2 + sizeof(void *) * n; |
98 | } |
99 | static uptr MaxCached(uptr size) { |
100 | return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size)); |
101 | } |
102 | |
103 | TransferBatch *next; |
104 | |
105 | private: |
106 | uptr count_; |
107 | void *batch_[kMaxNumCached]; |
108 | }; |
109 | |
110 | static const uptr kBatchSize = sizeof(TransferBatch); |
111 | COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0)typedef char assertion_failed__111[2*(int)((kBatchSize & ( kBatchSize - 1)) == 0)-1]; |
112 | COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr))typedef char assertion_failed__112[2*(int)(kBatchSize == SizeClassMap ::kMaxNumCachedHint * sizeof(uptr))-1]; |
113 | |
114 | static uptr ClassIdToSize(uptr class_id) { |
115 | return (class_id == SizeClassMap::kBatchClassID) ? |
116 | kBatchSize : SizeClassMap::Size(class_id); |
117 | } |
118 | |
119 | typedef SizeClassAllocator32<Params> ThisT; |
120 | typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache; |
121 | |
122 | void Init(s32 release_to_os_interval_ms) { |
123 | possible_regions.Init(); |
124 | internal_memset(size_class_info_array, 0, sizeof(size_class_info_array)); |
125 | } |
126 | |
127 | s32 ReleaseToOSIntervalMs() const { |
128 | return kReleaseToOSIntervalNever; |
129 | } |
130 | |
131 | void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) { |
132 | // This is empty here. Currently only implemented in 64-bit allocator. |
133 | } |
134 | |
135 | void ForceReleaseToOS() { |
136 | // Currently implemented in 64-bit allocator only. |
137 | } |
138 | |
139 | void *MapWithCallback(uptr size) { |
140 | void *res = MmapOrDie(size, PrimaryAllocatorName); |
141 | MapUnmapCallback().OnMap((uptr)res, size); |
142 | return res; |
143 | } |
144 | |
145 | void UnmapWithCallback(uptr beg, uptr size) { |
146 | MapUnmapCallback().OnUnmap(beg, size); |
147 | UnmapOrDie(reinterpret_cast<void *>(beg), size); |
148 | } |
149 | |
150 | static bool CanAllocate(uptr size, uptr alignment) { |
151 | return size <= SizeClassMap::kMaxSize && |
152 | alignment <= SizeClassMap::kMaxSize; |
153 | } |
154 | |
155 | void *GetMetaData(const void *p) { |
156 | CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine( p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary32.h" , 156, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2 ); } while (false); |
157 | uptr mem = reinterpret_cast<uptr>(p); |
158 | uptr beg = ComputeRegionBeg(mem); |
159 | uptr size = ClassIdToSize(GetSizeClass(p)); |
160 | u32 offset = mem - beg; |
161 | uptr n = offset / (u32)size; // 32-bit division |
162 | uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize; |
163 | return reinterpret_cast<void*>(meta); |
164 | } |
165 | |
166 | NOINLINE__attribute__((noinline)) TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c, |
167 | uptr class_id) { |
168 | DCHECK_LT(class_id, kNumClasses); |
169 | SizeClassInfo *sci = GetSizeClassInfo(class_id); |
170 | SpinMutexLock l(&sci->mutex); |
171 | if (sci->free_list.empty()) { |
172 | if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id))__builtin_expect(!!(!PopulateFreeList(stat, c, sci, class_id) ), 0)) |
173 | return nullptr; |
174 | DCHECK(!sci->free_list.empty()); |
175 | } |
176 | TransferBatch *b = sci->free_list.front(); |
177 | sci->free_list.pop_front(); |
178 | return b; |
179 | } |
180 | |
181 | NOINLINE__attribute__((noinline)) void DeallocateBatch(AllocatorStats *stat, uptr class_id, |
182 | TransferBatch *b) { |
183 | DCHECK_LT(class_id, kNumClasses); |
184 | CHECK_GT(b->Count(), 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((b->Count()) ); __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect (!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary32.h" , 184, "(" "(b->Count())" ") " ">" " (" "(0)" ")", v1, v2 ); } while (false); |
185 | SizeClassInfo *sci = GetSizeClassInfo(class_id); |
186 | SpinMutexLock l(&sci->mutex); |
187 | sci->free_list.push_front(b); |
188 | } |
189 | |
190 | bool PointerIsMine(const void *p) { |
191 | uptr mem = reinterpret_cast<uptr>(p); |
192 | if (SANITIZER_SIGN_EXTENDED_ADDRESSES0) |
193 | mem &= (kSpaceSize - 1); |
194 | if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize) |
195 | return false; |
196 | return GetSizeClass(p) != 0; |
197 | } |
198 | |
  // Returns the size class recorded for the region containing |p|;
  // 0 means the region was never handed out by this allocator.
  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }
202 | |
203 | void *GetBlockBegin(const void *p) { |
204 | CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine( p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect (!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary32.h" , 204, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2 ); } while (false); |
205 | uptr mem = reinterpret_cast<uptr>(p); |
206 | uptr beg = ComputeRegionBeg(mem); |
207 | uptr size = ClassIdToSize(GetSizeClass(p)); |
208 | u32 offset = mem - beg; |
209 | u32 n = offset / (u32)size; // 32-bit division |
210 | uptr res = beg + (n * (u32)size); |
211 | return reinterpret_cast<void*>(res); |
212 | } |
213 | |
  // Requests are rounded up to their size class, so the usable size of |p|
  // is exactly the size of that class.
  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }
218 | |
  // Thin wrapper: maps a byte size to its size-class index.
  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
220 | |
221 | uptr TotalMemoryUsed() { |
222 | // No need to lock here. |
223 | uptr res = 0; |
224 | for (uptr i = 0; i < kNumPossibleRegions; i++) |
225 | if (possible_regions[i]) |
226 | res += kRegionSize; |
227 | return res; |
228 | } |
229 | |
230 | void TestOnlyUnmap() { |
231 | for (uptr i = 0; i < kNumPossibleRegions; i++) |
232 | if (possible_regions[i]) |
233 | UnmapWithCallback((i * kRegionSize), kRegionSize); |
234 | } |
235 | |
236 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone |
237 | // introspection API. |
238 | void ForceLock() { |
239 | for (uptr i = 0; i < kNumClasses; i++) { |
240 | GetSizeClassInfo(i)->mutex.Lock(); |
241 | } |
242 | } |
243 | |
244 | void ForceUnlock() { |
245 | for (int i = kNumClasses - 1; i >= 0; i--) { |
246 | GetSizeClassInfo(i)->mutex.Unlock(); |
247 | } |
248 | } |
249 | |
250 | // Iterate over all existing chunks. |
251 | // The allocator must be locked when calling this function. |
252 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { |
253 | for (uptr region = 0; region < kNumPossibleRegions; region++) |
254 | if (possible_regions[region]) { |
255 | uptr chunk_size = ClassIdToSize(possible_regions[region]); |
256 | uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize); |
257 | uptr region_beg = region * kRegionSize; |
258 | for (uptr chunk = region_beg; |
259 | chunk < region_beg + max_chunks_in_region * chunk_size; |
260 | chunk += chunk_size) { |
261 | // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); |
262 | callback(chunk, arg); |
263 | } |
264 | } |
265 | } |
266 | |
  // Intentionally a no-op: the 32-bit primary keeps no printable stats.
  void PrintStats() {}
268 | |
  // No extra bookkeeping space is reserved beyond the regions themselves.
  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
273 | |
 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  // Per-size-class state. Cache-line aligned so that two classes never
  // share a line (avoids false sharing between their mutexes).
  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;       // guards free_list
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;              // PRNG state for chunk shuffling; 0 = uninitialized
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
284 | |
  // Maps an address to its index in possible_regions.
  uptr ComputeRegionId(uptr mem) const {
    // On targets with sign-extended addresses, drop the extension bits first.
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }
292 | |
  // Rounds |mem| down to the start of its kRegionSize-aligned region.
  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }
296 | |
  // Maps one kRegionSize-aligned region for |class_id| and records the
  // class id in possible_regions. Returns 0 on mmap failure (only fatal
  // errors abort inside MmapAlignedOrDieOnFatalError; the caller must
  // handle the 0 return).
  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    // NOTE(review): class_id is narrowed to u8 here — assumes kNumClasses
    // fits in a byte (ByteMap element); confirm against SizeClassMap.
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }
309 | |
  // Returns the per-class state slot for |class_id|.
  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }
314 | |
  // Packs |count| chunk pointers into TransferBatches and appends every
  // full batch to sci->free_list. *current_batch carries a partially
  // filled batch across successive calls; on return it holds the still
  // unfinished batch, or null. Returns false if a batch could not be
  // created (allocation failure).
  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        // NOTE(review): the current chunk is passed to CreateBatch —
        // presumably the batch may be placed inside that chunk when batches
        // share the size class; confirm in the AllocatorCache implementation.
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }
339 | |
  // Maps a new region for |class_id| and carves it into chunks, pushing
  // them (optionally shuffled in kShuffleArraySize groups) onto the class
  // free list as TransferBatches. Caller holds sci->mutex.
  // Returns false on region or batch allocation failure.
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    // Chunks and their metadata records share the region, so the chunk
    // count is bounded by kRegionSize / (size + kMetadataSize).
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    // Shuffling happens per group of up to 48 chunks, not across the
    // whole region — keeps the stack buffer small.
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    // Flush the final partial group.
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    // PopulateBatches may leave a partially filled batch; enqueue it too.
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }
377 | |
  // Region index -> size class id; 0 marks a region this allocator never
  // mapped (see PointerIsMine / TotalMemoryUsed).
  ByteMap possible_regions;
  // Per-class free lists, mutexes and shuffle state.
  SizeClassInfo size_class_info_array[kNumClasses];
380 | }; |