Bug Summary

File: compiler-rt/lib/asan/asan_allocator.cpp
Warning: line 510, column 46
Array access (from variable 'alloc_beg') results in a null pointer dereference
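
The reported path enters Allocate(), sees the underlying allocator.Allocate() return null (event 47), takes the !allocated branch with AllocatorMayReturnNull() false (events 48-50), and assumes ReportOutOfMemory() can return to its caller, so alloc_beg is initialized from the null pointer (event 52) and written through at line 510 (event 65). Below is a minimal, self-contained C++ sketch of that shape, not the ASan source: the helper definitions are illustrative stand-ins for the real sanitizer APIs, and the sketch deliberately leaves the report function unmarked as non-returning, which is the assumption the analyzer path relies on.

// Minimal sketch of the flagged pattern (illustrative names, not real sanitizer APIs).
#include <cstdint>
#include <cstdio>
#include <cstdlib>

using uptr = uintptr_t;

static bool AllocatorMayReturnNull() { return false; }    // events 49/50: condition is false
static void ReportOutOfMemory(uptr size) {                // not marked noreturn in this sketch
  std::fprintf(stderr, "OOM: %zu bytes\n", (size_t)size);
}
static void *UnderlyingAllocate(uptr size) { return std::malloc(size); }

void *AllocateSketch(uptr size) {
  void *allocated = UnderlyingAllocate(size);             // event 46: value assigned
  if (!allocated) {                                       // events 47/48: null path taken
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size);                              // if this returns, 'allocated' is still null
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);     // event 52: initialized to 0
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0xCC6E96B9;    // event 65: the reported null write
  return allocated;
}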

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name asan_allocator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D ASAN_DYNAMIC=1 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/projects/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/projects/compiler-rt/lib/asan -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -ftls-model=initial-exec -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-11-181444-25759-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/asan_allocator.cpp

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/asan_allocator.cpp

1//===-- asan_allocator.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address sanity checker.
10//
11// Implementation of ASan's memory allocator, 2-nd version.
12// This variant uses the allocator from sanitizer_common, i.e. the one shared
13// with ThreadSanitizer and MemorySanitizer.
14//
15//===----------------------------------------------------------------------===//
16
17#include "asan_allocator.h"
18#include "asan_mapping.h"
19#include "asan_poisoning.h"
20#include "asan_report.h"
21#include "asan_stack.h"
22#include "asan_thread.h"
23#include "sanitizer_common/sanitizer_allocator_checks.h"
24#include "sanitizer_common/sanitizer_allocator_interface.h"
25#include "sanitizer_common/sanitizer_errno.h"
26#include "sanitizer_common/sanitizer_flags.h"
27#include "sanitizer_common/sanitizer_internal_defs.h"
28#include "sanitizer_common/sanitizer_list.h"
29#include "sanitizer_common/sanitizer_stackdepot.h"
30#include "sanitizer_common/sanitizer_quarantine.h"
31#include "lsan/lsan_common.h"
32
33namespace __asan {
34
35// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
36// We use adaptive redzones: for larger allocation larger redzones are used.
37static u32 RZLog2Size(u32 rz_log) {
38 CHECK_LT(rz_log, 8);
39 return 16 << rz_log;
40}
41
42static u32 RZSize2Log(u32 rz_size) {
43 CHECK_GE(rz_size, 16);
44 CHECK_LE(rz_size, 2048);
45 CHECK(IsPowerOfTwo(rz_size));
46 u32 res = Log2(rz_size) - 4;
47 CHECK_EQ(rz_size, RZLog2Size(res));
48 return res;
49}
50
51static AsanAllocator &get_allocator();
52
53// The memory chunk allocated from the underlying allocator looks like this:
54// L L L L L L H H U U U U U U R R
55// L -- left redzone words (0 or more bytes)
56// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
57// U -- user memory.
58// R -- right redzone (0 or more bytes)
59// ChunkBase consists of ChunkHeader and other bytes that overlap with user
60// memory.
61
62// If the left redzone is greater than the ChunkHeader size we store a magic
63// value in the first uptr word of the memory block and store the address of
64// ChunkBase in the next uptr.
65// M B L L L L L L L L L H H U U U U U U
66// | ^
67// ---------------------|
68// M -- magic value kAllocBegMagic
69// B -- address of ChunkHeader pointing to the first 'H'
70static const uptr kAllocBegMagic = 0xCC6E96B9;
71
72struct ChunkHeader {
73 // 1-st 8 bytes.
74 u32 chunk_state : 8; // Must be first.
75 u32 alloc_tid : 24;
76
77 u32 free_tid : 24;
78 u32 from_memalign : 1;
79 u32 alloc_type : 2;
80 u32 rz_log : 3;
81 u32 lsan_tag : 2;
82 // 2-nd 8 bytes
83 // This field is used for small sizes. For large sizes it is equal to
84 // SizeClassMap::kMaxSize and the actual size is stored in the
85 // SecondaryAllocator's metadata.
86 u32 user_requested_size : 29;
87 // align < 8 -> 0
88 // else -> log2(min(align, 512)) - 2
89 u32 user_requested_alignment_log : 3;
90 u32 alloc_context_id;
91};
92
93struct ChunkBase : ChunkHeader {
94 // Header2, intersects with user memory.
95 u32 free_context_id;
96};
97
98static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
99static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
100COMPILER_CHECK(kChunkHeaderSize == 16);
101COMPILER_CHECK(kChunkHeader2Size <= 16);
102
103// Every chunk of memory allocated by this allocator can be in one of 3 states:
104// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
105// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
106// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
107enum {
108 CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
109 CHUNK_ALLOCATED = 2,
110 CHUNK_QUARANTINE = 3
111};
112
113struct AsanChunk: ChunkBase {
114 uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
115 uptr UsedSize(bool locked_version = false) {
116 if (user_requested_size != SizeClassMap::kMaxSize)
117 return user_requested_size;
118 return *reinterpret_cast<uptr *>(
119 get_allocator().GetMetaData(AllocBeg(locked_version)));
120 }
121 void *AllocBeg(bool locked_version = false) {
122 if (from_memalign) {
123 if (locked_version)
124 return get_allocator().GetBlockBeginFastLocked(
125 reinterpret_cast<void *>(this));
126 return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
127 }
128 return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
129 }
130 bool AddrIsInside(uptr addr, bool locked_version = false) {
131 return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
132 }
133};
134
135struct QuarantineCallback {
136 QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
137 : cache_(cache),
138 stack_(stack) {
139 }
140
141 void Recycle(AsanChunk *m) {
142 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
143 atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
144 CHECK_NE(m->alloc_tid, kInvalidTid);
145 CHECK_NE(m->free_tid, kInvalidTid);
146 PoisonShadow(m->Beg(),
147 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
148 kAsanHeapLeftRedzoneMagic);
149 void *p = reinterpret_cast<void *>(m->AllocBeg());
150 if (p != m) {
151 uptr *alloc_magic = reinterpret_cast<uptr *>(p);
152 CHECK_EQ(alloc_magic[0], kAllocBegMagic);
153 // Clear the magic value, as allocator internals may overwrite the
154 // contents of deallocated chunk, confusing GetAsanChunk lookup.
155 alloc_magic[0] = 0;
156 CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
157 }
158
159 // Statistics.
160 AsanStats &thread_stats = GetCurrentThreadStats();
161 thread_stats.real_frees++;
162 thread_stats.really_freed += m->UsedSize();
163
164 get_allocator().Deallocate(cache_, p);
165 }
166
167 void *Allocate(uptr size) {
168 void *res = get_allocator().Allocate(cache_, size, 1);
169 // TODO(alekseys): Consider making quarantine OOM-friendly.
170 if (UNLIKELY(!res))
171 ReportOutOfMemory(size, stack_);
172 return res;
173 }
174
175 void Deallocate(void *p) {
176 get_allocator().Deallocate(cache_, p);
177 }
178
179 private:
180 AllocatorCache* const cache_;
181 BufferedStackTrace* const stack_;
182};
183
184typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
185typedef AsanQuarantine::Cache QuarantineCache;
186
187void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
188 PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
189 // Statistics.
190 AsanStats &thread_stats = GetCurrentThreadStats();
191 thread_stats.mmaps++;
192 thread_stats.mmaped += size;
193}
194void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
195 PoisonShadow(p, size, 0);
196 // We are about to unmap a chunk of user memory.
197 // Mark the corresponding shadow memory as not needed.
198 FlushUnneededASanShadowMemory(p, size);
199 // Statistics.
200 AsanStats &thread_stats = GetCurrentThreadStats();
201 thread_stats.munmaps++;
202 thread_stats.munmaped += size;
203}
204
205// We can not use THREADLOCAL because it is not supported on some of the
206// platforms we care about (OSX 10.6, Android).
207// static THREADLOCAL AllocatorCache cache;
208AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
209 CHECK(ms);
210 return &ms->allocator_cache;
211}
212
213QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
214 CHECK(ms);
215 CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
216 return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
217}
218
219void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
220 quarantine_size_mb = f->quarantine_size_mb;
221 thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
222 min_redzone = f->redzone;
223 max_redzone = f->max_redzone;
224 may_return_null = cf->allocator_may_return_null;
225 alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
226 release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
227}
228
229void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
230 f->quarantine_size_mb = quarantine_size_mb;
231 f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
232 f->redzone = min_redzone;
233 f->max_redzone = max_redzone;
234 cf->allocator_may_return_null = may_return_null;
235 f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
236 cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
237}
238
239struct Allocator {
240 static const uptr kMaxAllowedMallocSize =
241 FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
242
243 AsanAllocator allocator;
244 AsanQuarantine quarantine;
245 StaticSpinMutex fallback_mutex;
246 AllocatorCache fallback_allocator_cache;
247 QuarantineCache fallback_quarantine_cache;
248
249 uptr max_user_defined_malloc_size;
250 atomic_uint8_t rss_limit_exceeded;
251
252 // ------------------- Options --------------------------
253 atomic_uint16_t min_redzone;
254 atomic_uint16_t max_redzone;
255 atomic_uint8_t alloc_dealloc_mismatch;
256
257 // ------------------- Initialization ------------------------
258 explicit Allocator(LinkerInitialized)
259 : quarantine(LINKER_INITIALIZED),
260 fallback_quarantine_cache(LINKER_INITIALIZED) {}
261
262 void CheckOptions(const AllocatorOptions &options) const {
263 CHECK_GE(options.min_redzone, 16);
264 CHECK_GE(options.max_redzone, options.min_redzone);
265 CHECK_LE(options.max_redzone, 2048);
266 CHECK(IsPowerOfTwo(options.min_redzone));
267 CHECK(IsPowerOfTwo(options.max_redzone));
268 }
269
270 void SharedInitCode(const AllocatorOptions &options) {
271 CheckOptions(options);
272 quarantine.Init((uptr)options.quarantine_size_mb << 20,
273 (uptr)options.thread_local_quarantine_size_kb << 10);
274 atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
275 memory_order_release);
276 atomic_store(&min_redzone, options.min_redzone, memory_order_release);
277 atomic_store(&max_redzone, options.max_redzone, memory_order_release);
278 }
279
280 void InitLinkerInitialized(const AllocatorOptions &options) {
281 SetAllocatorMayReturnNull(options.may_return_null);
282 allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
283 SharedInitCode(options);
284 max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
285 ? common_flags()->max_allocation_size_mb
286 << 20
287 : kMaxAllowedMallocSize;
288 }
289
290 bool RssLimitExceeded() {
291 return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
4: Calling 'atomic_load<__sanitizer::atomic_uint8_t>'
7: Returning from 'atomic_load<__sanitizer::atomic_uint8_t>'
8: Returning value, which participates in a condition later
292 }
293
294 void SetRssLimitExceeded(bool limit_exceeded) {
295 atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
296 }
297
298 void RePoisonChunk(uptr chunk) {
299 // This could be a user-facing chunk (with redzones), or some internal
300 // housekeeping chunk, like TransferBatch. Start by assuming the former.
301 AsanChunk *ac = GetAsanChunk((void *)chunk);
302 uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
303 uptr beg = ac->Beg();
304 uptr end = ac->Beg() + ac->UsedSize(true);
305 uptr chunk_end = chunk + allocated_size;
306 if (chunk < beg && beg < end && end <= chunk_end &&
307 ac->chunk_state == CHUNK_ALLOCATED) {
308 // Looks like a valid AsanChunk in use, poison redzones only.
309 PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
310 uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
311 FastPoisonShadowPartialRightRedzone(
312 end_aligned_down, end - end_aligned_down,
313 chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
314 } else {
315 // This is either not an AsanChunk or freed or quarantined AsanChunk.
316 // In either case, poison everything.
317 PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
318 }
319 }
320
321 void ReInitialize(const AllocatorOptions &options) {
322 SetAllocatorMayReturnNull(options.may_return_null);
323 allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
324 SharedInitCode(options);
325
326 // Poison all existing allocation's redzones.
327 if (CanPoisonMemory()) {
328 allocator.ForceLock();
329 allocator.ForEachChunk(
330 [](uptr chunk, void *alloc) {
331 ((Allocator *)alloc)->RePoisonChunk(chunk);
332 },
333 this);
334 allocator.ForceUnlock();
335 }
336 }
337
338 void GetOptions(AllocatorOptions *options) const {
339 options->quarantine_size_mb = quarantine.GetSize() >> 20;
340 options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
341 options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
342 options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
343 options->may_return_null = AllocatorMayReturnNull();
344 options->alloc_dealloc_mismatch =
345 atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
346 options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
347 }
348
349 // -------------------- Helper methods. -------------------------
350 uptr ComputeRZLog(uptr user_requested_size) {
351 u32 rz_log =
352 user_requested_size <= 64 - 16 ? 0 :
353 user_requested_size <= 128 - 32 ? 1 :
354 user_requested_size <= 512 - 64 ? 2 :
355 user_requested_size <= 4096 - 128 ? 3 :
356 user_requested_size <= (1 << 14) - 256 ? 4 :
357 user_requested_size <= (1 << 15) - 512 ? 5 :
358 user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
359 u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
360 u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
361 return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
362 }
363
364 static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
365 if (user_requested_alignment < 8)
366 return 0;
367 if (user_requested_alignment > 512)
368 user_requested_alignment = 512;
369 return Log2(user_requested_alignment) - 2;
370 }
371
372 static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
373 if (user_requested_alignment_log == 0)
374 return 0;
375 return 1LL << (user_requested_alignment_log + 2);
376 }
377
378 // We have an address between two chunks, and we want to report just one.
379 AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
380 AsanChunk *right_chunk) {
381 // Prefer an allocated chunk over freed chunk and freed chunk
382 // over available chunk.
383 if (left_chunk->chunk_state != right_chunk->chunk_state) {
384 if (left_chunk->chunk_state == CHUNK_ALLOCATED)
385 return left_chunk;
386 if (right_chunk->chunk_state == CHUNK_ALLOCATED)
387 return right_chunk;
388 if (left_chunk->chunk_state == CHUNK_QUARANTINE)
389 return left_chunk;
390 if (right_chunk->chunk_state == CHUNK_QUARANTINE)
391 return right_chunk;
392 }
393 // Same chunk_state: choose based on offset.
394 sptr l_offset = 0, r_offset = 0;
395 CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
396 CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
397 if (l_offset < r_offset)
398 return left_chunk;
399 return right_chunk;
400 }
401
402 bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
403 AsanChunk *m = GetAsanChunkByAddr(addr);
404 if (!m) return false;
405 if (m->chunk_state != CHUNK_ALLOCATED) return false;
406 if (m->Beg() != addr) return false;
407 atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
408 memory_order_relaxed);
409 return true;
410 }
411
412 // -------------------- Allocation/Deallocation routines ---------------
413 void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
414 AllocType alloc_type, bool can_fill) {
415 if (UNLIKELY(!asan_inited))
1: Assuming 'asan_inited' is not equal to 0
2: Taking false branch
416 AsanInitFromRtl();
417 if (RssLimitExceeded()) {
3: Calling 'Allocator::RssLimitExceeded'
9: Returning from 'Allocator::RssLimitExceeded'
10: Assuming the condition is false
11: Taking false branch
418 if (AllocatorMayReturnNull())
419 return nullptr;
420 ReportRssLimitExceeded(stack);
421 }
422 Flags &fl = *flags();
423 CHECK(stack);
12: Assuming 'v1' is not equal to 'v2'
13: Taking false branch
14: Loop condition is false. Exiting loop
424 const uptr min_alignment = SHADOW_GRANULARITY;
425 const uptr user_requested_alignment_log =
426 ComputeUserRequestedAlignmentLog(alignment);
427 if (alignment < min_alignment)
14.1: 'alignment' is >= 'min_alignment'
15: Taking false branch
428 alignment = min_alignment;
429 if (size == 0) {
16: Assuming 'size' is not equal to 0
17: Taking false branch
430 // We'd be happy to avoid allocating memory for zero-size requests, but
431 // some programs/tests depend on this behavior and assume that malloc
432 // would not return NULL even for zero-size allocations. Moreover, it
433 // looks like operator new should never return NULL, and results of
434 // consecutive "new" calls must be different even if the allocated size
435 // is zero.
436 size = 1;
437 }
438 CHECK(IsPowerOfTwo(alignment));
18: Taking false branch
19: Loop condition is false. Exiting loop
439 uptr rz_log = ComputeRZLog(size);
440 uptr rz_size = RZLog2Size(rz_log);
441 uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
442 uptr needed_size = rounded_size + rz_size;
443 if (alignment > min_alignment)
20: Assuming 'alignment' is <= 'min_alignment'
21: Taking false branch
444 needed_size += alignment;
445 bool using_primary_allocator = true;
446 // If we are allocating from the secondary allocator, there will be no
447 // automatic right redzone, so add the right redzone manually.
448 if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
22: Taking true branch
449 needed_size += rz_size;
450 using_primary_allocator = false;
451 }
452 CHECK(IsAligned(needed_size, min_alignment));
23: Taking false branch
24: Loop condition is false. Exiting loop
453 if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
454 size > max_user_defined_malloc_size) {
25: Assuming 'size' is <= 'kMaxAllowedMallocSize'
26: Assuming 'needed_size' is <= 'kMaxAllowedMallocSize'
27: Assuming 'size' is <= field 'max_user_defined_malloc_size'
28: Taking false branch
455 if (AllocatorMayReturnNull()) {
456 Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
457 (void*)size);
458 return nullptr;
459 }
460 uptr malloc_limit =
461 Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
462 ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
463 }
464
465 AsanThread *t = GetCurrentThread();
466 void *allocated;
467 if (t) {
29: Assuming 't' is null
30: Taking false branch
468 AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
469 allocated = allocator.Allocate(cache, needed_size, 8);
470 } else {
471 SpinMutexLock l(&fallback_mutex);
472 AllocatorCache *cache = &fallback_allocator_cache;
473 allocated = allocator.Allocate(cache, needed_size, 8);
31: Calling 'CombinedAllocator::Allocate'
45: Returning from 'CombinedAllocator::Allocate'
46: Value assigned to 'allocated'
474 }
475 if (UNLIKELY(!allocated)) {
47: Assuming 'allocated' is null
48: Taking true branch
476 SetAllocatorOutOfMemory();
477 if (AllocatorMayReturnNull())
49: Assuming the condition is false
50: Taking false branch
478 return nullptr;
479 ReportOutOfMemory(size, stack);
480 }
481
482 if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
51: Assuming the condition is false
483 // Heap poisoning is enabled, but the allocator provides an unpoisoned
484 // chunk. This is possible if CanPoisonMemory() was false for some
485 // time, for example, due to flags()->start_disabled.
486 // Anyway, poison the block before using it for anything else.
487 uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
488 PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
489 }
490
491 uptr alloc_beg = reinterpret_cast<uptr>(allocated);
52: 'alloc_beg' initialized to 0
492 uptr alloc_end = alloc_beg + needed_size;
493 uptr beg_plus_redzone = alloc_beg + rz_size;
494 uptr user_beg = beg_plus_redzone;
495 if (!IsAligned(user_beg, alignment))
53: Taking false branch
496 user_beg = RoundUpTo(user_beg, alignment);
497 uptr user_end = user_beg + size;
498 CHECK_LE(user_end, alloc_end);
54: Assuming 'v1' is <= 'v2'
55: Taking false branch
56: Loop condition is false. Exiting loop
499 uptr chunk_beg = user_beg - kChunkHeaderSize;
500 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
501 m->alloc_type = alloc_type;
502 m->rz_log = rz_log;
503 u32 alloc_tid = t ? t->tid() : 0;
56.1: 't' is null
57: '?' condition is false
504 m->alloc_tid = alloc_tid;
505 CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
58: Taking false branch
59: Loop condition is false. Exiting loop
506 m->free_tid = kInvalidTid;
507 m->from_memalign = user_beg != beg_plus_redzone;
508 if (alloc_beg != chunk_beg) {
60: Assuming 'alloc_beg' is not equal to 'chunk_beg'
61: Taking true branch
509 CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
62: Assuming 'v1' is <= 'v2'
63: Taking false branch
64: Loop condition is false. Exiting loop
510 reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
65: Array access (from variable 'alloc_beg') results in a null pointer dereference
511 reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
512 }
513 if (using_primary_allocator) {
514 CHECK(size);
515 m->user_requested_size = size;
516 CHECK(allocator.FromPrimary(allocated));
517 } else {
518 CHECK(!allocator.FromPrimary(allocated));
519 m->user_requested_size = SizeClassMap::kMaxSize;
520 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
521 meta[0] = size;
522 meta[1] = chunk_beg;
523 }
524 m->user_requested_alignment_log = user_requested_alignment_log;
525
526 m->alloc_context_id = StackDepotPut(*stack);
527
528 uptr size_rounded_down_to_granularity =
529 RoundDownTo(size, SHADOW_GRANULARITY);
530 // Unpoison the bulk of the memory region.
531 if (size_rounded_down_to_granularity)
532 PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
533 // Deal with the end of the region if size is not aligned to granularity.
534 if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
535 u8 *shadow =
536 (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
537 *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
538 }
539
540 AsanStats &thread_stats = GetCurrentThreadStats();
541 thread_stats.mallocs++;
542 thread_stats.malloced += size;
543 thread_stats.malloced_redzones += needed_size - size;
544 if (needed_size > SizeClassMap::kMaxSize)
545 thread_stats.malloc_large++;
546 else
547 thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
548
549 void *res = reinterpret_cast<void *>(user_beg);
550 if (can_fill && fl.max_malloc_fill_size) {
551 uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
552 REAL(memset)(res, fl.malloc_fill_byte, fill_size);
553 }
554#if CAN_SANITIZE_LEAKS
555 m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
556 : __lsan::kDirectlyLeaked;
557#endif
558 // Must be the last mutation of metadata in this function.
559 atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
560 ASAN_MALLOC_HOOK(res, size);
561 return res;
562 }
563
564 // Set quarantine flag if chunk is allocated, issue ASan error report on
565 // available and quarantined chunks. Return true on success, false otherwise.
566 bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
567 BufferedStackTrace *stack) {
568 u8 old_chunk_state = CHUNK_ALLOCATED;
569 // Flip the chunk_state atomically to avoid race on double-free.
570 if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
571 CHUNK_QUARANTINE,
572 memory_order_acquire)) {
573 ReportInvalidFree(ptr, old_chunk_state, stack);
574 // It's not safe to push a chunk in quarantine on invalid free.
575 return false;
576 }
577 CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
578 return true;
579 }
580
581 // Expects the chunk to already be marked as quarantined by using
582 // AtomicallySetQuarantineFlagIfAllocated.
583 void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
584 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
585 CHECK_GE(m->alloc_tid, 0);
586 if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
587 CHECK_EQ(m->free_tid, kInvalidTid);
588 AsanThread *t = GetCurrentThread();
589 m->free_tid = t ? t->tid() : 0;
590 m->free_context_id = StackDepotPut(*stack);
591
592 Flags &fl = *flags();
593 if (fl.max_free_fill_size > 0) {
594 // We have to skip the chunk header, it contains free_context_id.
595 uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
596 if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
597 uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
598 size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
599 REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
600 }
601 }
602
603 // Poison the region.
604 PoisonShadow(m->Beg(),
605 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
606 kAsanHeapFreeMagic);
607
608 AsanStats &thread_stats = GetCurrentThreadStats();
609 thread_stats.frees++;
610 thread_stats.freed += m->UsedSize();
611
612 // Push into quarantine.
613 if (t) {
614 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
615 AllocatorCache *ac = GetAllocatorCache(ms);
616 quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
617 m->UsedSize());
618 } else {
619 SpinMutexLock l(&fallback_mutex);
620 AllocatorCache *ac = &fallback_allocator_cache;
621 quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
622 m, m->UsedSize());
623 }
624 }
625
626 void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
627 BufferedStackTrace *stack, AllocType alloc_type) {
628 uptr p = reinterpret_cast<uptr>(ptr);
629 if (p == 0) return;
630
631 uptr chunk_beg = p - kChunkHeaderSize;
632 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
633
634 // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
635 // malloc. Don't report an invalid free in this case.
636 if (SANITIZER_WINDOWS &&
637 !get_allocator().PointerIsMine(ptr)) {
638 if (!IsSystemHeapAddress(p))
639 ReportFreeNotMalloced(p, stack);
640 return;
641 }
642
643 ASAN_FREE_HOOK(ptr);
644
645 // Must mark the chunk as quarantined before any changes to its metadata.
646 // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
647 if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
648
649 if (m->alloc_type != alloc_type) {
650 if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
651 ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
652 (AllocType)alloc_type);
653 }
654 } else {
655 if (flags()->new_delete_type_mismatch &&
656 (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
657 ((delete_size && delete_size != m->UsedSize()) ||
658 ComputeUserRequestedAlignmentLog(delete_alignment) !=
659 m->user_requested_alignment_log)) {
660 ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
661 }
662 }
663
664 QuarantineChunk(m, ptr, stack);
665 }
666
667 void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
668 CHECK(old_ptr && new_size);
669 uptr p = reinterpret_cast<uptr>(old_ptr);
670 uptr chunk_beg = p - kChunkHeaderSize;
671 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
672
673 AsanStats &thread_stats = GetCurrentThreadStats();
674 thread_stats.reallocs++;
675 thread_stats.realloced += new_size;
676
677 void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
678 if (new_ptr) {
679 u8 chunk_state = m->chunk_state;
680 if (chunk_state != CHUNK_ALLOCATED)
681 ReportInvalidFree(old_ptr, chunk_state, stack);
682 CHECK_NE(REAL(memcpy), nullptr);
683 uptr memcpy_size = Min(new_size, m->UsedSize());
684 // If realloc() races with free(), we may start copying freed memory.
685 // However, we will report racy double-free later anyway.
686 REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
687 Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
688 }
689 return new_ptr;
690 }
691
692 void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
693 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
694 if (AllocatorMayReturnNull())
695 return nullptr;
696 ReportCallocOverflow(nmemb, size, stack);
697 }
698 void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
699 // If the memory comes from the secondary allocator no need to clear it
700 // as it comes directly from mmap.
701 if (ptr && allocator.FromPrimary(ptr))
702 REAL(memset)(ptr, 0, nmemb * size);
703 return ptr;
704 }
705
706 void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
707 if (chunk_state == CHUNK_QUARANTINE)
708 ReportDoubleFree((uptr)ptr, stack);
709 else
710 ReportFreeNotMalloced((uptr)ptr, stack);
711 }
712
713 void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
714 AllocatorCache *ac = GetAllocatorCache(ms);
715 quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
716 allocator.SwallowCache(ac);
717 }
718
719 // -------------------------- Chunk lookup ----------------------
720
721 // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
722 AsanChunk *GetAsanChunk(void *alloc_beg) {
723 if (!alloc_beg) return nullptr;
724 if (!allocator.FromPrimary(alloc_beg)) {
725 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
726 AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
727 return m;
728 }
729 uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
730 if (alloc_magic[0] == kAllocBegMagic)
731 return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
732 return reinterpret_cast<AsanChunk *>(alloc_beg);
733 }
734
735 AsanChunk *GetAsanChunkByAddr(uptr p) {
736 void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
737 return GetAsanChunk(alloc_beg);
738 }
739
740 // Allocator must be locked when this function is called.
741 AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
742 void *alloc_beg =
743 allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
744 return GetAsanChunk(alloc_beg);
745 }
746
747 uptr AllocationSize(uptr p) {
748 AsanChunk *m = GetAsanChunkByAddr(p);
749 if (!m) return 0;
750 if (m->chunk_state != CHUNK_ALLOCATED) return 0;
751 if (m->Beg() != p) return 0;
752 return m->UsedSize();
753 }
754
755 AsanChunkView FindHeapChunkByAddress(uptr addr) {
756 AsanChunk *m1 = GetAsanChunkByAddr(addr);
757 if (!m1) return AsanChunkView(m1);
758 sptr offset = 0;
759 if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
760 // The address is in the chunk's left redzone, so maybe it is actually
761 // a right buffer overflow from the other chunk to the left.
762 // Search a bit to the left to see if there is another chunk.
763 AsanChunk *m2 = nullptr;
764 for (uptr l = 1; l < GetPageSizeCached(); l++) {
765 m2 = GetAsanChunkByAddr(addr - l);
766 if (m2 == m1) continue; // Still the same chunk.
767 break;
768 }
769 if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
770 m1 = ChooseChunk(addr, m2, m1);
771 }
772 return AsanChunkView(m1);
773 }
774
775 void Purge(BufferedStackTrace *stack) {
776 AsanThread *t = GetCurrentThread();
777 if (t) {
778 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
779 quarantine.DrainAndRecycle(GetQuarantineCache(ms),
780 QuarantineCallback(GetAllocatorCache(ms),
781 stack));
782 }
783 {
784 SpinMutexLock l(&fallback_mutex);
785 quarantine.DrainAndRecycle(&fallback_quarantine_cache,
786 QuarantineCallback(&fallback_allocator_cache,
787 stack));
788 }
789
790 allocator.ForceReleaseToOS();
791 }
792
793 void PrintStats() {
794 allocator.PrintStats();
795 quarantine.PrintStats();
796 }
797
798 void ForceLock() {
799 allocator.ForceLock();
800 fallback_mutex.Lock();
801 }
802
803 void ForceUnlock() {
804 fallback_mutex.Unlock();
805 allocator.ForceUnlock();
806 }
807};
808
809static Allocator instance(LINKER_INITIALIZED);
810
811static AsanAllocator &get_allocator() {
812 return instance.allocator;
813}
814
815bool AsanChunkView::IsValid() const {
816 return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
817}
818bool AsanChunkView::IsAllocated() const {
819 return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
820}
821bool AsanChunkView::IsQuarantined() const {
822 return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
823}
824uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
825uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
826uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
827u32 AsanChunkView::UserRequestedAlignment() const {
828 return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
829}
830uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
831uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
832AllocType AsanChunkView::GetAllocType() const {
833 return (AllocType)chunk_->alloc_type;
834}
835
836static StackTrace GetStackTraceFromId(u32 id) {
837 CHECK(id);
838 StackTrace res = StackDepotGet(id);
839 CHECK(res.trace);
840 return res;
841}
842
843u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
844u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
845
846StackTrace AsanChunkView::GetAllocStack() const {
847 return GetStackTraceFromId(GetAllocStackId());
848}
849
850StackTrace AsanChunkView::GetFreeStack() const {
851 return GetStackTraceFromId(GetFreeStackId());
852}
853
854void InitializeAllocator(const AllocatorOptions &options) {
855 instance.InitLinkerInitialized(options);
856}
857
858void ReInitializeAllocator(const AllocatorOptions &options) {
859 instance.ReInitialize(options);
860}
861
862void GetAllocatorOptions(AllocatorOptions *options) {
863 instance.GetOptions(options);
864}
865
866AsanChunkView FindHeapChunkByAddress(uptr addr) {
867 return instance.FindHeapChunkByAddress(addr);
868}
869AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
870 return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
871}
872
873void AsanThreadLocalMallocStorage::CommitBack() {
874 GET_STACK_TRACE_MALLOC;
875 instance.CommitBack(this, &stack);
876}
877
878void PrintInternalAllocatorStats() {
879 instance.PrintStats();
880}
881
882void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
883 instance.Deallocate(ptr, 0, 0, stack, alloc_type);
884}
885
886void asan_delete(void *ptr, uptr size, uptr alignment,
887 BufferedStackTrace *stack, AllocType alloc_type) {
888 instance.Deallocate(ptr, size, alignment, stack, alloc_type);
889}
890
891void *asan_malloc(uptr size, BufferedStackTrace *stack) {
892 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
893}
894
895void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
896 return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
897}
898
899void *asan_reallocarray(void *p, uptr nmemb, uptr size,
900 BufferedStackTrace *stack) {
901 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
902 errno = errno_ENOMEM;
903 if (AllocatorMayReturnNull())
904 return nullptr;
905 ReportReallocArrayOverflow(nmemb, size, stack);
906 }
907 return asan_realloc(p, nmemb * size, stack);
908}
909
910void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
911 if (!p)
912 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
913 if (size == 0) {
914 if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
915 instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
916 return nullptr;
917 }
918 // Allocate a size of 1 if we shouldn't free() on Realloc to 0
919 size = 1;
920 }
921 return SetErrnoOnNull(instance.Reallocate(p, size, stack));
922}
923
924void *asan_valloc(uptr size, BufferedStackTrace *stack) {
925 return SetErrnoOnNull(
926 instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
927}
928
929void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
930 uptr PageSize = GetPageSizeCached();
931 if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
932 errno = errno_ENOMEM;
933 if (AllocatorMayReturnNull())
934 return nullptr;
935 ReportPvallocOverflow(size, stack);
936 }
937 // pvalloc(0) should allocate one page.
938 size = size ? RoundUpTo(size, PageSize) : PageSize;
939 return SetErrnoOnNull(
940 instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
941}
942
943void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
944 AllocType alloc_type) {
945 if (UNLIKELY(!IsPowerOfTwo(alignment))) {
946 errno = errno_EINVAL;
947 if (AllocatorMayReturnNull())
948 return nullptr;
949 ReportInvalidAllocationAlignment(alignment, stack);
950 }
951 return SetErrnoOnNull(
952 instance.Allocate(size, alignment, stack, alloc_type, true));
953}
954
955void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
956 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
957 errno = errno_EINVAL;
958 if (AllocatorMayReturnNull())
959 return nullptr;
960 ReportInvalidAlignedAllocAlignment(size, alignment, stack);
961 }
962 return SetErrnoOnNull(
963 instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
964}
965
966int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
967 BufferedStackTrace *stack) {
968 if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
969 if (AllocatorMayReturnNull())
970 return errno_EINVAL;
971 ReportInvalidPosixMemalignAlignment(alignment, stack);
972 }
973 void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
974 if (UNLIKELY(!ptr))
975 // OOM error is already taken care of by Allocate.
976 return errno_ENOMEM;
977 CHECK(IsAligned((uptr)ptr, alignment));
978 *memptr = ptr;
979 return 0;
980}
981
982uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
983 if (!ptr) return 0;
984 uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
985 if (flags()->check_malloc_usable_size && (usable_size == 0)) {
986 GET_STACK_TRACE_FATAL(pc, bp);
987 ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
988 }
989 return usable_size;
990}
991
992uptr asan_mz_size(const void *ptr) {
993 return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
994}
995
996void asan_mz_force_lock() {
997 instance.ForceLock();
998}
999
1000void asan_mz_force_unlock() {
1001 instance.ForceUnlock();
1002}
1003
1004void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
1005 instance.SetRssLimitExceeded(limit_exceeded);
1006}
1007
1008} // namespace __asan
1009
1010// --- Implementation of LSan-specific functions --- {{{1
1011namespace __lsan {
1012void LockAllocator() {
1013 __asan::get_allocator().ForceLock();
1014}
1015
1016void UnlockAllocator() {
1017 __asan::get_allocator().ForceUnlock();
1018}
1019
1020void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
1021 *begin = (uptr)&__asan::get_allocator();
1022 *end = *begin + sizeof(__asan::get_allocator());
1023}
1024
1025uptr PointsIntoChunk(void* p) {
1026 uptr addr = reinterpret_cast<uptr>(p);
1027 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1028 if (!m) return 0;
1029 uptr chunk = m->Beg();
1030 if (m->chunk_state != __asan::CHUNK_ALLOCATED)
1031 return 0;
1032 if (m->AddrIsInside(addr, /*locked_version=*/true))
1033 return chunk;
1034 if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
1035 addr))
1036 return chunk;
1037 return 0;
1038}
1039
1040uptr GetUserBegin(uptr chunk) {
1041 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1042 CHECK(m);
1043 return m->Beg();
1044}
1045
1046LsanMetadata::LsanMetadata(uptr chunk) {
1047 metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
1048}
1049
1050bool LsanMetadata::allocated() const {
1051 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1052 return m->chunk_state == __asan::CHUNK_ALLOCATED;
1053}
1054
1055ChunkTag LsanMetadata::tag() const {
1056 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1057 return static_cast<ChunkTag>(m->lsan_tag);
1058}
1059
1060void LsanMetadata::set_tag(ChunkTag value) {
1061 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1062 m->lsan_tag = value;
1063}
1064
1065uptr LsanMetadata::requested_size() const {
1066 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1067 return m->UsedSize(/*locked_version=*/true);
1068}
1069
1070u32 LsanMetadata::stack_trace_id() const {
1071 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1072 return m->alloc_context_id;
1073}
1074
1075void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1076 __asan::get_allocator().ForEachChunk(callback, arg);
1077}
1078
1079IgnoreObjectResult IgnoreObjectLocked(const void *p) {
1080 uptr addr = reinterpret_cast<uptr>(p);
1081 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1082 if (!m) return kIgnoreObjectInvalid;
1083 if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
1084 if (m->lsan_tag == kIgnored)
1085 return kIgnoreObjectAlreadyIgnored;
1086 m->lsan_tag = __lsan::kIgnored;
1087 return kIgnoreObjectSuccess;
1088 } else {
1089 return kIgnoreObjectInvalid;
1090 }
1091}
1092} // namespace __lsan
1093
1094// ---------------------- Interface ---------------- {{{1
1095using namespace __asan;
1096
1097// ASan allocator doesn't reserve extra bytes, so normally we would
1098// just return "size". We don't want to expose our redzone sizes, etc here.
1099uptr __sanitizer_get_estimated_allocated_size(uptr size) {
1100 return size;
1101}
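
Since the function above just echoes its argument, the estimated size and the reported allocated size agree for a live ASan allocation. A small check of that, assuming a program built with -fsanitize=address and the public declarations from compiler-rt's sanitizer/allocator_interface.h header:

  #include <sanitizer/allocator_interface.h>
  #include <cstdio>
  #include <cstdlib>

  int main() {
    void *p = malloc(100);
    // ASan hides its redzones, so both queries report the requested 100 bytes.
    printf("estimated: %zu\n", __sanitizer_get_estimated_allocated_size(100));
    printf("allocated: %zu\n", __sanitizer_get_allocated_size(p));
    printf("owned: %d\n", __sanitizer_get_ownership(p));
    free(p);
    return 0;
  }
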
1102
1103int __sanitizer_get_ownership(const void *p) {
1104 uptr ptr = reinterpret_cast<uptr>(p);
1105 return instance.AllocationSize(ptr) > 0;
1106}
1107
1108uptr __sanitizer_get_allocated_size(const void *p) {
1109 if (!p) return 0;
1110 uptr ptr = reinterpret_cast<uptr>(p);
1111 uptr allocated_size = instance.AllocationSize(ptr);
1112 // Die if p is not malloced or if it is already freed.
1113 if (allocated_size == 0) {
1114 GET_STACK_TRACE_FATAL_HERE;
1115 ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
1116 }
1117 return allocated_size;
1118}
1119
1120void __sanitizer_purge_allocator() {
1121 GET_STACK_TRACE_MALLOC;
1122 instance.Purge(&stack);
1123}
1124
1125int __asan_update_allocation_context(void* addr) {
1126 GET_STACK_TRACE_MALLOC;
1127 return instance.UpdateAllocationStack((uptr)addr, &stack);
1128}
1129
1130#if !SANITIZER_SUPPORTS_WEAK_HOOKS
1131// Provide default (no-op) implementation of malloc hooks.
1132SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
1133 void *ptr, uptr size) {
1134 (void)ptr;
1135 (void)size;
1136}
1137
1138SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
1139 (void)ptr;
1140}
1141#endif
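
These weak no-op definitions exist so that a program can provide its own strong definitions and be notified on every allocation and deallocation. A hedged sketch of such an override; the prototypes below mirror the weak definitions in this file, and the public header sanitizer/allocator_interface.h is the authoritative place to check the exact parameter types before relying on this:

  #include <cstdio>
  #include <cstddef>

  // Strong definitions override the weak no-ops at link time. Keep the hooks
  // cheap and avoid allocating inside them: they run on every malloc/free.
  extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
    fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
  }

  extern "C" void __sanitizer_free_hook(void *ptr) {
    fprintf(stderr, "free %p\n", ptr);
  }
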

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/../sanitizer_common/sanitizer_atomic_clang_x86.h

1//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10// Not intended for direct inclusion. Include sanitizer_atomic.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef SANITIZER_ATOMIC_CLANG_X86_H
15#define SANITIZER_ATOMIC_CLANG_X86_H
16
17namespace __sanitizer {
18
19INLINE void proc_yield(int cnt) {
20 __asm__ __volatile__("" ::: "memory");
21 for (int i = 0; i < cnt; i++)
22 __asm__ __volatile__("pause");
23 __asm__ __volatile__("" ::: "memory");
24}
25
26template<typename T>
27INLINE typename T::Type atomic_load(
28 const volatile T *a, memory_order mo) {
29 DCHECK(mo & (memory_order_relaxed | memory_order_consume
30 | memory_order_acquire | memory_order_seq_cst));
31 DCHECK(!((uptr)a % sizeof(*a)));
32 typename T::Type v;
33
34 if (sizeof(*a) < 8 || sizeof(void*) == 8) {
35 // Assume that aligned loads are atomic.
36 if (mo == memory_order_relaxed) {
4.1
'mo' is equal to memory_order_relaxed
5
Taking true branch
37 v = a->val_dont_use;
38 } else if (mo == memory_order_consume) {
39 // Assume that processor respects data dependencies
40 // (and that compiler won't break them).
41 __asm__ __volatile__("" ::: "memory");
42 v = a->val_dont_use;
43 __asm__ __volatile__("" ::: "memory");
44 } else if (mo == memory_order_acquire) {
45 __asm__ __volatile__("" ::: "memory");
46 v = a->val_dont_use;
47 // On x86 loads are implicitly acquire.
48 __asm__ __volatile__("" ::: "memory");
49 } else { // seq_cst
50 // On x86 plain MOV is enough for seq_cst store.
51 __asm__ __volatile__("" ::: "memory");
52 v = a->val_dont_use;
53 __asm__ __volatile__("" ::: "memory");
54 }
55 } else {
56 // 64-bit load on 32-bit platform.
57 __asm__ __volatile__(
58 "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
59 "movq %%mm0, %0;" // (ptr could be read-only)
60 "emms;" // Empty mmx state/Reset FP regs
61 : "=m" (v)
62 : "m" (a->val_dont_use)
63 : // mark the mmx registers as clobbered
64#ifdef __MMX__
65 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
66#endif // #ifdef __MMX__
67 "memory");
68 }
69 return v;
6
Returning value (loaded from 'v'), which participates in a condition later
70}
71
72template<typename T>
73INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
74 DCHECK(mo & (memory_order_relaxed | memory_order_release
75 | memory_order_seq_cst));
76 DCHECK(!((uptr)a % sizeof(*a)));
77
78 if (sizeof(*a) < 8 || sizeof(void*) == 8) {
79 // Assume that aligned loads are atomic.
80 if (mo == memory_order_relaxed) {
81 a->val_dont_use = v;
82 } else if (mo == memory_order_release) {
83 // On x86 stores are implicitly release.
84 __asm__ __volatile__("" ::: "memory");
85 a->val_dont_use = v;
86 __asm__ __volatile__("" ::: "memory");
87 } else { // seq_cst
88 // On x86 stores are implicitly release.
89 __asm__ __volatile__("" ::: "memory");
90 a->val_dont_use = v;
91 __sync_synchronize();
92 }
93 } else {
94 // 64-bit store on 32-bit platform.
95 __asm__ __volatile__(
96 "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
97 "movq %%mm0, %0;"
98 "emms;" // Empty mmx state/Reset FP regs
99 : "=m" (a->val_dont_use)
100 : "m" (v)
101 : // mark the mmx registers as clobbered
102#ifdef __MMX__
103 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
104#endif // #ifdef __MMX__
105 "memory");
106 if (mo == memory_order_seq_cst)
107 __sync_synchronize();
108 }
109}
110
111} // namespace __sanitizer
112
113#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
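
These hand-rolled operations exist because the runtime avoids depending on <atomic>; on x86-64 they compile down to plain MOVs plus compiler barriers, with a real fence only for seq_cst stores. For comparison, the equivalent of the relaxed load/store pair used on release_to_os_interval_ms_ elsewhere in this report, written with standard C++ (this mapping is an editorial illustration, not part of the runtime):

  #include <atomic>
  #include <cstdint>

  std::atomic<int32_t> release_interval_ms{-1};

  // Relaxed load: a plain 32-bit MOV on x86-64, matching the
  // atomic_load(..., memory_order_relaxed) path above.
  int32_t ReadInterval() {
    return release_interval_ms.load(std::memory_order_relaxed);
  }

  // Relaxed store: also a plain MOV; only a seq_cst store needs the extra
  // full fence that the runtime emits via __sync_synchronize().
  void WriteInterval(int32_t ms) {
    release_interval_ms.store(ms, std::memory_order_relaxed);
  }
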

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h

1//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16// This class implements a complete memory allocator by using two
17// internal allocators:
18// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
19// When allocating 2^x bytes it should return 2^x aligned chunk.
20// PrimaryAllocator is used via a local AllocatorCache.
21// SecondaryAllocator can allocate anything, but is not efficient.
22template <class PrimaryAllocator,
23 class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
24class CombinedAllocator {
25 public:
26 using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
27 using SecondaryAllocator =
28 LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
29 LargeMmapAllocatorPtrArray,
30 typename PrimaryAllocator::AddressSpaceView>;
31
32 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
33 stats_.InitLinkerInitialized();
34 primary_.Init(release_to_os_interval_ms);
35 secondary_.InitLinkerInitialized();
36 }
37
38 void Init(s32 release_to_os_interval_ms) {
39 stats_.Init();
40 primary_.Init(release_to_os_interval_ms);
41 secondary_.Init();
42 }
43
44 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
45 // Returning 0 on malloc(0) may break a lot of code.
46 if (size == 0)
32
Assuming 'size' is not equal to 0
33
Taking false branch
47 size = 1;
48 if (size + alignment < size) {
34
Assuming the condition is false
35
Taking false branch
49 Report("WARNING: %s: CombinedAllocator allocation overflow: "
50 "0x%zx bytes with 0x%zx alignment requested\n",
51 SanitizerToolName, size, alignment);
52 return nullptr;
53 }
54 uptr original_size = size;
55 // If alignment requirements are to be fulfilled by the frontend allocator
56 // rather than by the primary or secondary, passing an alignment lower than
57 // or equal to 8 will prevent any further rounding up, as well as the later
58 // alignment check.
59 if (alignment > 8)
35.1
'alignment' is <= 8
36
Taking false branch
60 size = RoundUpTo(size, alignment);
61 // The primary allocator should return a 2^x aligned allocation when
62 // requested 2^x bytes, hence using the rounded up 'size' when being
63 // serviced by the primary (this is no longer true when the primary is
64 // using a non-fixed base address). The secondary takes care of the
65 // alignment without such requirement, and allocating 'size' would use
66 // extraneous memory, so we employ 'original_size'.
67 void *res;
68 if (primary_.CanAllocate(size, alignment))
37
Calling 'SizeClassAllocator64::CanAllocate'
40
Returning from 'SizeClassAllocator64::CanAllocate'
41
Taking false branch
69 res = cache->Allocate(&primary_, primary_.ClassID(size));
70 else
71 res = secondary_.Allocate(&stats_, original_size, alignment);
42
Value assigned to 'res'
72 if (alignment > 8)
42.1
'alignment' is <= 8
43
Taking false branch
73 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
74 return res;
44
Returning pointer (loaded from 'res')
75 }
76
77 s32 ReleaseToOSIntervalMs() const {
78 return primary_.ReleaseToOSIntervalMs();
79 }
80
81 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
82 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
83 }
84
85 void ForceReleaseToOS() {
86 primary_.ForceReleaseToOS();
87 }
88
89 void Deallocate(AllocatorCache *cache, void *p) {
90 if (!p) return;
91 if (primary_.PointerIsMine(p))
92 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
93 else
94 secondary_.Deallocate(&stats_, p);
95 }
96
97 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
98 uptr alignment) {
99 if (!p)
100 return Allocate(cache, new_size, alignment);
101 if (!new_size) {
102 Deallocate(cache, p);
103 return nullptr;
104 }
105 CHECK(PointerIsMine(p));
106 uptr old_size = GetActuallyAllocatedSize(p);
107 uptr memcpy_size = Min(new_size, old_size);
108 void *new_p = Allocate(cache, new_size, alignment);
109 if (new_p)
110 internal_memcpy(new_p, p, memcpy_size);
111 Deallocate(cache, p);
112 return new_p;
113 }
114
115 bool PointerIsMine(void *p) {
116 if (primary_.PointerIsMine(p))
117 return true;
118 return secondary_.PointerIsMine(p);
119 }
120
121 bool FromPrimary(void *p) {
122 return primary_.PointerIsMine(p);
123 }
124
125 void *GetMetaData(const void *p) {
126 if (primary_.PointerIsMine(p))
127 return primary_.GetMetaData(p);
128 return secondary_.GetMetaData(p);
129 }
130
131 void *GetBlockBegin(const void *p) {
132 if (primary_.PointerIsMine(p))
133 return primary_.GetBlockBegin(p);
134 return secondary_.GetBlockBegin(p);
135 }
136
137 // This function does the same as GetBlockBegin, but is much faster.
138 // Must be called with the allocator locked.
139 void *GetBlockBeginFastLocked(void *p) {
140 if (primary_.PointerIsMine(p))
141 return primary_.GetBlockBegin(p);
142 return secondary_.GetBlockBeginFastLocked(p);
143 }
144
145 uptr GetActuallyAllocatedSize(void *p) {
146 if (primary_.PointerIsMine(p))
147 return primary_.GetActuallyAllocatedSize(p);
148 return secondary_.GetActuallyAllocatedSize(p);
149 }
150
151 uptr TotalMemoryUsed() {
152 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
153 }
154
155 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
156
157 void InitCache(AllocatorCache *cache) {
158 cache->Init(&stats_);
159 }
160
161 void DestroyCache(AllocatorCache *cache) {
162 cache->Destroy(&primary_, &stats_);
163 }
164
165 void SwallowCache(AllocatorCache *cache) {
166 cache->Drain(&primary_);
167 }
168
169 void GetStats(AllocatorStatCounters s) const {
170 stats_.Get(s);
171 }
172
173 void PrintStats() {
174 primary_.PrintStats();
175 secondary_.PrintStats();
176 }
177
178 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
179 // introspection API.
180 void ForceLock() {
181 primary_.ForceLock();
182 secondary_.ForceLock();
183 }
184
185 void ForceUnlock() {
186 secondary_.ForceUnlock();
187 primary_.ForceUnlock();
188 }
189
190 // Iterate over all existing chunks.
191 // The allocator must be locked when calling this function.
192 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
193 primary_.ForEachChunk(callback, arg);
194 secondary_.ForEachChunk(callback, arg);
195 }
196
197 private:
198 PrimaryAllocator primary_;
199 SecondaryAllocator secondary_;
200 AllocatorGlobalStats stats_;
201};
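
The dispatch rule described in the header comment (small, well-aligned requests go to the size-class primary, everything else to the mmap-based secondary) is easy to state on its own. An illustrative sketch with a hypothetical stand-in for the primary; this is not the class above, just the same decision logic:

  #include <cstddef>

  struct FakePrimary {
    // Stand-in for SizeClassMap::kMaxSize; the real value comes from Params.
    static constexpr size_t kMaxSize = 1 << 17;
    static bool CanAllocate(size_t size, size_t alignment) {
      return size <= kMaxSize && alignment <= kMaxSize;
    }
  };

  // True: the request would be served by the size-class (primary) allocator.
  // False: it falls through to the secondary (or fails on overflow).
  bool ServedByPrimary(size_t size, size_t alignment) {
    if (size == 0) size = 1;                    // malloc(0) still returns memory
    if (size + alignment < size) return false;  // overflow check from Allocate()
    if (alignment > 8)
      size = ((size + alignment - 1) / alignment) * alignment;  // RoundUpTo
    return FakePrimary::CanAllocate(size, alignment);
  }
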

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary64.h

1//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
17
18// SizeClassAllocator64 -- allocator for 64-bit address space.
19// The template parameter Params is a class containing the actual parameters.
20//
21// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
22// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
23// Otherwise SpaceBeg=kSpaceBeg (fixed address).
24// kSpaceSize is a power of two.
25// At the beginning the entire space is mprotect-ed, then small parts of it
26// are mapped on demand.
27//
28// Region: a part of Space dedicated to a single size class.
29// There are kNumClasses Regions of equal size.
30//
31// UserChunk: a piece of memory returned to user.
32// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
33
34// FreeArray is an array of free-d chunks (stored as 4-byte offsets)
35//
36// A Region looks like this:
37// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
38
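
To make the region picture above concrete: user chunks grow up from the region base, while per-chunk metadata grows down from the end of the region minus the FreeArray reservation. A small arithmetic sketch with made-up constants (the real values come from the Params template argument), mirroring GetMetaData and GetMetadataEnd further down:

  #include <cstdint>
  using uptr = uintptr_t;

  constexpr uptr kRegionSize    = 1ULL << 34;       // hypothetical region size
  constexpr uptr kFreeArraySize = kRegionSize / 8;  // tail reserved for FreeArray
  constexpr uptr kMetadataSize  = 32;               // hypothetical metadata bytes

  // UserChunk i of a given size class starts at the bottom of the region...
  uptr UserChunk(uptr region_beg, uptr chunk_size, uptr i) {
    return region_beg + i * chunk_size;
  }

  // ...and its MetaChunk is the i-th slot counting down from the metadata end,
  // which sits kFreeArraySize bytes below the region end.
  uptr MetaChunk(uptr region_beg, uptr i) {
    uptr metadata_end = region_beg + kRegionSize - kFreeArraySize;
    return metadata_end - (1 + i) * kMetadataSize;
  }
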
39struct SizeClassAllocator64FlagMasks { // Bit masks.
40 enum {
41 kRandomShuffleChunks = 1,
42 };
43};
44
45template <class Params>
46class SizeClassAllocator64 {
47 public:
48 using AddressSpaceView = typename Params::AddressSpaceView;
49 static const uptr kSpaceBeg = Params::kSpaceBeg;
50 static const uptr kSpaceSize = Params::kSpaceSize;
51 static const uptr kMetadataSize = Params::kMetadataSize;
52 typedef typename Params::SizeClassMap SizeClassMap;
53 typedef typename Params::MapUnmapCallback MapUnmapCallback;
54
55 static const bool kRandomShuffleChunks =
56 Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
57
58 typedef SizeClassAllocator64<Params> ThisT;
59 typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
60
61 // When we know the size class (the region base) we can represent a pointer
62 // as a 4-byte integer (offset from the region start shifted right by 4).
63 typedef u32 CompactPtrT;
64 static const uptr kCompactPtrScale = 4;
65 CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
66 return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
67 }
68 uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
69 return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
70 }
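
A quick worked example of this encoding (addresses are made up): chunks are 16-byte aligned, so dropping the low 4 bits loses nothing, and a 32-bit offset covers the whole region because kRegionSize is bounded by 2^36 (see the COMPILER_CHECKs below).

  #include <cassert>
  #include <cstdint>

  int main() {
    using uptr = uintptr_t;
    const uptr kCompactPtrScale = 4;
    uptr base  = 0x600000000000;   // hypothetical region base
    uptr chunk = base + 0x120;     // some 16-byte aligned chunk in the region
    uint32_t compact = (uint32_t)((chunk - base) >> kCompactPtrScale);  // 0x12
    uptr restored = base + ((uptr)compact << kCompactPtrScale);
    assert(restored == chunk);
    return 0;
  }
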
71
72 void Init(s32 release_to_os_interval_ms) {
73 uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
74 if (kUsingConstantSpaceBeg) {
75 CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
76 PrimaryAllocatorName, kSpaceBeg));
77 } else {
78 NonConstSpaceBeg = address_range.Init(TotalSpaceSize,
79 PrimaryAllocatorName);
80 CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
81 }
82 SetReleaseToOSIntervalMs(release_to_os_interval_ms);
83 MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
84 "SizeClassAllocator: region info");
85 // Check that the RegionInfo array is aligned on the CacheLine size.
86 DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
87 }
88
89 s32 ReleaseToOSIntervalMs() const {
90 return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
91 }
92
93 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
94 atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
95 memory_order_relaxed);
96 }
97
98 void ForceReleaseToOS() {
99 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
100 BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
101 MaybeReleaseToOS(class_id, true /*force*/);
102 }
103 }
104
105 static bool CanAllocate(uptr size, uptr alignment) {
106 return size <= SizeClassMap::kMaxSize &&
38
Assuming 'size' is > 'kMaxSize'
39
Returning zero, which participates in a condition later
107 alignment <= SizeClassMap::kMaxSize;
108 }
109
110 NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
111 const CompactPtrT *chunks, uptr n_chunks) {
112 RegionInfo *region = GetRegionInfo(class_id);
113 uptr region_beg = GetRegionBeginBySizeClass(class_id);
114 CompactPtrT *free_array = GetFreeArray(region_beg);
115
116 BlockingMutexLock l(&region->mutex);
117 uptr old_num_chunks = region->num_freed_chunks;
118 uptr new_num_freed_chunks = old_num_chunks + n_chunks;
119 // Failure to allocate free array space while releasing memory is non
120 // recoverable.
121 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
122 new_num_freed_chunks))) {
123 Report("FATAL: Internal error: %s's allocator exhausted the free list "
124 "space for size class %zd (%zd bytes).\n", SanitizerToolName,
125 class_id, ClassIdToSize(class_id));
126 Die();
127 }
128 for (uptr i = 0; i < n_chunks; i++)
129 free_array[old_num_chunks + i] = chunks[i];
130 region->num_freed_chunks = new_num_freed_chunks;
131 region->stats.n_freed += n_chunks;
132
133 MaybeReleaseToOS(class_id, false /*force*/);
134 }
135
136 NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
137 CompactPtrT *chunks, uptr n_chunks) {
138 RegionInfo *region = GetRegionInfo(class_id);
139 uptr region_beg = GetRegionBeginBySizeClass(class_id);
140 CompactPtrT *free_array = GetFreeArray(region_beg);
141
142 BlockingMutexLock l(&region->mutex);
143 if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
144 if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
145 n_chunks - region->num_freed_chunks)))
146 return false;
147 CHECK_GE(region->num_freed_chunks, n_chunks);
148 }
149 region->num_freed_chunks -= n_chunks;
150 uptr base_idx = region->num_freed_chunks;
151 for (uptr i = 0; i < n_chunks; i++)
152 chunks[i] = free_array[base_idx + i];
153 region->stats.n_allocated += n_chunks;
154 return true;
155 }
156
157 bool PointerIsMine(const void *p) const {
158 uptr P = reinterpret_cast<uptr>(p);
159 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
160 return P / kSpaceSize == kSpaceBeg / kSpaceSize;
161 return P >= SpaceBeg() && P < SpaceEnd();
162 }
163
164 uptr GetRegionBegin(const void *p) {
165 if (kUsingConstantSpaceBeg)
166 return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
167 uptr space_beg = SpaceBeg();
168 return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
169 space_beg;
170 }
171
172 uptr GetRegionBeginBySizeClass(uptr class_id) const {
173 return SpaceBeg() + kRegionSize * class_id;
174 }
175
176 uptr GetSizeClass(const void *p) {
177 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
178 return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
179 return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
180 kNumClassesRounded;
181 }
182
183 void *GetBlockBegin(const void *p) {
184 uptr class_id = GetSizeClass(p);
185 uptr size = ClassIdToSize(class_id);
186 if (!size) return nullptr;
187 uptr chunk_idx = GetChunkIdx((uptr)p, size);
188 uptr reg_beg = GetRegionBegin(p);
189 uptr beg = chunk_idx * size;
190 uptr next_beg = beg + size;
191 if (class_id >= kNumClasses) return nullptr;
192 const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
193 if (region->mapped_user >= next_beg)
194 return reinterpret_cast<void*>(reg_beg + beg);
195 return nullptr;
196 }
197
198 uptr GetActuallyAllocatedSize(void *p) {
199 CHECK(PointerIsMine(p));
200 return ClassIdToSize(GetSizeClass(p));
201 }
202
203 static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
204
205 void *GetMetaData(const void *p) {
206 uptr class_id = GetSizeClass(p);
207 uptr size = ClassIdToSize(class_id);
208 uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
209 uptr region_beg = GetRegionBeginBySizeClass(class_id);
210 return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
211 (1 + chunk_idx) * kMetadataSize);
212 }
213
214 uptr TotalMemoryUsed() {
215 uptr res = 0;
216 for (uptr i = 0; i < kNumClasses; i++)
217 res += GetRegionInfo(i)->allocated_user;
218 return res;
219 }
220
221 // Test-only.
222 void TestOnlyUnmap() {
223 UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
224 }
225
226 static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
227 uptr stats_size) {
228 for (uptr class_id = 0; class_id < stats_size; class_id++)
229 if (stats[class_id] == start)
230 stats[class_id] = rss;
231 }
232
233 void PrintStats(uptr class_id, uptr rss) {
234 RegionInfo *region = GetRegionInfo(class_id);
235 if (region->mapped_user == 0) return;
236 uptr in_use = region->stats.n_allocated - region->stats.n_freed;
237 uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
238 Printf(
239 "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
240 "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
241 "last released: %6zdK region: 0x%zx\n",
242 region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
243 region->mapped_user >> 10, region->stats.n_allocated,
244 region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
245 rss >> 10, region->rtoi.num_releases,
246 region->rtoi.last_released_bytes >> 10,
247 SpaceBeg() + kRegionSize * class_id);
248 }
249
250 void PrintStats() {
251 uptr rss_stats[kNumClasses];
252 for (uptr class_id = 0; class_id < kNumClasses; class_id++)
253 rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
254 GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
255
256 uptr total_mapped = 0;
257 uptr total_rss = 0;
258 uptr n_allocated = 0;
259 uptr n_freed = 0;
260 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
261 RegionInfo *region = GetRegionInfo(class_id);
262 if (region->mapped_user != 0) {
263 total_mapped += region->mapped_user;
264 total_rss += rss_stats[class_id];
265 }
266 n_allocated += region->stats.n_allocated;
267 n_freed += region->stats.n_freed;
268 }
269
270 Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
271 "%zd allocations; remains %zd\n", total_mapped >> 20,
272 total_rss >> 20, n_allocated, n_allocated - n_freed);
273 for (uptr class_id = 1; class_id < kNumClasses; class_id++)
274 PrintStats(class_id, rss_stats[class_id]);
275 }
276
277 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
278 // introspection API.
279 void ForceLock() {
280 for (uptr i = 0; i < kNumClasses; i++) {
281 GetRegionInfo(i)->mutex.Lock();
282 }
283 }
284
285 void ForceUnlock() {
286 for (int i = (int)kNumClasses - 1; i >= 0; i--) {
287 GetRegionInfo(i)->mutex.Unlock();
288 }
289 }
290
291 // Iterate over all existing chunks.
292 // The allocator must be locked when calling this function.
293 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
294 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
295 RegionInfo *region = GetRegionInfo(class_id);
296 uptr chunk_size = ClassIdToSize(class_id);
297 uptr region_beg = SpaceBeg() + class_id * kRegionSize;
298 uptr region_allocated_user_size =
299 AddressSpaceView::Load(region)->allocated_user;
300 for (uptr chunk = region_beg;
301 chunk < region_beg + region_allocated_user_size;
302 chunk += chunk_size) {
303 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
304 callback(chunk, arg);
305 }
306 }
307 }
308
309 static uptr ClassIdToSize(uptr class_id) {
310 return SizeClassMap::Size(class_id);
311 }
312
313 static uptr AdditionalSize() {
314 return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
315 GetPageSizeCached());
316 }
317
318 typedef SizeClassMap SizeClassMapT;
319 static const uptr kNumClasses = SizeClassMap::kNumClasses;
320 static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
321
322 // A packed array of counters. Each counter occupies 2^n bits, enough to store
323 // counter's max_value. Ctor will try to allocate the required buffer via
324 // mapper->MapPackedCounterArrayBuffer and the caller is expected to check
325 // whether the initialization was successful by checking IsAllocated() result.
326 // For the performance sake, none of the accessors check the validity of the
327 // arguments, it is assumed that index is always in [0, n) range and the value
328 // is not incremented past max_value.
329 template<class MemoryMapperT>
330 class PackedCounterArray {
331 public:
332 PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
333 : n(num_counters), memory_mapper(mapper) {
334 CHECK_GT(num_counters, 0);
335 CHECK_GT(max_value, 0);
336 constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
337 // Rounding counter storage size up to the power of two allows for using
338 // bit shifts calculating particular counter's index and offset.
339 uptr counter_size_bits =
340 RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
341 CHECK_LE(counter_size_bits, kMaxCounterBits);
342 counter_size_bits_log = Log2(counter_size_bits);
343 counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
344
345 uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
346 CHECK_GT(packing_ratio, 0);
347 packing_ratio_log = Log2(packing_ratio);
348 bit_offset_mask = packing_ratio - 1;
349
350 buffer_size =
351 (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
352 sizeof(*buffer);
353 buffer = reinterpret_cast<u64*>(
354 memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
355 }
356 ~PackedCounterArray() {
357 if (buffer) {
358 memory_mapper->UnmapPackedCounterArrayBuffer(
359 reinterpret_cast<uptr>(buffer), buffer_size);
360 }
361 }
362
363 bool IsAllocated() const {
364 return !!buffer;
365 }
366
367 u64 GetCount() const {
368 return n;
369 }
370
371 uptr Get(uptr i) const {
372 DCHECK_LT(i, n);
373 uptr index = i >> packing_ratio_log;
374 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
375 return (buffer[index] >> bit_offset) & counter_mask;
376 }
377
378 void Inc(uptr i) const {
379 DCHECK_LT(Get(i), counter_mask);
380 uptr index = i >> packing_ratio_log;
381 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
382 buffer[index] += 1ULL << bit_offset;
383 }
384
385 void IncRange(uptr from, uptr to) const {
386 DCHECK_LE(from, to);
387 for (uptr i = from; i <= to; i++)
388 Inc(i);
389 }
390
391 private:
392 const u64 n;
393 u64 counter_size_bits_log;
394 u64 counter_mask;
395 u64 packing_ratio_log;
396 u64 bit_offset_mask;
397
398 MemoryMapperT* const memory_mapper;
399 u64 buffer_size;
400 u64* buffer;
401 };
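
Outside of the memory-mapper plumbing, the packing scheme above is just "round the counter width up to a power of two and index with shifts". A self-contained sketch of that arithmetic, using std::vector in place of MapPackedCounterArrayBuffer:

  #include <cstdint>
  #include <vector>

  class PackedCounters {
   public:
    PackedCounters(uint64_t n, uint64_t max_value) {
      // Round the counter width up to a power of two (1, 2, 4, ..., 64 bits).
      uint64_t bits = 1;
      while (bits < 64 && ((1ULL << bits) - 1) < max_value) bits <<= 1;
      counter_bits_log_ = __builtin_ctzll(bits);
      mask_ = (bits == 64) ? ~0ULL : (1ULL << bits) - 1;
      per_word_log_ = 6 - counter_bits_log_;  // counters per u64 word, as a shift
      buffer_.resize((n + (1ULL << per_word_log_) - 1) >> per_word_log_);
    }
    uint64_t Get(uint64_t i) const {
      uint64_t off = (i & ((1ULL << per_word_log_) - 1)) << counter_bits_log_;
      return (buffer_[i >> per_word_log_] >> off) & mask_;
    }
    void Inc(uint64_t i) {
      uint64_t off = (i & ((1ULL << per_word_log_) - 1)) << counter_bits_log_;
      buffer_[i >> per_word_log_] += 1ULL << off;
    }
   private:
    uint64_t counter_bits_log_ = 0;
    uint64_t per_word_log_ = 0;
    uint64_t mask_ = 0;
    std::vector<uint64_t> buffer_;
  };

With max_value = 3, for example, each counter takes 2 bits, so 64 page counters fit in two 64-bit words.
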
402
403 template<class MemoryMapperT>
404 class FreePagesRangeTracker {
405 public:
406 explicit FreePagesRangeTracker(MemoryMapperT* mapper)
407 : memory_mapper(mapper),
408 page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
409 in_the_range(false), current_page(0), current_range_start_page(0) {}
410
411 void NextPage(bool freed) {
412 if (freed) {
413 if (!in_the_range) {
414 current_range_start_page = current_page;
415 in_the_range = true;
416 }
417 } else {
418 CloseOpenedRange();
419 }
420 current_page++;
421 }
422
423 void Done() {
424 CloseOpenedRange();
425 }
426
427 private:
428 void CloseOpenedRange() {
429 if (in_the_range) {
430 memory_mapper->ReleasePageRangeToOS(
431 current_range_start_page << page_size_scaled_log,
432 current_page << page_size_scaled_log);
433 in_the_range = false;
434 }
435 }
436
437 MemoryMapperT* const memory_mapper;
438 const uptr page_size_scaled_log;
439 bool in_the_range;
440 uptr current_page;
441 uptr current_range_start_page;
442 };
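
The tracker above turns a per-page freed/not-freed stream into coalesced [start, end) ranges, so that runs of fully-freed pages are returned to the OS in one call. A standalone sketch of the same coalescing, using page indexes instead of the scaled byte offsets the real class passes to ReleasePageRangeToOS:

  #include <cstdio>

  int main() {
    // Hypothetical per-page "all chunks on this page are freed" flags.
    bool freed[] = {true, true, false, true, true, true, false};
    int n = sizeof(freed) / sizeof(freed[0]);
    int range_start = -1;
    for (int page = 0; page <= n; page++) {          // one extra step acts as Done()
      bool f = (page < n) && freed[page];
      if (f && range_start < 0) range_start = page;  // open a range
      if (!f && range_start >= 0) {                  // close it on the first live page
        printf("release pages [%d, %d)\n", range_start, page);
        range_start = -1;
      }
    }
    return 0;  // prints [0, 2) and [3, 6)
  }
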
443
444 // Iterates over the free_array to identify memory pages containing freed
445 // chunks only and returns these pages back to OS.
446 // allocated_pages_count is the total number of pages allocated for the
447 // current bucket.
448 template<class MemoryMapperT>
449 static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
450 uptr free_array_count, uptr chunk_size,
451 uptr allocated_pages_count,
452 MemoryMapperT *memory_mapper) {
453 const uptr page_size = GetPageSizeCached();
454
455 // Figure out the number of chunks per page and whether we can take a fast
456 // path (the number of chunks per page is the same for all pages).
457 uptr full_pages_chunk_count_max;
458 bool same_chunk_count_per_page;
459 if (chunk_size <= page_size && page_size % chunk_size == 0) {
460 // Same number of chunks per page, no cross overs.
461 full_pages_chunk_count_max = page_size / chunk_size;
462 same_chunk_count_per_page = true;
463 } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
464 chunk_size % (page_size % chunk_size) == 0) {
465 // Some chunks are crossing page boundaries, which means that the page
466 // contains one or two partial chunks, but all pages contain the same
467 // number of chunks.
468 full_pages_chunk_count_max = page_size / chunk_size + 1;
469 same_chunk_count_per_page = true;
470 } else if (chunk_size <= page_size) {
471 // Some chunks are crossing page boundaries, which means that the page
472 // contains one or two partial chunks.
473 full_pages_chunk_count_max = page_size / chunk_size + 2;
474 same_chunk_count_per_page = false;
475 } else if (chunk_size > page_size && chunk_size % page_size == 0) {
476 // One chunk covers multiple pages, no cross overs.
477 full_pages_chunk_count_max = 1;
478 same_chunk_count_per_page = true;
479 } else if (chunk_size > page_size) {
480 // One chunk covers multiple pages, Some chunks are crossing page
481 // boundaries. Some pages contain one chunk, some contain two.
482 full_pages_chunk_count_max = 2;
483 same_chunk_count_per_page = false;
484 } else {
485 UNREACHABLE("All chunk_size/page_size ratios must be handled.");
486 }
487
488 PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
489 full_pages_chunk_count_max,
490 memory_mapper);
491 if (!counters.IsAllocated())
492 return;
493
494 const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
495 const uptr page_size_scaled = page_size >> kCompactPtrScale;
496 const uptr page_size_scaled_log = Log2(page_size_scaled);
497
498 // Iterate over free chunks and count how many free chunks affect each
499 // allocated page.
500 if (chunk_size <= page_size && page_size % chunk_size == 0) {
501 // Each chunk affects one page only.
502 for (uptr i = 0; i < free_array_count; i++)
503 counters.Inc(free_array[i] >> page_size_scaled_log);
504 } else {
505 // In all other cases chunks might affect more than one page.
506 for (uptr i = 0; i < free_array_count; i++) {
507 counters.IncRange(
508 free_array[i] >> page_size_scaled_log,
509 (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
510 }
511 }
512
513 // Iterate over pages detecting ranges of pages with chunk counters equal
514 // to the expected number of chunks for the particular page.
515 FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
516 if (same_chunk_count_per_page) {
517 // Fast path, every page has the same number of chunks affecting it.
518 for (uptr i = 0; i < counters.GetCount(); i++)
519 range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
520 } else {
521 // Slow path, go through the pages keeping count how many chunks affect
522 // each page.
523 const uptr pn =
524 chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
525 const uptr pnc = pn * chunk_size_scaled;
526 // The idea is to increment the current page pointer by the first chunk
527 // size, middle portion size (the portion of the page covered by chunks
528 // except the first and the last one) and then the last chunk size, adding
529 // up the number of chunks on the current page and checking on every step
530 // whether the page boundary was crossed.
531 uptr prev_page_boundary = 0;
532 uptr current_boundary = 0;
533 for (uptr i = 0; i < counters.GetCount(); i++) {
534 uptr page_boundary = prev_page_boundary + page_size_scaled;
535 uptr chunks_per_page = pn;
536 if (current_boundary < page_boundary) {
537 if (current_boundary > prev_page_boundary)
538 chunks_per_page++;
539 current_boundary += pnc;
540 if (current_boundary < page_boundary) {
541 chunks_per_page++;
542 current_boundary += chunk_size_scaled;
543 }
544 }
545 prev_page_boundary = page_boundary;
546
547 range_tracker.NextPage(counters.Get(i) == chunks_per_page);
548 }
549 }
550 range_tracker.Done();
551 }
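
The chunk-size/page-size case analysis at the top of this function is easier to sanity-check with a few numbers. The helper below repeats the same branches; page_size is fixed at 4096 for the examples, and the chunk sizes are illustrative rather than taken from a real size class map:

  #include <cstdint>
  #include <cstdio>
  #include <initializer_list>

  using uptr = uint64_t;

  // Same branches as the analysis above; page_size % chunk_size != 0 is
  // guaranteed in the second branch because the first one already returned.
  uptr FullPagesChunkCountMax(uptr chunk_size, uptr page_size) {
    if (chunk_size <= page_size && page_size % chunk_size == 0)
      return page_size / chunk_size;       // 128-byte chunks -> 32 per page
    if (chunk_size <= page_size && chunk_size % (page_size % chunk_size) == 0)
      return page_size / chunk_size + 1;   // 48-byte chunks  -> 86
    if (chunk_size <= page_size)
      return page_size / chunk_size + 2;   // 40-byte chunks  -> 104
    if (chunk_size % page_size == 0)
      return 1;                            // 8192-byte chunks -> 1
    return 2;                              // 6144-byte chunks -> 2
  }

  int main() {
    for (uptr cs : {128, 48, 40, 8192, 6144})
      printf("chunk %5llu -> at most %llu chunks per page\n",
             (unsigned long long)cs,
             (unsigned long long)FullPagesChunkCountMax(cs, 4096));
    return 0;
  }
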
552
553 private:
554 friend class MemoryMapper;
555
556 ReservedAddressRange address_range;
557
558 static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
559 // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
560 // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
561 // elements, but in reality this will not happen. For simplicity we
562 // dedicate 1/8 of the region's virtual space to FreeArray.
563 static const uptr kFreeArraySize = kRegionSize / 8;
564
565 static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
566 uptr NonConstSpaceBeg;
567 uptr SpaceBeg() const {
568 return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
569 }
570 uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
571 // kRegionSize must be >= 2^32.
572 COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
573 // kRegionSize must be <= 2^36, see CompactPtrT.
574 COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
575 // Call mmap for user memory with at least this size.
576 static const uptr kUserMapSize = 1 << 16;
577 // Call mmap for metadata memory with at least this size.
578 static const uptr kMetaMapSize = 1 << 16;
579 // Call mmap for free array memory with at least this size.
580 static const uptr kFreeArrayMapSize = 1 << 16;
581
582 atomic_sint32_t release_to_os_interval_ms_;
583
584 struct Stats {
585 uptr n_allocated;
586 uptr n_freed;
587 };
588
589 struct ReleaseToOsInfo {
590 uptr n_freed_at_last_release;
591 uptr num_releases;
592 u64 last_release_at_ns;
593 u64 last_released_bytes;
594 };
595
596 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
597 BlockingMutex mutex;
598 uptr num_freed_chunks; // Number of elements in the freearray.
599 uptr mapped_free_array; // Bytes mapped for freearray.
600 uptr allocated_user; // Bytes allocated for user memory.
601 uptr allocated_meta; // Bytes allocated for metadata.
602 uptr mapped_user; // Bytes mapped for user memory.
603 uptr mapped_meta; // Bytes mapped for metadata.
604 u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
605 bool exhausted; // Whether region is out of space for new chunks.
606 Stats stats;
607 ReleaseToOsInfo rtoi;
608 };
609 COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
610
611 RegionInfo *GetRegionInfo(uptr class_id) const {
612 DCHECK_LT(class_id, kNumClasses);
613 RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
614 return &regions[class_id];
615 }
616
617 uptr GetMetadataEnd(uptr region_beg) const {
618 return region_beg + kRegionSize - kFreeArraySize;
619 }
620
621 uptr GetChunkIdx(uptr chunk, uptr size) const {
622 if (!kUsingConstantSpaceBeg)
623 chunk -= SpaceBeg();
624
625 uptr offset = chunk % kRegionSize;
626 // Here we divide by a non-constant. This is costly.
627 // size always fits into 32-bits. If the offset fits too, use 32-bit div.
628 if (offset >> (SANITIZER_WORDSIZE / 2))
629 return offset / size;
630 return (u32)offset / (u32)size;
631 }
632
633 CompactPtrT *GetFreeArray(uptr region_beg) const {
634 return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
635 }
636
637 bool MapWithCallback(uptr beg, uptr size, const char *name) {
638 uptr mapped = address_range.Map(beg, size, name);
639 if (UNLIKELY(!mapped))
640 return false;
641 CHECK_EQ(beg, mapped);
642 MapUnmapCallback().OnMap(beg, size);
643 return true;
644 }
645
646 void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
647 CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
648 MapUnmapCallback().OnMap(beg, size);
649 }
650
651 void UnmapWithCallbackOrDie(uptr beg, uptr size) {
652 MapUnmapCallback().OnUnmap(beg, size);
653 address_range.Unmap(beg, size);
654 }
655
656 bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
657 uptr num_freed_chunks) {
658 uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
659 if (region->mapped_free_array < needed_space) {
660 uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
661 CHECK_LE(new_mapped_free_array, kFreeArraySize);
662 uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
663 region->mapped_free_array;
664 uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
665 if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
666 "SizeClassAllocator: freearray")))
667 return false;
668 region->mapped_free_array = new_mapped_free_array;
669 }
670 return true;
671 }
672
673 // Check whether this size class is exhausted.
674 bool IsRegionExhausted(RegionInfo *region, uptr class_id,
675 uptr additional_map_size) {
676 if (LIKELY(region->mapped_user + region->mapped_meta +
677 additional_map_size <= kRegionSize - kFreeArraySize))
678 return false;
679 if (!region->exhausted) {
680 region->exhausted = true;
681 Printf("%s: Out of memory. ", SanitizerToolName);
682 Printf("The process has exhausted %zuMB for size class %zu.\n",
683 kRegionSize >> 20, ClassIdToSize(class_id));
684 }
685 return true;
686 }
687
688 NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
689 RegionInfo *region, uptr requested_count) {
690 // region->mutex is held.
691 const uptr region_beg = GetRegionBeginBySizeClass(class_id);
692 const uptr size = ClassIdToSize(class_id);
693
694 const uptr total_user_bytes =
695 region->allocated_user + requested_count * size;
696 // Map more space for chunks, if necessary.
697 if (LIKELY(total_user_bytes > region->mapped_user)) {
698 if (UNLIKELY(region->mapped_user == 0)) {
699 if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
700 // The random state is initialized from ASLR.
701 region->rand_state = static_cast<u32>(region_beg >> 12);
702 // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
703 // preventing just allocated memory from being released sooner than
704 // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
705 // for short lived processes.
706 // Do it only when the feature is turned on, to avoid a potentially
707 // extraneous syscall.
708 if (ReleaseToOSIntervalMs() >= 0)
709 region->rtoi.last_release_at_ns = MonotonicNanoTime();
710 }
711 // Do the mmap for the user memory.
712 const uptr user_map_size =
713 RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
714 if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
715 return false;
716 if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
717 user_map_size,
718 "SizeClassAllocator: region data")))
719 return false;
720 stat->Add(AllocatorStatMapped, user_map_size);
721 region->mapped_user += user_map_size;
722 }
723 const uptr new_chunks_count =
724 (region->mapped_user - region->allocated_user) / size;
725
726 if (kMetadataSize) {
727 // Calculate the required space for metadata.
728 const uptr total_meta_bytes =
729 region->allocated_meta + new_chunks_count * kMetadataSize;
730 const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
731 RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
732 // Map more space for metadata, if necessary.
733 if (meta_map_size) {
734 if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
735 return false;
736 if (UNLIKELY(!MapWithCallback(
737 GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
738 meta_map_size, "SizeClassAllocator: region metadata")))
739 return false;
740 region->mapped_meta += meta_map_size;
741 }
742 }
743
744 // If necessary, allocate more space for the free array and populate it with
745 // newly allocated chunks.
746 const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
747 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
748 return false;
749 CompactPtrT *free_array = GetFreeArray(region_beg);
750 for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
751 i++, chunk += size)
752 free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
753 if (kRandomShuffleChunks)
754 RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
755 &region->rand_state);
756
757 // All necessary memory is mapped and now it is safe to advance all
758 // 'allocated_*' counters.
759 region->num_freed_chunks += new_chunks_count;
760 region->allocated_user += new_chunks_count * size;
761 CHECK_LE(region->allocated_user, region->mapped_user);
762 region->allocated_meta += new_chunks_count * kMetadataSize;
763 CHECK_LE(region->allocated_meta, region->mapped_meta);
764 region->exhausted = false;
765
766 // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
767 // MaybeReleaseToOS from releasing just allocated pages or protect these
768 // not yet used chunks some other way.
769
770 return true;
771 }
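
For reference, the free-array population loop at lines 750-752 writes the offsets of the newly carved chunks back-to-front, so the lowest chunk offset lands at the highest index of the array. The following self-contained sketch reproduces that pattern; the CompactPtrT encoding and the helper name are stand-ins, not the allocator's real types.

#include <cstdint>
#include <cstdio>
#include <vector>

using uptr = uintptr_t;
using CompactPtrT = uint32_t;  // assumption: offsets stored in a compact form

void PushNewChunks(std::vector<CompactPtrT> &free_array, uptr allocated_user,
                   uptr mapped_user, uptr chunk_size) {
  const uptr new_chunks_count = (mapped_user - allocated_user) / chunk_size;
  const uptr old_size = free_array.size();
  free_array.resize(old_size + new_chunks_count);
  // Newly carved chunks are written back-to-front: the lowest chunk offset
  // ends up at the highest index, mirroring the loop in PopulateFreeArray.
  for (uptr i = 0, chunk = allocated_user; i < new_chunks_count;
       i++, chunk += chunk_size)
    free_array[old_size + new_chunks_count - 1 - i] =
        static_cast<CompactPtrT>(chunk);
}

int main() {
  std::vector<CompactPtrT> fa;
  PushNewChunks(fa, /*allocated_user=*/0, /*mapped_user=*/1024, /*chunk_size=*/256);
  for (CompactPtrT c : fa) printf("%u ", c);  // prints: 768 512 256 0
  printf("\n");
}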
772
773 class MemoryMapper {
774 public:
775 MemoryMapper(const ThisT& base_allocator, uptr class_id)
776 : allocator(base_allocator),
777 region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
778 released_ranges_count(0),
779 released_bytes(0) {
780 }
781
782 uptr GetReleasedRangesCount() const {
783 return released_ranges_count;
784 }
785
786 uptr GetReleasedBytes() const {
787 return released_bytes;
788 }
789
790 uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
791 // TODO(alekseyshl): The idea to explore is to check if we have enough
792 // space between num_freed_chunks*sizeof(CompactPtrT) and
793 // mapped_free_array to fit buffer_size bytes and use that space instead
794 // of mapping a temporary one.
795 return reinterpret_cast<uptr>(
796 MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
797 }
798
799 void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
800 UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
801 }
802
803 // Releases [from, to) range of pages back to OS.
804 void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
805 const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
806 const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
807 ReleaseMemoryPagesToOS(from_page, to_page);
808 released_ranges_count++;
809 released_bytes += to_page - from_page;
810 }
811
812 private:
813 const ThisT& allocator;
814 const uptr region_base;
815 uptr released_ranges_count;
816 uptr released_bytes;
817 };
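
MemoryMapper is the glue object handed to ReleaseFreeMemoryToOS: it maps a scratch buffer for the packed page counters, translates compact pointers back to addresses inside the region, releases each page range, and tallies how many ranges and bytes were returned. The toy sketch below shows the same shape with hypothetical names rather than sanitizer APIs; a real mapper would back ReleasePageRangeToOS with madvise or an equivalent.

#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

struct CountingMapper {
  uptr released_ranges_count = 0;
  uptr released_bytes = 0;
  void ReleasePageRangeToOS(uptr from, uptr to) {
    // A real mapper would return the pages to the OS here.
    released_ranges_count++;
    released_bytes += to - from;
  }
};

// Caller reports [from, to) page ranges; the mapper performs the release and
// keeps the statistics, just as ReleaseFreeMemoryToOS uses MemoryMapper.
template <class MemoryMapperT>
void ReleaseRanges(const uptr (*ranges)[2], uptr n, MemoryMapperT *mapper) {
  for (uptr i = 0; i < n; i++)
    mapper->ReleasePageRangeToOS(ranges[i][0], ranges[i][1]);
}

int main() {
  const uptr ranges[][2] = {{0x1000, 0x3000}, {0x8000, 0x9000}};
  CountingMapper mapper;
  ReleaseRanges(ranges, 2, &mapper);
  printf("ranges=%zu bytes=%zu\n", (size_t)mapper.released_ranges_count,
         (size_t)mapper.released_bytes);  // ranges=2 bytes=12288
}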
818
819 // Attempts to release RAM occupied by freed chunks back to OS. The region is
820 // expected to be locked.
821 void MaybeReleaseToOS(uptr class_id, bool force) {
822 RegionInfo *region = GetRegionInfo(class_id);
823 const uptr chunk_size = ClassIdToSize(class_id);
824 const uptr page_size = GetPageSizeCached();
825
826 uptr n = region->num_freed_chunks;
827 if (n * chunk_size < page_size)
828 return; // No chance to release anything.
829 if ((region->stats.n_freed -
830 region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
831 return; // Nothing new to release.
832 }
833
834 if (!force) {
835 s32 interval_ms = ReleaseToOSIntervalMs();
836 if (interval_ms < 0)
837 return;
838
839 if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
840 MonotonicNanoTime()) {
841 return; // Memory was returned recently.
842 }
843 }
844
845 MemoryMapper memory_mapper(*this, class_id);
846
847 ReleaseFreeMemoryToOS<MemoryMapper>(
848 GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
849 RoundUpTo(region->allocated_user, page_size) / page_size,
850 &memory_mapper);
851
852 if (memory_mapper.GetReleasedRangesCount() > 0) {
853 region->rtoi.n_freed_at_last_release = region->stats.n_freed;
854 region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
855 region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
856 }
857 region->rtoi.last_release_at_ns = MonotonicNanoTime();
858 }
859};
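
MaybeReleaseToOS above gates the release on three conditions: the freed chunks must amount to at least one page, something must have been freed since the previous release, and, unless force is set, the configured interval must have elapsed since the last release. The sketch below factors that gating into a standalone predicate; the parameters mirror the fields used above but are plain values here, not the allocator's real state.

#include <cstdint>

using uptr = uintptr_t;
using u64 = uint64_t;
using s32 = int32_t;

bool ShouldAttemptRelease(uptr num_freed_chunks, uptr chunk_size,
                          uptr page_size, uptr freed_since_last_release,
                          bool force, s32 interval_ms,
                          u64 last_release_at_ns, u64 now_ns) {
  if (num_freed_chunks * chunk_size < page_size)
    return false;  // Not even one page worth of free chunks.
  if (freed_since_last_release * chunk_size < page_size)
    return false;  // Nothing new has been freed since the last release.
  if (!force) {
    if (interval_ms < 0)
      return false;  // Periodic release is disabled.
    if (last_release_at_ns + (u64)interval_ms * 1000000ULL > now_ns)
      return false;  // Memory was returned too recently.
  }
  return true;
}

int main() {
  // Example: 64 freed chunks of 128 bytes on 4 KiB pages, forced release.
  return ShouldAttemptRelease(64, 128, 4096, 64, /*force=*/true,
                              /*interval_ms=*/5000, 0, 0) ? 0 : 1;
}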