Bug Summary

File: projects/compiler-rt/lib/asan/asan_allocator.cc
Warning: line 494, column 46
Array access (from variable 'alloc_beg') results in a null pointer dereference
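
The reported path runs through Allocator::Allocate: the call to allocator.Allocate() returns null (step 43), AllocatorMayReturnNull() is assumed false (steps 46-47), the path continues past ReportOutOfMemory(), so 'alloc_beg' is initialized to 0 (step 48) and the store of kAllocBegMagic at line 494 (step 53) dereferences a null pointer. A minimal sketch of that shape, with hypothetical stand-in names rather than the runtime's real code (in the runtime the report-and-die helper is expected not to return):

    #include <cstdint>
    #include <cstdlib>

    // Hypothetical stand-in for the out-of-memory reporting path; here it aborts.
    static void ReportOutOfMemorySketch(uintptr_t size) {
      (void)size;
      std::abort();
    }

    static void WriteMagicSketch(void *allocated, uintptr_t size, bool may_return_null) {
      if (!allocated) {
        if (may_return_null)
          return;
        ReportOutOfMemorySketch(size);  // if this call is assumed to return...
      }
      uintptr_t *alloc_beg = reinterpret_cast<uintptr_t *>(allocated);
      alloc_beg[0] = 0xCC6E96B9;  // ...this store writes through a null pointer
    }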

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name asan_allocator.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -I /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/asan/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/compiler-rt/lib/asan -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/asan/asan_allocator.cc -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/asan/asan_allocator.cc

1//===-- asan_allocator.cc -------------------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// Implementation of ASan's memory allocator, 2-nd version.
13// This variant uses the allocator from sanitizer_common, i.e. the one shared
14// with ThreadSanitizer and MemorySanitizer.
15//
16//===----------------------------------------------------------------------===//
17
18#include "asan_allocator.h"
19#include "asan_mapping.h"
20#include "asan_poisoning.h"
21#include "asan_report.h"
22#include "asan_stack.h"
23#include "asan_thread.h"
24#include "sanitizer_common/sanitizer_allocator_checks.h"
25#include "sanitizer_common/sanitizer_allocator_interface.h"
26#include "sanitizer_common/sanitizer_errno.h"
27#include "sanitizer_common/sanitizer_flags.h"
28#include "sanitizer_common/sanitizer_internal_defs.h"
29#include "sanitizer_common/sanitizer_list.h"
30#include "sanitizer_common/sanitizer_stackdepot.h"
31#include "sanitizer_common/sanitizer_quarantine.h"
32#include "lsan/lsan_common.h"
33
34namespace __asan {
35
36// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
37// We use adaptive redzones: for larger allocation larger redzones are used.
38static u32 RZLog2Size(u32 rz_log) {
39 CHECK_LT(rz_log, 8);
40 return 16 << rz_log;
41}
42
43static u32 RZSize2Log(u32 rz_size) {
44 CHECK_GE(rz_size, 16);
45 CHECK_LE(rz_size, 2048);
46 CHECK(IsPowerOfTwo(rz_size));
47 u32 res = Log2(rz_size) - 4;
48 CHECK_EQ(rz_size, RZLog2Size(res));
49 return res;
50}
51
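
The two helpers above define the 3-bit redzone encoding: sizes 16..2048 are stored as log2(size) - 4 and decoded back as 16 << log. A standalone round-trip check of that encoding, as a sketch with its own names rather than the runtime's functions:

    #include <cassert>
    #include <cstdint>

    // Mirror of the encoding above: size = 16 << log, log = log2(size) - 4.
    static uint32_t RZLog2SizeSketch(uint32_t rz_log) { return 16u << rz_log; }
    static uint32_t RZSize2LogSketch(uint32_t rz_size) {
      uint32_t log = 0;
      while ((16u << log) < rz_size) ++log;
      return log;
    }

    int main() {
      for (uint32_t log = 0; log < 8; ++log)
        assert(RZSize2LogSketch(RZLog2SizeSketch(log)) == log);  // 16 .. 2048
      return 0;
    }
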
52static AsanAllocator &get_allocator();
53
54// The memory chunk allocated from the underlying allocator looks like this:
55// L L L L L L H H U U U U U U R R
56// L -- left redzone words (0 or more bytes)
57// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
58// U -- user memory.
59// R -- right redzone (0 or more bytes)
60// ChunkBase consists of ChunkHeader and other bytes that overlap with user
61// memory.
62
63// If the left redzone is greater than the ChunkHeader size we store a magic
64// value in the first uptr word of the memory block and store the address of
65// ChunkBase in the next uptr.
66// M B L L L L L L L L L H H U U U U U U
67// | ^
68// ---------------------|
69// M -- magic value kAllocBegMagic
70// B -- address of ChunkHeader pointing to the first 'H'
71static const uptr kAllocBegMagic = 0xCC6E96B9;
72
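
A small illustration of the scheme described above, with hypothetical names: when the chunk header does not sit at the very start of the allocator block, the first two words of the block record kAllocBegMagic and the header's address, so a lookup that only knows the block start can still find the header (this is what lines 494-495 write and what GetAsanChunk later reads back):

    #include <cstdint>

    static const uintptr_t kMagicSketch = 0xCC6E96B9;

    // Record the header address behind a magic word at the block start ("M B ...").
    static void RecordHeaderSketch(uintptr_t *alloc_beg, void *header) {
      alloc_beg[0] = kMagicSketch;                         // M
      alloc_beg[1] = reinterpret_cast<uintptr_t>(header);  // B
    }

    // Recover the header from the block start; without the magic word the
    // header is assumed to start right at the beginning of the block.
    static void *FindHeaderSketch(uintptr_t *alloc_beg) {
      if (alloc_beg[0] == kMagicSketch)
        return reinterpret_cast<void *>(alloc_beg[1]);
      return alloc_beg;
    }
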
73struct ChunkHeader {
74 // 1-st 8 bytes.
75 u32 chunk_state : 8; // Must be first.
76 u32 alloc_tid : 24;
77
78 u32 free_tid : 24;
79 u32 from_memalign : 1;
80 u32 alloc_type : 2;
81 u32 rz_log : 3;
82 u32 lsan_tag : 2;
83 // 2-nd 8 bytes
84 // This field is used for small sizes. For large sizes it is equal to
85 // SizeClassMap::kMaxSize and the actual size is stored in the
86 // SecondaryAllocator's metadata.
87 u32 user_requested_size : 29;
88 // align < 8 -> 0
89 // else -> log2(min(align, 512)) - 2
90 u32 user_requested_alignment_log : 3;
91 u32 alloc_context_id;
92};
93
94struct ChunkBase : ChunkHeader {
95 // Header2, intersects with user memory.
96 u32 free_context_id;
97};
98
99static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
100static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
101COMPILER_CHECK(kChunkHeaderSize == 16);
102COMPILER_CHECK(kChunkHeader2Size <= 16);
103
104// Every chunk of memory allocated by this allocator can be in one of 3 states:
105// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
106// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
107// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
108enum {
109 CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
110 CHUNK_ALLOCATED = 2,
111 CHUNK_QUARANTINE = 3
112};
113
114struct AsanChunk: ChunkBase {
115 uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
116 uptr UsedSize(bool locked_version = false) {
117 if (user_requested_size != SizeClassMap::kMaxSize)
118 return user_requested_size;
119 return *reinterpret_cast<uptr *>(
120 get_allocator().GetMetaData(AllocBeg(locked_version)));
121 }
122 void *AllocBeg(bool locked_version = false) {
123 if (from_memalign) {
124 if (locked_version)
125 return get_allocator().GetBlockBeginFastLocked(
126 reinterpret_cast<void *>(this));
127 return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
128 }
129 return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
130 }
131 bool AddrIsInside(uptr addr, bool locked_version = false) {
132 return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
133 }
134};
135
136struct QuarantineCallback {
137 QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
138 : cache_(cache),
139 stack_(stack) {
140 }
141
142 void Recycle(AsanChunk *m) {
143 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
144 atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
145 CHECK_NE(m->alloc_tid, kInvalidTid);
146 CHECK_NE(m->free_tid, kInvalidTid);
147 PoisonShadow(m->Beg(),
148 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)),
149 kAsanHeapLeftRedzoneMagic);
150 void *p = reinterpret_cast<void *>(m->AllocBeg());
151 if (p != m) {
152 uptr *alloc_magic = reinterpret_cast<uptr *>(p);
153 CHECK_EQ(alloc_magic[0], kAllocBegMagic);
154 // Clear the magic value, as allocator internals may overwrite the
155 // contents of deallocated chunk, confusing GetAsanChunk lookup.
156 alloc_magic[0] = 0;
157 CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
158 }
159
160 // Statistics.
161 AsanStats &thread_stats = GetCurrentThreadStats();
162 thread_stats.real_frees++;
163 thread_stats.really_freed += m->UsedSize();
164
165 get_allocator().Deallocate(cache_, p);
166 }
167
168 void *Allocate(uptr size) {
169 void *res = get_allocator().Allocate(cache_, size, 1);
170 // TODO(alekseys): Consider making quarantine OOM-friendly.
171 if (UNLIKELY(!res)__builtin_expect(!!(!res), 0))
172 ReportOutOfMemory(size, stack_);
173 return res;
174 }
175
176 void Deallocate(void *p) {
177 get_allocator().Deallocate(cache_, p);
178 }
179
180 private:
181 AllocatorCache* const cache_;
182 BufferedStackTrace* const stack_;
183};
184
185typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
186typedef AsanQuarantine::Cache QuarantineCache;
187
188void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
189 PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
190 // Statistics.
191 AsanStats &thread_stats = GetCurrentThreadStats();
192 thread_stats.mmaps++;
193 thread_stats.mmaped += size;
194}
195void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
196 PoisonShadow(p, size, 0);
197 // We are about to unmap a chunk of user memory.
198 // Mark the corresponding shadow memory as not needed.
199 FlushUnneededASanShadowMemory(p, size);
200 // Statistics.
201 AsanStats &thread_stats = GetCurrentThreadStats();
202 thread_stats.munmaps++;
203 thread_stats.munmaped += size;
204}
205
206// We can not use THREADLOCAL because it is not supported on some of the
207// platforms we care about (OSX 10.6, Android).
208// static THREADLOCAL AllocatorCache cache;
209AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
210 CHECK(ms);
211 return &ms->allocator_cache;
212}
213
214QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
215 CHECK(ms);
216 CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
217 return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
218}
219
220void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
221 quarantine_size_mb = f->quarantine_size_mb;
222 thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
223 min_redzone = f->redzone;
224 max_redzone = f->max_redzone;
225 may_return_null = cf->allocator_may_return_null;
226 alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
227 release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
228}
229
230void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
231 f->quarantine_size_mb = quarantine_size_mb;
232 f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
233 f->redzone = min_redzone;
234 f->max_redzone = max_redzone;
235 cf->allocator_may_return_null = may_return_null;
236 f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
237 cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
238}
239
240struct Allocator {
241 static const uptr kMaxAllowedMallocSize =
242 FIRST_32_SECOND_64(3UL << 30, 1ULL << 40)(1ULL << 40);
243
244 AsanAllocator allocator;
245 AsanQuarantine quarantine;
246 StaticSpinMutex fallback_mutex;
247 AllocatorCache fallback_allocator_cache;
248 QuarantineCache fallback_quarantine_cache;
249
250 atomic_uint8_t rss_limit_exceeded;
251
252 // ------------------- Options --------------------------
253 atomic_uint16_t min_redzone;
254 atomic_uint16_t max_redzone;
255 atomic_uint8_t alloc_dealloc_mismatch;
256
257 // ------------------- Initialization ------------------------
258 explicit Allocator(LinkerInitialized)
259 : quarantine(LINKER_INITIALIZED),
260 fallback_quarantine_cache(LINKER_INITIALIZED) {}
261
262 void CheckOptions(const AllocatorOptions &options) const {
263 CHECK_GE(options.min_redzone, 16);
264 CHECK_GE(options.max_redzone, options.min_redzone);
265 CHECK_LE(options.max_redzone, 2048);
266 CHECK(IsPowerOfTwo(options.min_redzone));
267 CHECK(IsPowerOfTwo(options.max_redzone));
268 }
269
270 void SharedInitCode(const AllocatorOptions &options) {
271 CheckOptions(options);
272 quarantine.Init((uptr)options.quarantine_size_mb << 20,
273 (uptr)options.thread_local_quarantine_size_kb << 10);
274 atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
275 memory_order_release);
276 atomic_store(&min_redzone, options.min_redzone, memory_order_release);
277 atomic_store(&max_redzone, options.max_redzone, memory_order_release);
278 }
279
280 void InitLinkerInitialized(const AllocatorOptions &options) {
281 SetAllocatorMayReturnNull(options.may_return_null);
282 allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
283 SharedInitCode(options);
284 }
285
286 bool RssLimitExceeded() {
287 return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
288 }
289
290 void SetRssLimitExceeded(bool limit_exceeded) {
291 atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
292 }
293
294 void RePoisonChunk(uptr chunk) {
295 // This could be a user-facing chunk (with redzones), or some internal
296 // housekeeping chunk, like TransferBatch. Start by assuming the former.
297 AsanChunk *ac = GetAsanChunk((void *)chunk);
298 uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
299 uptr beg = ac->Beg();
300 uptr end = ac->Beg() + ac->UsedSize(true);
301 uptr chunk_end = chunk + allocated_size;
302 if (chunk < beg && beg < end && end <= chunk_end &&
303 ac->chunk_state == CHUNK_ALLOCATED) {
304 // Looks like a valid AsanChunk in use, poison redzones only.
305 PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
306 uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY(1ULL << kDefaultShadowScale));
307 FastPoisonShadowPartialRightRedzone(
308 end_aligned_down, end - end_aligned_down,
309 chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
310 } else {
311 // This is either not an AsanChunk or freed or quarantined AsanChunk.
312 // In either case, poison everything.
313 PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
314 }
315 }
316
317 void ReInitialize(const AllocatorOptions &options) {
318 SetAllocatorMayReturnNull(options.may_return_null);
319 allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
320 SharedInitCode(options);
321
322 // Poison all existing allocation's redzones.
323 if (CanPoisonMemory()) {
324 allocator.ForceLock();
325 allocator.ForEachChunk(
326 [](uptr chunk, void *alloc) {
327 ((Allocator *)alloc)->RePoisonChunk(chunk);
328 },
329 this);
330 allocator.ForceUnlock();
331 }
332 }
333
334 void GetOptions(AllocatorOptions *options) const {
335 options->quarantine_size_mb = quarantine.GetSize() >> 20;
336 options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
337 options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
338 options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
339 options->may_return_null = AllocatorMayReturnNull();
340 options->alloc_dealloc_mismatch =
341 atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
342 options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
343 }
344
345 // -------------------- Helper methods. -------------------------
346 uptr ComputeRZLog(uptr user_requested_size) {
347 u32 rz_log =
348 user_requested_size <= 64 - 16 ? 0 :
349 user_requested_size <= 128 - 32 ? 1 :
350 user_requested_size <= 512 - 64 ? 2 :
351 user_requested_size <= 4096 - 128 ? 3 :
352 user_requested_size <= (1 << 14) - 256 ? 4 :
353 user_requested_size <= (1 << 15) - 512 ? 5 :
354 user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
355 u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
356 u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
357 return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
358 }
359
360 static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
361 if (user_requested_alignment < 8)
362 return 0;
363 if (user_requested_alignment > 512)
364 user_requested_alignment = 512;
365 return Log2(user_requested_alignment) - 2;
366 }
367
368 static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
369 if (user_requested_alignment_log == 0)
370 return 0;
371 return 1LL << (user_requested_alignment_log + 2);
372 }
373
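
A standalone check of the 3-bit alignment encoding implemented by the two helpers above (a sketch, not the runtime's test): alignments below 8 encode to 0, larger ones are clamped at 512 and stored as log2(align) - 2, and decoding returns 1 << (log + 2):

    #include <cassert>

    static unsigned EncodeAlignLogSketch(unsigned align) {
      if (align < 8) return 0;
      if (align > 512) align = 512;
      unsigned log = 0;
      while ((1u << log) < align) ++log;  // log2 of a power of two
      return log - 2;
    }

    static unsigned DecodeAlignSketch(unsigned log) {
      return log == 0 ? 0 : 1u << (log + 2);
    }

    int main() {
      assert(EncodeAlignLogSketch(4) == 0);
      assert(EncodeAlignLogSketch(8) == 1 && DecodeAlignSketch(1) == 8);
      assert(EncodeAlignLogSketch(512) == 7 && DecodeAlignSketch(7) == 512);
      assert(EncodeAlignLogSketch(4096) == 7);  // clamped to 512
      return 0;
    }
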
374 // We have an address between two chunks, and we want to report just one.
375 AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
376 AsanChunk *right_chunk) {
377 // Prefer an allocated chunk over freed chunk and freed chunk
378 // over available chunk.
379 if (left_chunk->chunk_state != right_chunk->chunk_state) {
380 if (left_chunk->chunk_state == CHUNK_ALLOCATED)
381 return left_chunk;
382 if (right_chunk->chunk_state == CHUNK_ALLOCATED)
383 return right_chunk;
384 if (left_chunk->chunk_state == CHUNK_QUARANTINE)
385 return left_chunk;
386 if (right_chunk->chunk_state == CHUNK_QUARANTINE)
387 return right_chunk;
388 }
389 // Same chunk_state: choose based on offset.
390 sptr l_offset = 0, r_offset = 0;
391 CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
392 CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
393 if (l_offset < r_offset)
394 return left_chunk;
395 return right_chunk;
396 }
397
398 // -------------------- Allocation/Deallocation routines ---------------
399 void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
400 AllocType alloc_type, bool can_fill) {
401 if (UNLIKELY(!asan_inited)__builtin_expect(!!(!asan_inited), 0))
1. Taking false branch
402 AsanInitFromRtl();
403 if (RssLimitExceeded()) {
2. Assuming the condition is false
3. Taking false branch
404 if (AllocatorMayReturnNull())
405 return nullptr;
406 ReportRssLimitExceeded(stack);
407 }
408 Flags &fl = *flags();
409 CHECK(stack);
410 const uptr min_alignment = SHADOW_GRANULARITY(1ULL << kDefaultShadowScale);
411 const uptr user_requested_alignment_log =
412 ComputeUserRequestedAlignmentLog(alignment);
413 if (alignment < min_alignment)
4. Taking false branch
414 alignment = min_alignment;
415 if (size == 0) {
5. Assuming 'size' is not equal to 0
6. Taking false branch
416 // We'd be happy to avoid allocating memory for zero-size requests, but
417 // some programs/tests depend on this behavior and assume that malloc
418 // would not return NULL even for zero-size allocations. Moreover, it
419 // looks like operator new should never return NULL, and results of
420 // consecutive "new" calls must be different even if the allocated size
421 // is zero.
422 size = 1;
423 }
424 CHECK(IsPowerOfTwo(alignment));
425 uptr rz_log = ComputeRZLog(size);
426 uptr rz_size = RZLog2Size(rz_log);
427 uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
428 uptr needed_size = rounded_size + rz_size;
429 if (alignment > min_alignment)
7. Assuming 'alignment' is <= 'min_alignment'
8. Taking false branch
430 needed_size += alignment;
431 bool using_primary_allocator = true;
432 // If we are allocating from the secondary allocator, there will be no
433 // automatic right redzone, so add the right redzone manually.
434 if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
9. Taking true branch
435 needed_size += rz_size;
436 using_primary_allocator = false;
437 }
438 CHECK(IsAligned(needed_size, min_alignment));
439 if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
10. Assuming 'size' is <= 'kMaxAllowedMallocSize'
11. Assuming 'needed_size' is <= 'kMaxAllowedMallocSize'
12. Taking false branch
440 if (AllocatorMayReturnNull()) {
441 Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
442 (void*)size);
443 return nullptr;
444 }
445 ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
446 stack);
447 }
448
449 AsanThread *t = GetCurrentThread();
450 void *allocated;
451 if (t) {
13. Assuming 't' is null
14. Taking false branch
452 AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
453 allocated = allocator.Allocate(cache, needed_size, 8);
454 } else {
455 SpinMutexLock l(&fallback_mutex);
456 AllocatorCache *cache = &fallback_allocator_cache;
457 allocated = allocator.Allocate(cache, needed_size, 8);
15. Calling 'CombinedAllocator::Allocate'
42. Returning from 'CombinedAllocator::Allocate'
43. Value assigned to 'allocated'
458 }
459 if (UNLIKELY(!allocated)__builtin_expect(!!(!allocated), 0)) {
44. Within the expansion of the macro 'UNLIKELY':
    a. Assuming 'allocated' is null
45. Taking true branch
460 SetAllocatorOutOfMemory();
461 if (AllocatorMayReturnNull())
46. Assuming the condition is false
47. Taking false branch
462 return nullptr;
463 ReportOutOfMemory(size, stack);
464 }
465
466 if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
467 // Heap poisoning is enabled, but the allocator provides an unpoisoned
468 // chunk. This is possible if CanPoisonMemory() was false for some
469 // time, for example, due to flags()->start_disabled.
470 // Anyway, poison the block before using it for anything else.
471 uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
472 PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
473 }
474
475 uptr alloc_beg = reinterpret_cast<uptr>(allocated);
48. 'alloc_beg' initialized to 0
476 uptr alloc_end = alloc_beg + needed_size;
477 uptr beg_plus_redzone = alloc_beg + rz_size;
478 uptr user_beg = beg_plus_redzone;
479 if (!IsAligned(user_beg, alignment))
49. Taking false branch
480 user_beg = RoundUpTo(user_beg, alignment);
481 uptr user_end = user_beg + size;
482 CHECK_LE(user_end, alloc_end);
483 uptr chunk_beg = user_beg - kChunkHeaderSize;
484 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
485 m->alloc_type = alloc_type;
486 m->rz_log = rz_log;
487 u32 alloc_tid = t ? t->tid() : 0;
50. '?' condition is false
488 m->alloc_tid = alloc_tid;
489 CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
490 m->free_tid = kInvalidTid;
491 m->from_memalign = user_beg != beg_plus_redzone;
492 if (alloc_beg != chunk_beg) {
51. Assuming 'alloc_beg' is not equal to 'chunk_beg'
52. Taking true branch
493 CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
494 reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
53. Array access (from variable 'alloc_beg') results in a null pointer dereference
495 reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
496 }
497 if (using_primary_allocator) {
498 CHECK(size);
499 m->user_requested_size = size;
500 CHECK(allocator.FromPrimary(allocated));
501 } else {
502 CHECK(!allocator.FromPrimary(allocated));
503 m->user_requested_size = SizeClassMap::kMaxSize;
504 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
505 meta[0] = size;
506 meta[1] = chunk_beg;
507 }
508 m->user_requested_alignment_log = user_requested_alignment_log;
509
510 m->alloc_context_id = StackDepotPut(*stack);
511
512 uptr size_rounded_down_to_granularity =
513 RoundDownTo(size, SHADOW_GRANULARITY(1ULL << kDefaultShadowScale));
514 // Unpoison the bulk of the memory region.
515 if (size_rounded_down_to_granularity)
516 PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
517 // Deal with the end of the region if size is not aligned to granularity.
518 if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
519 u8 *shadow =
520 (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
521 *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY(1ULL << kDefaultShadowScale) - 1)) : 0;
522 }
523
524 AsanStats &thread_stats = GetCurrentThreadStats();
525 thread_stats.mallocs++;
526 thread_stats.malloced += size;
527 thread_stats.malloced_redzones += needed_size - size;
528 if (needed_size > SizeClassMap::kMaxSize)
529 thread_stats.malloc_large++;
530 else
531 thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
532
533 void *res = reinterpret_cast<void *>(user_beg);
534 if (can_fill && fl.max_malloc_fill_size) {
535 uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
536 REAL(memset)__interception::real_memset(res, fl.malloc_fill_byte, fill_size);
537 }
538#if CAN_SANITIZE_LEAKS1
539 m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
540 : __lsan::kDirectlyLeaked;
541#endif
542 // Must be the last mutation of metadata in this function.
543 atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
544 ASAN_MALLOC_HOOK(res, size);
545 return res;
546 }
547
548 // Set quarantine flag if chunk is allocated, issue ASan error report on
549 // available and quarantined chunks. Return true on success, false otherwise.
550 bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
551 BufferedStackTrace *stack) {
552 u8 old_chunk_state = CHUNK_ALLOCATED;
553 // Flip the chunk_state atomically to avoid race on double-free.
554 if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
555 CHUNK_QUARANTINE,
556 memory_order_acquire)) {
557 ReportInvalidFree(ptr, old_chunk_state, stack);
558 // It's not safe to push a chunk in quarantine on invalid free.
559 return false;
560 }
561 CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
562 return true;
563 }
564
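
AtomicallySetQuarantineFlagIfAllocated above works by flipping the first byte of the header (chunk_state, which must be first) from CHUNK_ALLOCATED to CHUNK_QUARANTINE with a single compare-exchange, so when two frees race only one can succeed and the other is reported as an invalid free. A reduced sketch of that guard, using std::atomic in place of the sanitizer's own atomics:

    #include <atomic>
    #include <cstdint>

    enum : uint8_t { kAllocatedSketch = 2, kQuarantineSketch = 3 };

    // Returns true for exactly one caller when two frees race on the same chunk.
    static bool TryQuarantineSketch(std::atomic<uint8_t> &state) {
      uint8_t expected = kAllocatedSketch;
      return state.compare_exchange_strong(expected, kQuarantineSketch,
                                           std::memory_order_acquire);
    }
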
565 // Expects the chunk to already be marked as quarantined by using
566 // AtomicallySetQuarantineFlagIfAllocated.
567 void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
568 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
569 CHECK_GE(m->alloc_tid, 0);
570 if (SANITIZER_WORDSIZE64 == 64) // On 32-bits this resides in user area.
571 CHECK_EQ(m->free_tid, kInvalidTid);
572 AsanThread *t = GetCurrentThread();
573 m->free_tid = t ? t->tid() : 0;
574 m->free_context_id = StackDepotPut(*stack);
575
576 Flags &fl = *flags();
577 if (fl.max_free_fill_size > 0) {
578 // We have to skip the chunk header, it contains free_context_id.
579 uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
580 if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
581 uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
582 size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
583 REAL(memset)__interception::real_memset((void *)scribble_start, fl.free_fill_byte, size_to_fill);
584 }
585 }
586
587 // Poison the region.
588 PoisonShadow(m->Beg(),
589 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)),
590 kAsanHeapFreeMagic);
591
592 AsanStats &thread_stats = GetCurrentThreadStats();
593 thread_stats.frees++;
594 thread_stats.freed += m->UsedSize();
595
596 // Push into quarantine.
597 if (t) {
598 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
599 AllocatorCache *ac = GetAllocatorCache(ms);
600 quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
601 m->UsedSize());
602 } else {
603 SpinMutexLock l(&fallback_mutex);
604 AllocatorCache *ac = &fallback_allocator_cache;
605 quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
606 m, m->UsedSize());
607 }
608 }
609
610 void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
611 BufferedStackTrace *stack, AllocType alloc_type) {
612 uptr p = reinterpret_cast<uptr>(ptr);
613 if (p == 0) return;
614
615 uptr chunk_beg = p - kChunkHeaderSize;
616 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
617
618 // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
619 // malloc. Don't report an invalid free in this case.
620 if (SANITIZER_WINDOWS0 &&
621 !get_allocator().PointerIsMine(ptr)) {
622 if (!IsSystemHeapAddress(p))
623 ReportFreeNotMalloced(p, stack);
624 return;
625 }
626
627 ASAN_FREE_HOOK(ptr);
628
629 // Must mark the chunk as quarantined before any changes to its metadata.
630 // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
631 if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
632
633 if (m->alloc_type != alloc_type) {
634 if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
635 ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
636 (AllocType)alloc_type);
637 }
638 } else {
639 if (flags()->new_delete_type_mismatch &&
640 (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
641 ((delete_size && delete_size != m->UsedSize()) ||
642 ComputeUserRequestedAlignmentLog(delete_alignment) !=
643 m->user_requested_alignment_log)) {
644 ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
645 }
646 }
647
648 QuarantineChunk(m, ptr, stack);
649 }
650
651 void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
652 CHECK(old_ptr && new_size);
653 uptr p = reinterpret_cast<uptr>(old_ptr);
654 uptr chunk_beg = p - kChunkHeaderSize;
655 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
656
657 AsanStats &thread_stats = GetCurrentThreadStats();
658 thread_stats.reallocs++;
659 thread_stats.realloced += new_size;
660
661 void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
662 if (new_ptr) {
663 u8 chunk_state = m->chunk_state;
664 if (chunk_state != CHUNK_ALLOCATED)
665 ReportInvalidFree(old_ptr, chunk_state, stack);
666 CHECK_NE(REAL(memcpy), nullptr);
667 uptr memcpy_size = Min(new_size, m->UsedSize());
668 // If realloc() races with free(), we may start copying freed memory.
669 // However, we will report racy double-free later anyway.
670 REAL(memcpy)__interception::real_memcpy(new_ptr, old_ptr, memcpy_size);
671 Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
672 }
673 return new_ptr;
674 }
675
676 void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
677 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))__builtin_expect(!!(CheckForCallocOverflow(size, nmemb)), 0)) {
678 if (AllocatorMayReturnNull())
679 return nullptr;
680 ReportCallocOverflow(nmemb, size, stack);
681 }
682 void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
683 // If the memory comes from the secondary allocator no need to clear it
684 // as it comes directly from mmap.
685 if (ptr && allocator.FromPrimary(ptr))
686 REAL(memset)__interception::real_memset(ptr, 0, nmemb * size);
687 return ptr;
688 }
689
690 void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
691 if (chunk_state == CHUNK_QUARANTINE)
692 ReportDoubleFree((uptr)ptr, stack);
693 else
694 ReportFreeNotMalloced((uptr)ptr, stack);
695 }
696
697 void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
698 AllocatorCache *ac = GetAllocatorCache(ms);
699 quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
700 allocator.SwallowCache(ac);
701 }
702
703 // -------------------------- Chunk lookup ----------------------
704
705 // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
706 AsanChunk *GetAsanChunk(void *alloc_beg) {
707 if (!alloc_beg) return nullptr;
708 if (!allocator.FromPrimary(alloc_beg)) {
709 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
710 AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
711 return m;
712 }
713 uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
714 if (alloc_magic[0] == kAllocBegMagic)
715 return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
716 return reinterpret_cast<AsanChunk *>(alloc_beg);
717 }
718
719 AsanChunk *GetAsanChunkByAddr(uptr p) {
720 void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
721 return GetAsanChunk(alloc_beg);
722 }
723
724 // Allocator must be locked when this function is called.
725 AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
726 void *alloc_beg =
727 allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
728 return GetAsanChunk(alloc_beg);
729 }
730
731 uptr AllocationSize(uptr p) {
732 AsanChunk *m = GetAsanChunkByAddr(p);
733 if (!m) return 0;
734 if (m->chunk_state != CHUNK_ALLOCATED) return 0;
735 if (m->Beg() != p) return 0;
736 return m->UsedSize();
737 }
738
739 AsanChunkView FindHeapChunkByAddress(uptr addr) {
740 AsanChunk *m1 = GetAsanChunkByAddr(addr);
741 if (!m1) return AsanChunkView(m1);
742 sptr offset = 0;
743 if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
744 // The address is in the chunk's left redzone, so maybe it is actually
745 // a right buffer overflow from the other chunk to the left.
746 // Search a bit to the left to see if there is another chunk.
747 AsanChunk *m2 = nullptr;
748 for (uptr l = 1; l < GetPageSizeCached(); l++) {
749 m2 = GetAsanChunkByAddr(addr - l);
750 if (m2 == m1) continue; // Still the same chunk.
751 break;
752 }
753 if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
754 m1 = ChooseChunk(addr, m2, m1);
755 }
756 return AsanChunkView(m1);
757 }
758
759 void Purge(BufferedStackTrace *stack) {
760 AsanThread *t = GetCurrentThread();
761 if (t) {
762 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
763 quarantine.DrainAndRecycle(GetQuarantineCache(ms),
764 QuarantineCallback(GetAllocatorCache(ms),
765 stack));
766 }
767 {
768 SpinMutexLock l(&fallback_mutex);
769 quarantine.DrainAndRecycle(&fallback_quarantine_cache,
770 QuarantineCallback(&fallback_allocator_cache,
771 stack));
772 }
773
774 allocator.ForceReleaseToOS();
775 }
776
777 void PrintStats() {
778 allocator.PrintStats();
779 quarantine.PrintStats();
780 }
781
782 void ForceLock() {
783 allocator.ForceLock();
784 fallback_mutex.Lock();
785 }
786
787 void ForceUnlock() {
788 fallback_mutex.Unlock();
789 allocator.ForceUnlock();
790 }
791};
792
793static Allocator instance(LINKER_INITIALIZED);
794
795static AsanAllocator &get_allocator() {
796 return instance.allocator;
797}
798
799bool AsanChunkView::IsValid() const {
800 return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
801}
802bool AsanChunkView::IsAllocated() const {
803 return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
804}
805bool AsanChunkView::IsQuarantined() const {
806 return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
807}
808uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
809uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
810uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
811u32 AsanChunkView::UserRequestedAlignment() const {
812 return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
813}
814uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
815uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
816AllocType AsanChunkView::GetAllocType() const {
817 return (AllocType)chunk_->alloc_type;
818}
819
820static StackTrace GetStackTraceFromId(u32 id) {
821 CHECK(id);
822 StackTrace res = StackDepotGet(id);
823 CHECK(res.trace);
824 return res;
825}
826
827u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
828u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
829
830StackTrace AsanChunkView::GetAllocStack() const {
831 return GetStackTraceFromId(GetAllocStackId());
832}
833
834StackTrace AsanChunkView::GetFreeStack() const {
835 return GetStackTraceFromId(GetFreeStackId());
836}
837
838void InitializeAllocator(const AllocatorOptions &options) {
839 instance.InitLinkerInitialized(options);
840}
841
842void ReInitializeAllocator(const AllocatorOptions &options) {
843 instance.ReInitialize(options);
844}
845
846void GetAllocatorOptions(AllocatorOptions *options) {
847 instance.GetOptions(options);
848}
849
850AsanChunkView FindHeapChunkByAddress(uptr addr) {
851 return instance.FindHeapChunkByAddress(addr);
852}
853AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
854 return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
855}
856
857void AsanThreadLocalMallocStorage::CommitBack() {
858 GET_STACK_TRACE_MALLOC;
859 instance.CommitBack(this, &stack);
860}
861
862void PrintInternalAllocatorStats() {
863 instance.PrintStats();
864}
865
866void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
867 instance.Deallocate(ptr, 0, 0, stack, alloc_type);
868}
869
870void asan_delete(void *ptr, uptr size, uptr alignment,
871 BufferedStackTrace *stack, AllocType alloc_type) {
872 instance.Deallocate(ptr, size, alignment, stack, alloc_type);
873}
874
875void *asan_malloc(uptr size, BufferedStackTrace *stack) {
876 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
877}
878
879void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
880 return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
881}
882
883void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
884 if (!p)
885 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
886 if (size == 0) {
887 if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
888 instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
889 return nullptr;
890 }
891 // Allocate a size of 1 if we shouldn't free() on Realloc to 0
892 size = 1;
893 }
894 return SetErrnoOnNull(instance.Reallocate(p, size, stack));
895}
896
897void *asan_valloc(uptr size, BufferedStackTrace *stack) {
898 return SetErrnoOnNull(
899 instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
900}
901
902void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
903 uptr PageSize = GetPageSizeCached();
904 if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
905 errno(*__errno_location()) = errno_ENOMEM12;
906 if (AllocatorMayReturnNull())
907 return nullptr;
908 ReportPvallocOverflow(size, stack);
909 }
910 // pvalloc(0) should allocate one page.
911 size = size ? RoundUpTo(size, PageSize) : PageSize;
912 return SetErrnoOnNull(
913 instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
914}
915
916void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
917 AllocType alloc_type) {
918 if (UNLIKELY(!IsPowerOfTwo(alignment))__builtin_expect(!!(!IsPowerOfTwo(alignment)), 0)) {
919 errno(*__errno_location()) = errno_EINVAL22;
920 if (AllocatorMayReturnNull())
921 return nullptr;
922 ReportInvalidAllocationAlignment(alignment, stack);
923 }
924 return SetErrnoOnNull(
925 instance.Allocate(size, alignment, stack, alloc_type, true));
926}
927
928void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
929 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
930 errno(*__errno_location()) = errno_EINVAL22;
931 if (AllocatorMayReturnNull())
932 return nullptr;
933 ReportInvalidAlignedAllocAlignment(size, alignment, stack);
934 }
935 return SetErrnoOnNull(
936 instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
937}
938
939int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
940 BufferedStackTrace *stack) {
941 if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
942 if (AllocatorMayReturnNull())
943 return errno_EINVAL22;
944 ReportInvalidPosixMemalignAlignment(alignment, stack);
945 }
946 void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
947 if (UNLIKELY(!ptr)__builtin_expect(!!(!ptr), 0))
948 // OOM error is already taken care of by Allocate.
949 return errno_ENOMEM12;
950 CHECK(IsAligned((uptr)ptr, alignment));
951 *memptr = ptr;
952 return 0;
953}
954
955uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
956 if (!ptr) return 0;
957 uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
958 if (flags()->check_malloc_usable_size && (usable_size == 0)) {
959 GET_STACK_TRACE_FATAL(pc, bp);
960 ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
961 }
962 return usable_size;
963}
964
965uptr asan_mz_size(const void *ptr) {
966 return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
967}
968
969void asan_mz_force_lock() {
970 instance.ForceLock();
971}
972
973void asan_mz_force_unlock() {
974 instance.ForceUnlock();
975}
976
977void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
978 instance.SetRssLimitExceeded(limit_exceeded);
979}
980
981} // namespace __asan
982
983// --- Implementation of LSan-specific functions --- {{{1
984namespace __lsan {
985void LockAllocator() {
986 __asan::get_allocator().ForceLock();
987}
988
989void UnlockAllocator() {
990 __asan::get_allocator().ForceUnlock();
991}
992
993void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
994 *begin = (uptr)&__asan::get_allocator();
995 *end = *begin + sizeof(__asan::get_allocator());
996}
997
998uptr PointsIntoChunk(void* p) {
999 uptr addr = reinterpret_cast<uptr>(p);
1000 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1001 if (!m) return 0;
1002 uptr chunk = m->Beg();
1003 if (m->chunk_state != __asan::CHUNK_ALLOCATED)
1004 return 0;
1005 if (m->AddrIsInside(addr, /*locked_version=*/true))
1006 return chunk;
1007 if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
1008 addr))
1009 return chunk;
1010 return 0;
1011}
1012
1013uptr GetUserBegin(uptr chunk) {
1014 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1015 CHECK(m);
1016 return m->Beg();
1017}
1018
1019LsanMetadata::LsanMetadata(uptr chunk) {
1020 metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
1021}
1022
1023bool LsanMetadata::allocated() const {
1024 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1025 return m->chunk_state == __asan::CHUNK_ALLOCATED;
1026}
1027
1028ChunkTag LsanMetadata::tag() const {
1029 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1030 return static_cast<ChunkTag>(m->lsan_tag);
1031}
1032
1033void LsanMetadata::set_tag(ChunkTag value) {
1034 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1035 m->lsan_tag = value;
1036}
1037
1038uptr LsanMetadata::requested_size() const {
1039 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1040 return m->UsedSize(/*locked_version=*/true);
1041}
1042
1043u32 LsanMetadata::stack_trace_id() const {
1044 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1045 return m->alloc_context_id;
1046}
1047
1048void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1049 __asan::get_allocator().ForEachChunk(callback, arg);
1050}
1051
1052IgnoreObjectResult IgnoreObjectLocked(const void *p) {
1053 uptr addr = reinterpret_cast<uptr>(p);
1054 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1055 if (!m) return kIgnoreObjectInvalid;
1056 if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
1057 if (m->lsan_tag == kIgnored)
1058 return kIgnoreObjectAlreadyIgnored;
1059 m->lsan_tag = __lsan::kIgnored;
1060 return kIgnoreObjectSuccess;
1061 } else {
1062 return kIgnoreObjectInvalid;
1063 }
1064}
1065} // namespace __lsan
1066
1067// ---------------------- Interface ---------------- {{{1
1068using namespace __asan; // NOLINT
1069
1070// ASan allocator doesn't reserve extra bytes, so normally we would
1071// just return "size". We don't want to expose our redzone sizes, etc here.
1072uptr __sanitizer_get_estimated_allocated_size(uptr size) {
1073 return size;
1074}
1075
1076int __sanitizer_get_ownership(const void *p) {
1077 uptr ptr = reinterpret_cast<uptr>(p);
1078 return instance.AllocationSize(ptr) > 0;
1079}
1080
1081uptr __sanitizer_get_allocated_size(const void *p) {
1082 if (!p) return 0;
1083 uptr ptr = reinterpret_cast<uptr>(p);
1084 uptr allocated_size = instance.AllocationSize(ptr);
1085 // Die if p is not malloced or if it is already freed.
1086 if (allocated_size == 0) {
1087 GET_STACK_TRACE_FATAL_HERE;
1088 ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
1089 }
1090 return allocated_size;
1091}
1092
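The three functions above implement the public allocator introspection interface from <sanitizer/allocator_interface.h>. A short usage sketch (hypothetical example program):

#include <sanitizer/allocator_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  void *p = std::malloc(100);
  // Ownership is derived from AllocationSize() > 0, as in the code above.
  std::printf("owned: %d\n", __sanitizer_get_ownership(p));
  // ASan reports exactly the requested size; redzones are never exposed.
  std::printf("allocated: %zu\n", __sanitizer_get_allocated_size(p));
  // The estimate is simply the input size for the ASan allocator.
  std::printf("estimated: %zu\n", __sanitizer_get_estimated_allocated_size(100));
  std::free(p);
  return 0;
}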
1093void __sanitizer_purge_allocator() {
1094 GET_STACK_TRACE_MALLOC;
1095 instance.Purge(&stack);
1096}
1097
1098#if !SANITIZER_SUPPORTS_WEAK_HOOKS
1099// Provide default (no-op) implementation of malloc hooks.
1100SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
1101 void *ptr, uptr size) {
1102 (void)ptr;
1103 (void)size;
1104}
1105
1106SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
1107 (void)ptr;
1108}
1109#endif
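
The weak no-op definitions above exist so that a program linked against ASan can supply its own hooks; a strong definition in user code overrides them. A hedged sketch of such an override (prototypes from <sanitizer/allocator_interface.h>):

#include <sanitizer/allocator_interface.h>
#include <cstdio>

// Strong definitions replace the weak no-op defaults above. Hook bodies run
// on every allocation and deallocation, so keep them simple.
extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
  std::fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
}

extern "C" void __sanitizer_free_hook(void *ptr) {
  std::fprintf(stderr, "free  %p\n", ptr);
}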

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h

1//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17// This class implements a complete memory allocator by using two
18// internal allocators:
19// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
20// When allocating 2^x bytes it should return 2^x aligned chunk.
21// PrimaryAllocator is used via a local AllocatorCache.
22// SecondaryAllocator can allocate anything, but is not efficient.
23template <class PrimaryAllocator, class AllocatorCache,
24 class SecondaryAllocator> // NOLINT
25class CombinedAllocator {
26 public:
27 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
28 primary_.Init(release_to_os_interval_ms);
29 secondary_.InitLinkerInitialized();
30 stats_.InitLinkerInitialized();
31 }
32
33 void Init(s32 release_to_os_interval_ms) {
34 primary_.Init(release_to_os_interval_ms);
35 secondary_.Init();
36 stats_.Init();
37 }
38
39 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
40 // Returning 0 on malloc(0) may break a lot of code.
41 if (size == 0)
  16. Assuming 'size' is not equal to 0
  17. Taking false branch
42 size = 1;
43 if (size + alignment < size) {
  18. Assuming the condition is false
  19. Taking false branch
44 Report("WARNING: %s: CombinedAllocator allocation overflow: "
45 "0x%zx bytes with 0x%zx alignment requested\n",
46 SanitizerToolName, size, alignment);
47 return nullptr;
48 }
49 uptr original_size = size;
50 // If alignment requirements are to be fulfilled by the frontend allocator
51 // rather than by the primary or secondary, passing an alignment lower than
52 // or equal to 8 will prevent any further rounding up, as well as the later
53 // alignment check.
54 if (alignment > 8)
  20. Taking false branch
55 size = RoundUpTo(size, alignment);
56 // The primary allocator should return a 2^x aligned allocation when
57 // requested 2^x bytes, hence using the rounded up 'size' when being
58 // serviced by the primary (this is no longer true when the primary is
59 // using a non-fixed base address). The secondary takes care of the
60 // alignment without such requirement, and allocating 'size' would use
61 // extraneous memory, so we employ 'original_size'.
62 void *res;
63 if (primary_.CanAllocate(size, alignment))
  21. Taking false branch
64 res = cache->Allocate(&primary_, primary_.ClassID(size));
65 else
66 res = secondary_.Allocate(&stats_, original_size, alignment);
  22. Calling 'LargeMmapAllocator::Allocate'
  38. Returning from 'LargeMmapAllocator::Allocate'
  39. Value assigned to 'res'
67 if (alignment > 8)
  40. Taking false branch
68 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
69 return res;
  41. Returning pointer (loaded from 'res')
70 }
71
72 s32 ReleaseToOSIntervalMs() const {
73 return primary_.ReleaseToOSIntervalMs();
74 }
75
76 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
77 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
78 }
79
80 void ForceReleaseToOS() {
81 primary_.ForceReleaseToOS();
82 }
83
84 void Deallocate(AllocatorCache *cache, void *p) {
85 if (!p) return;
86 if (primary_.PointerIsMine(p))
87 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
88 else
89 secondary_.Deallocate(&stats_, p);
90 }
91
92 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
93 uptr alignment) {
94 if (!p)
95 return Allocate(cache, new_size, alignment);
96 if (!new_size) {
97 Deallocate(cache, p);
98 return nullptr;
99 }
100 CHECK(PointerIsMine(p));
101 uptr old_size = GetActuallyAllocatedSize(p);
102 uptr memcpy_size = Min(new_size, old_size);
103 void *new_p = Allocate(cache, new_size, alignment);
104 if (new_p)
105 internal_memcpy(new_p, p, memcpy_size);
106 Deallocate(cache, p);
107 return new_p;
108 }
109
110 bool PointerIsMine(void *p) {
111 if (primary_.PointerIsMine(p))
112 return true;
113 return secondary_.PointerIsMine(p);
114 }
115
116 bool FromPrimary(void *p) {
117 return primary_.PointerIsMine(p);
118 }
119
120 void *GetMetaData(const void *p) {
121 if (primary_.PointerIsMine(p))
122 return primary_.GetMetaData(p);
123 return secondary_.GetMetaData(p);
124 }
125
126 void *GetBlockBegin(const void *p) {
127 if (primary_.PointerIsMine(p))
128 return primary_.GetBlockBegin(p);
129 return secondary_.GetBlockBegin(p);
130 }
131
132 // This function does the same as GetBlockBegin, but is much faster.
133 // Must be called with the allocator locked.
134 void *GetBlockBeginFastLocked(void *p) {
135 if (primary_.PointerIsMine(p))
136 return primary_.GetBlockBegin(p);
137 return secondary_.GetBlockBeginFastLocked(p);
138 }
139
140 uptr GetActuallyAllocatedSize(void *p) {
141 if (primary_.PointerIsMine(p))
142 return primary_.GetActuallyAllocatedSize(p);
143 return secondary_.GetActuallyAllocatedSize(p);
144 }
145
146 uptr TotalMemoryUsed() {
147 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
148 }
149
150 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
151
152 void InitCache(AllocatorCache *cache) {
153 cache->Init(&stats_);
154 }
155
156 void DestroyCache(AllocatorCache *cache) {
157 cache->Destroy(&primary_, &stats_);
158 }
159
160 void SwallowCache(AllocatorCache *cache) {
161 cache->Drain(&primary_);
162 }
163
164 void GetStats(AllocatorStatCounters s) const {
165 stats_.Get(s);
166 }
167
168 void PrintStats() {
169 primary_.PrintStats();
170 secondary_.PrintStats();
171 }
172
173 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
174 // introspection API.
175 void ForceLock() {
176 primary_.ForceLock();
177 secondary_.ForceLock();
178 }
179
180 void ForceUnlock() {
181 secondary_.ForceUnlock();
182 primary_.ForceUnlock();
183 }
184
185 // Iterate over all existing chunks.
186 // The allocator must be locked when calling this function.
187 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
188 primary_.ForEachChunk(callback, arg);
189 secondary_.ForEachChunk(callback, arg);
190 }
191
192 private:
193 PrimaryAllocator primary_;
194 SecondaryAllocator secondary_;
195 AllocatorGlobalStats stats_;
196};
197
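The size handling at the top of CombinedAllocator::Allocate is worth isolating: malloc(0) is bumped to one byte, size + alignment is checked for wrap-around, and the size is only rounded up when the alignment exceeds 8 so that a 2^x size class yields a 2^x-aligned chunk. A standalone sketch of that arithmetic (RoundUpTo reimplemented here just for illustration):

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// Same rounding the primary-allocator path relies on.
static uptr RoundUpTo(uptr size, uptr boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}

static uptr AdjustedSize(uptr size, uptr alignment) {
  if (size == 0) size = 1;                  // malloc(0) still returns memory.
  if (size + alignment < size) return 0;    // overflow -> refuse (nullptr).
  if (alignment > 8) size = RoundUpTo(size, alignment);
  return size;
}

int main() {
  assert(AdjustedSize(100, 8) == 100);        // alignment <= 8: size untouched
  assert(AdjustedSize(100, 64) == 128);       // rounded up to the alignment
  assert(AdjustedSize(~uptr(0), 4096) == 0);  // overflow is rejected
  return 0;
}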

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_secondary.h

1//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
18// allocated chunks. To be used in memory constrained or not memory hungry cases
19// (currently, 32 bits and internal allocator).
20class LargeMmapAllocatorPtrArrayStatic {
21 public:
22 INLINE void *Init() { return &p_[0]; }
23 INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
24 private:
25 static const int kMaxNumChunks = 1 << 15;
26 uptr p_[kMaxNumChunks];
27};
28
29// Much less restricted LargeMmapAllocator chunks list (comparing to
30// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
31// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the
32// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
33class LargeMmapAllocatorPtrArrayDynamic {
34 public:
35 INLINE void *Init() {
36 uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
37 SecondaryAllocatorName);
38 CHECK(p);
39 return reinterpret_cast<void*>(p);
40 }
41
42 INLINE void EnsureSpace(uptr n) {
43 CHECK_LT(n, kMaxNumChunks);
44 DCHECK(n <= n_reserved_);
45 if (UNLIKELY(n == n_reserved_)) {
46 address_range_.MapOrDie(
47 reinterpret_cast<uptr>(address_range_.base()) +
48 n_reserved_ * sizeof(uptr),
49 kChunksBlockCount * sizeof(uptr));
50 n_reserved_ += kChunksBlockCount;
51 }
52 }
53
54 private:
55 static const int kMaxNumChunks = 1 << 20;
56 static const int kChunksBlockCount = 1 << 14;
57 ReservedAddressRange address_range_;
58 uptr n_reserved_;
59};
60
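On a 64-bit target the constants above imply that Init() reserves about 8 MiB of address space for the chunk pointer array and EnsureSpace() commits it in 128 KiB blocks; a quick check of that arithmetic (illustrative only, assuming 8-byte uptr entries):

#include <cstdint>

// kMaxNumChunks = 1 << 20 entries of 8 bytes each -> 8 MiB reserved up front;
// kChunksBlockCount = 1 << 14 entries -> 128 KiB mapped per growth step.
static_assert((1u << 20) * sizeof(uint64_t) == (8u << 20), "8 MiB reserved");
static_assert((1u << 14) * sizeof(uint64_t) == (128u << 10), "128 KiB per block");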
61#if SANITIZER_WORDSIZE == 32
62typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
63#else
64typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
65#endif
66
67// This class can (de)allocate only large chunks of memory using mmap/unmap.
68// The main purpose of this allocator is to cover large and rare allocation
69// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
70template <class MapUnmapCallback = NoOpMapUnmapCallback,
71 class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
72class LargeMmapAllocator {
73 public:
74 void InitLinkerInitialized() {
75 page_size_ = GetPageSizeCached();
76 chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
77 }
78
79 void Init() {
80 internal_memset(this, 0, sizeof(*this));
81 InitLinkerInitialized();
82 }
83
84 void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
85 CHECK(IsPowerOfTwo(alignment));
  23. Within the expansion of the macro 'CHECK':
86 uptr map_size = RoundUpMapSize(size);
87 if (alignment > page_size_)
  24. Assuming the condition is false
  25. Taking false branch
88 map_size += alignment;
89 // Overflow.
90 if (map_size < size) {
  26. Assuming 'map_size' is >= 'size'
  27. Taking false branch
91 Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
92 "0x%zx bytes with 0x%zx alignment requested\n",
93 SanitizerToolName, map_size, alignment);
94 return nullptr;
95 }
96 uptr map_beg = reinterpret_cast<uptr>(
97 MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
98 if (!map_beg)
  28. Assuming 'map_beg' is not equal to 0
  29. Taking false branch
99 return nullptr;
100 CHECK(IsAligned(map_beg, page_size_));
  30. Within the expansion of the macro 'CHECK':
101 MapUnmapCallback().OnMap(map_beg, map_size);
102 uptr map_end = map_beg + map_size;
103 uptr res = map_beg + page_size_;
104 if (res & (alignment - 1)) // Align.
  31. Assuming the condition is false
  32. Taking false branch
105 res += alignment - (res & (alignment - 1));
106 CHECK(IsAligned(res, alignment));
  33. Within the expansion of the macro 'CHECK':
107 CHECK(IsAligned(res, page_size_));
  34. Within the expansion of the macro 'CHECK':
108 CHECK_GE(res + size, map_beg);
  35. Within the expansion of the macro 'CHECK_GE':
      a. Assuming 'v1' is >= 'v2'
109 CHECK_LE(res + size, map_end);
  36. Within the expansion of the macro 'CHECK_LE':
      a. Assuming 'v1' is <= 'v2'
110 Header *h = GetHeader(res);
111 h->size = size;
112 h->map_beg = map_beg;
113 h->map_size = map_size;
114 uptr size_log = MostSignificantSetBitIndex(map_size);
115 CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
  37. Within the expansion of the macro 'CHECK_LT':
      a. Assuming 'v1' is < 'v2'
116 {
117 SpinMutexLock l(&mutex_);
118 ptr_array_.EnsureSpace(n_chunks_);
119 uptr idx = n_chunks_++;
120 h->chunk_idx = idx;
121 chunks_[idx] = h;
122 chunks_sorted_ = false;
123 stats.n_allocs++;
124 stats.currently_allocated += map_size;
125 stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
126 stats.by_size_log[size_log]++;
127 stat->Add(AllocatorStatAllocated, map_size);
128 stat->Add(AllocatorStatMapped, map_size);
129 }
130 return reinterpret_cast<void*>(res);
131 }
132
133 void Deallocate(AllocatorStats *stat, void *p) {
134 Header *h = GetHeader(p);
135 {
136 SpinMutexLock l(&mutex_);
137 uptr idx = h->chunk_idx;
138 CHECK_EQ(chunks_[idx], h);
139 CHECK_LT(idx, n_chunks_);
140 chunks_[idx] = chunks_[--n_chunks_];
141 chunks_[idx]->chunk_idx = idx;
142 chunks_sorted_ = false;
143 stats.n_frees++;
144 stats.currently_allocated -= h->map_size;
145 stat->Sub(AllocatorStatAllocated, h->map_size);
146 stat->Sub(AllocatorStatMapped, h->map_size);
147 }
148 MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
149 UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
150 }
151
152 uptr TotalMemoryUsed() {
153 SpinMutexLock l(&mutex_);
154 uptr res = 0;
155 for (uptr i = 0; i < n_chunks_; i++) {
156 Header *h = chunks_[i];
157 CHECK_EQ(h->chunk_idx, i);
158 res += RoundUpMapSize(h->size);
159 }
160 return res;
161 }
162
163 bool PointerIsMine(const void *p) {
164 return GetBlockBegin(p) != nullptr;
165 }
166
167 uptr GetActuallyAllocatedSize(void *p) {
168 return RoundUpTo(GetHeader(p)->size, page_size_);
169 }
170
171 // At least page_size_/2 metadata bytes is available.
172 void *GetMetaData(const void *p) {
173 // Too slow: CHECK_EQ(p, GetBlockBegin(p));
174 if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
175 Printf("%s: bad pointer %p\n", SanitizerToolName, p);
176 CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
177 }
178 return GetHeader(p) + 1;
179 }
180
181 void *GetBlockBegin(const void *ptr) {
182 uptr p = reinterpret_cast<uptr>(ptr);
183 SpinMutexLock l(&mutex_);
184 uptr nearest_chunk = 0;
185 // Cache-friendly linear search.
186 for (uptr i = 0; i < n_chunks_; i++) {
187 uptr ch = reinterpret_cast<uptr>(chunks_[i]);
188 if (p < ch) continue; // p is at left to this chunk, skip it.
189 if (p - ch < p - nearest_chunk)
190 nearest_chunk = ch;
191 }
192 if (!nearest_chunk)
193 return nullptr;
194 Header *h = reinterpret_cast<Header *>(nearest_chunk);
195 CHECK_GE(nearest_chunk, h->map_beg);
196 CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
197 CHECK_LE(nearest_chunk, p);
198 if (h->map_beg + h->map_size <= p)
199 return nullptr;
200 return GetUser(h);
201 }
202
203 void EnsureSortedChunks() {
204 if (chunks_sorted_) return;
205 Sort(reinterpret_cast<uptr *>(chunks_), n_chunks_);
206 for (uptr i = 0; i < n_chunks_; i++)
207 chunks_[i]->chunk_idx = i;
208 chunks_sorted_ = true;
209 }
210
211 // This function does the same as GetBlockBegin, but is much faster.
212 // Must be called with the allocator locked.
213 void *GetBlockBeginFastLocked(void *ptr) {
214 mutex_.CheckLocked();
215 uptr p = reinterpret_cast<uptr>(ptr);
216 uptr n = n_chunks_;
217 if (!n) return nullptr;
218 EnsureSortedChunks();
219 auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
220 auto max_mmap_ =
221 reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
222 if (p < min_mmap_ || p >= max_mmap_)
223 return nullptr;
224 uptr beg = 0, end = n - 1;
225 // This loop is a log(n) lower_bound. It does not check for the exact match
226 // to avoid expensive cache-thrashing loads.
227 while (end - beg >= 2) {
228 uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
229 if (p < reinterpret_cast<uptr>(chunks_[mid]))
230 end = mid - 1; // We are not interested in chunks_[mid].
231 else
232 beg = mid; // chunks_[mid] may still be what we want.
233 }
234
235 if (beg < end) {
236 CHECK_EQ(beg + 1, end);
237 // There are 2 chunks left, choose one.
238 if (p >= reinterpret_cast<uptr>(chunks_[end]))
239 beg = end;
240 }
241
242 Header *h = chunks_[beg];
243 if (h->map_beg + h->map_size <= p || p < h->map_beg)
244 return nullptr;
245 return GetUser(h);
246 }
247
248 void PrintStats() {
249 Printf("Stats: LargeMmapAllocator: allocated %zd times, "
250 "remains %zd (%zd K) max %zd M; by size logs: ",
251 stats.n_allocs, stats.n_allocs - stats.n_frees,
252 stats.currently_allocated >> 10, stats.max_allocated >> 20);
253 for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
254 uptr c = stats.by_size_log[i];
255 if (!c) continue;
256 Printf("%zd:%zd; ", i, c);
257 }
258 Printf("\n");
259 }
260
261 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
262 // introspection API.
263 void ForceLock() {
264 mutex_.Lock();
265 }
266
267 void ForceUnlock() {
268 mutex_.Unlock();
269 }
270
271 // Iterate over all existing chunks.
272 // The allocator must be locked when calling this function.
273 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
274 EnsureSortedChunks(); // Avoid doing the sort while iterating.
275 for (uptr i = 0; i < n_chunks_; i++) {
276 auto t = chunks_[i];
277 callback(reinterpret_cast<uptr>(GetUser(t)), arg);
278 // Consistency check: verify that the array did not change.
279 CHECK_EQ(chunks_[i], t);
280 CHECK_EQ(chunks_[i]->chunk_idx, i);
281 }
282 }
283
284 private:
285 struct Header {
286 uptr map_beg;
287 uptr map_size;
288 uptr size;
289 uptr chunk_idx;
290 };
291
292 Header *GetHeader(uptr p) {
293 CHECK(IsAligned(p, page_size_));
294 return reinterpret_cast<Header*>(p - page_size_);
295 }
296 Header *GetHeader(const void *p) {
297 return GetHeader(reinterpret_cast<uptr>(p));
298 }
299
300 void *GetUser(Header *h) {
301 CHECK(IsAligned((uptr)h, page_size_));
302 return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
303 }
304
305 uptr RoundUpMapSize(uptr size) {
306 return RoundUpTo(size, page_size_) + page_size_;
307 }
308
309 uptr page_size_;
310 Header **chunks_;
311 PtrArrayT ptr_array_;
312 uptr n_chunks_;
313 bool chunks_sorted_;
314 struct Stats {
315 uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
316 } stats;
317 StaticSpinMutex mutex_;
318};
319
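To summarize the secondary allocator's layout: Allocate() maps RoundUpMapSize(size) bytes, hands out map_beg + page_size_ (further aligned if needed), and GetHeader() recovers the bookkeeping Header from the page immediately below the user pointer. A worked example with made-up numbers (assuming a 4 KiB page and a 64-bit target):

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

int main() {
  const uptr kPageSize = 4096;
  const uptr size = 10000;  // user request
  // RoundUpMapSize(): round the request to page granularity, plus one header page.
  const uptr map_size = ((size + kPageSize - 1) & ~(kPageSize - 1)) + kPageSize;
  assert(map_size == 16384);  // 12288-byte user area + one 4096-byte header page

  const uptr map_beg = 0x100000;             // pretend result of MmapOrDieOnFatalError
  const uptr user    = map_beg + kPageSize;  // what Allocate() returns (res)
  const uptr header  = user - kPageSize;     // what GetHeader() recovers
  assert(header == map_beg);
  assert(user + size <= map_beg + map_size); // mirrors CHECK_LE(res + size, map_end)
  return 0;
}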