Bug Summary

File: projects/compiler-rt/lib/asan/asan_allocator.cc
Warning: line 493, column 46
Array access (from variable 'alloc_beg') results in a null pointer dereference
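
The path notes in the annotated source below reach this conclusion as follows: allocator.Allocate is assumed to return null (event 62), AllocatorMayReturnNull() is assumed to be false (event 64), execution is modeled as continuing past ReportOutOfMemory(size, stack), so 'alloc_beg' is initialized to 0 (event 67) and the store through reinterpret_cast<uptr *>(alloc_beg)[0] at line 493 is flagged (event 80). The following standalone sketch (hypothetical names, not code from this report) reproduces the same pattern the checker reacts to:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins, not the real ASan functions.
static void *MaybeAllocate(std::size_t size) { return std::malloc(size); }  // may return nullptr
static void ReportOutOfMemory(std::size_t size) {
  std::fprintf(stderr, "out of memory: %zu bytes\n", size);
  // The real ReportOutOfMemory terminates the process; on the analyzer's
  // reported path, however, execution is modeled as continuing past this call.
}

static std::uintptr_t *Prepare(std::size_t size) {
  void *allocated = MaybeAllocate(size);
  if (!allocated)
    ReportOutOfMemory(size);  // analyzer: may fall through with allocated == nullptr
  std::uintptr_t alloc_beg = reinterpret_cast<std::uintptr_t>(allocated);    // 0 on the OOM path
  reinterpret_cast<std::uintptr_t *>(alloc_beg)[0] = 0xCC6E96B9;             // flagged null dereference
  return reinterpret_cast<std::uintptr_t *>(alloc_beg);
}

int main() { return Prepare(64) != nullptr ? 0 : 1; }

If the analyzer knew the report call could not return (for example via a noreturn annotation visible at this call site), the null branch should be pruned and the warning should disappear.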

Annotated Source Code


clang -cc1 -triple i386-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name asan_allocator.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu i686 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-9/lib/clang/9.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/projects/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/include -I /build/llvm-toolchain-snapshot-9~svn362543/include -I /build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0/32 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/i386-pc-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/9.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-9/lib/clang/9.0.0/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/projects/compiler-rt/lib/asan -fdebug-prefix-map=/build/llvm-toolchain-snapshot-9~svn362543=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2019-06-05-060531-1271-1 -x c++ /build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc -faddrsig

/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc

1//===-- asan_allocator.cc -------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address sanity checker.
10//
11// Implementation of ASan's memory allocator, 2-nd version.
12// This variant uses the allocator from sanitizer_common, i.e. the one shared
13// with ThreadSanitizer and MemorySanitizer.
14//
15//===----------------------------------------------------------------------===//
16
17#include "asan_allocator.h"
18#include "asan_mapping.h"
19#include "asan_poisoning.h"
20#include "asan_report.h"
21#include "asan_stack.h"
22#include "asan_thread.h"
23#include "sanitizer_common/sanitizer_allocator_checks.h"
24#include "sanitizer_common/sanitizer_allocator_interface.h"
25#include "sanitizer_common/sanitizer_errno.h"
26#include "sanitizer_common/sanitizer_flags.h"
27#include "sanitizer_common/sanitizer_internal_defs.h"
28#include "sanitizer_common/sanitizer_list.h"
29#include "sanitizer_common/sanitizer_stackdepot.h"
30#include "sanitizer_common/sanitizer_quarantine.h"
31#include "lsan/lsan_common.h"
32
33namespace __asan {
34
35// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
36// We use adaptive redzones: for larger allocation larger redzones are used.
37static u32 RZLog2Size(u32 rz_log) {
38 CHECK_LT(rz_log, 8);
39 return 16 << rz_log;
40}
41
42static u32 RZSize2Log(u32 rz_size) {
43 CHECK_GE(rz_size, 16);
44 CHECK_LE(rz_size, 2048);
45 CHECK(IsPowerOfTwo(rz_size));
46 u32 res = Log2(rz_size) - 4;
47 CHECK_EQ(rz_size, RZLog2Size(res));
48 return res;
49}
50
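
As an illustration (a standalone sketch, not code from asan_allocator.cc), the 3-bit redzone encoding defined by the two helpers above round-trips over all eight values, mapping rz_log 0..7 to redzone sizes 16, 32, 64, 128, 256, 512, 1024 and 2048:

#include <cassert>
static unsigned RZLog2SizeSketch(unsigned rz_log) { return 16u << rz_log; }  // mirrors RZLog2Size
static unsigned RZSize2LogSketch(unsigned rz_size) {                         // mirrors RZSize2Log
  unsigned log = 0;
  while ((16u << log) != rz_size) ++log;
  return log;
}
int main() {
  for (unsigned rz_log = 0; rz_log < 8; ++rz_log)
    assert(RZSize2LogSketch(RZLog2SizeSketch(rz_log)) == rz_log);  // 16 << rz_log stays in [16, 2048]
  return 0;
}
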
51static AsanAllocator &get_allocator();
52
53// The memory chunk allocated from the underlying allocator looks like this:
54// L L L L L L H H U U U U U U R R
55// L -- left redzone words (0 or more bytes)
56// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
57// U -- user memory.
58// R -- right redzone (0 or more bytes)
59// ChunkBase consists of ChunkHeader and other bytes that overlap with user
60// memory.
61
62// If the left redzone is greater than the ChunkHeader size we store a magic
63// value in the first uptr word of the memory block and store the address of
64// ChunkBase in the next uptr.
65// M B L L L L L L L L L H H U U U U U U
66// | ^
67// ---------------------|
68// M -- magic value kAllocBegMagic
69// B -- address of ChunkHeader pointing to the first 'H'
70static const uptr kAllocBegMagic = 0xCC6E96B9;
71
72struct ChunkHeader {
73 // 1-st 8 bytes.
74 u32 chunk_state : 8; // Must be first.
75 u32 alloc_tid : 24;
76
77 u32 free_tid : 24;
78 u32 from_memalign : 1;
79 u32 alloc_type : 2;
80 u32 rz_log : 3;
81 u32 lsan_tag : 2;
82 // 2-nd 8 bytes
83 // This field is used for small sizes. For large sizes it is equal to
84 // SizeClassMap::kMaxSize and the actual size is stored in the
85 // SecondaryAllocator's metadata.
86 u32 user_requested_size : 29;
87 // align < 8 -> 0
88 // else -> log2(min(align, 512)) - 2
89 u32 user_requested_alignment_log : 3;
90 u32 alloc_context_id;
91};
92
93struct ChunkBase : ChunkHeader {
94 // Header2, intersects with user memory.
95 u32 free_context_id;
96};
97
98static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
99static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
100COMPILER_CHECK(kChunkHeaderSize == 16);
101COMPILER_CHECK(kChunkHeader2Size <= 16);
102
103// Every chunk of memory allocated by this allocator can be in one of 3 states:
104// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
105// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
106// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
107enum {
108 CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
109 CHUNK_ALLOCATED = 2,
110 CHUNK_QUARANTINE = 3
111};
112
113struct AsanChunk: ChunkBase {
114 uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
115 uptr UsedSize(bool locked_version = false) {
116 if (user_requested_size != SizeClassMap::kMaxSize)
117 return user_requested_size;
118 return *reinterpret_cast<uptr *>(
119 get_allocator().GetMetaData(AllocBeg(locked_version)));
120 }
121 void *AllocBeg(bool locked_version = false) {
122 if (from_memalign) {
123 if (locked_version)
124 return get_allocator().GetBlockBeginFastLocked(
125 reinterpret_cast<void *>(this));
126 return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
127 }
128 return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
129 }
130 bool AddrIsInside(uptr addr, bool locked_version = false) {
131 return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
132 }
133};
134
135struct QuarantineCallback {
136 QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
137 : cache_(cache),
138 stack_(stack) {
139 }
140
141 void Recycle(AsanChunk *m) {
142 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
143 atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
144 CHECK_NE(m->alloc_tid, kInvalidTid);
145 CHECK_NE(m->free_tid, kInvalidTid);
146 PoisonShadow(m->Beg(),
147 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)),
148 kAsanHeapLeftRedzoneMagic);
149 void *p = reinterpret_cast<void *>(m->AllocBeg());
150 if (p != m) {
151 uptr *alloc_magic = reinterpret_cast<uptr *>(p);
152 CHECK_EQ(alloc_magic[0], kAllocBegMagic);
153 // Clear the magic value, as allocator internals may overwrite the
154 // contents of deallocated chunk, confusing GetAsanChunk lookup.
155 alloc_magic[0] = 0;
156 CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
157 }
158
159 // Statistics.
160 AsanStats &thread_stats = GetCurrentThreadStats();
161 thread_stats.real_frees++;
162 thread_stats.really_freed += m->UsedSize();
163
164 get_allocator().Deallocate(cache_, p);
165 }
166
167 void *Allocate(uptr size) {
168 void *res = get_allocator().Allocate(cache_, size, 1);
169 // TODO(alekseys): Consider making quarantine OOM-friendly.
170 if (UNLIKELY(!res)__builtin_expect(!!(!res), 0))
171 ReportOutOfMemory(size, stack_);
172 return res;
173 }
174
175 void Deallocate(void *p) {
176 get_allocator().Deallocate(cache_, p);
177 }
178
179 private:
180 AllocatorCache* const cache_;
181 BufferedStackTrace* const stack_;
182};
183
184typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
185typedef AsanQuarantine::Cache QuarantineCache;
186
187void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
188 PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
189 // Statistics.
190 AsanStats &thread_stats = GetCurrentThreadStats();
191 thread_stats.mmaps++;
192 thread_stats.mmaped += size;
193}
194void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
195 PoisonShadow(p, size, 0);
196 // We are about to unmap a chunk of user memory.
197 // Mark the corresponding shadow memory as not needed.
198 FlushUnneededASanShadowMemory(p, size);
199 // Statistics.
200 AsanStats &thread_stats = GetCurrentThreadStats();
201 thread_stats.munmaps++;
202 thread_stats.munmaped += size;
203}
204
205// We can not use THREADLOCAL because it is not supported on some of the
206// platforms we care about (OSX 10.6, Android).
207// static THREADLOCAL AllocatorCache cache;
208AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
209 CHECK(ms);
210 return &ms->allocator_cache;
211}
212
213QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
214 CHECK(ms);
215 CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
216 return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
217}
218
219void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
220 quarantine_size_mb = f->quarantine_size_mb;
221 thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
222 min_redzone = f->redzone;
223 max_redzone = f->max_redzone;
224 may_return_null = cf->allocator_may_return_null;
225 alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
226 release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
227}
228
229void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
230 f->quarantine_size_mb = quarantine_size_mb;
231 f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
232 f->redzone = min_redzone;
233 f->max_redzone = max_redzone;
234 cf->allocator_may_return_null = may_return_null;
235 f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
236 cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
237}
238
239struct Allocator {
240 static const uptr kMaxAllowedMallocSize =
241 FIRST_32_SECOND_64(3UL << 30, 1ULL << 40)(3UL << 30);
242
243 AsanAllocator allocator;
244 AsanQuarantine quarantine;
245 StaticSpinMutex fallback_mutex;
246 AllocatorCache fallback_allocator_cache;
247 QuarantineCache fallback_quarantine_cache;
248
249 atomic_uint8_t rss_limit_exceeded;
250
251 // ------------------- Options --------------------------
252 atomic_uint16_t min_redzone;
253 atomic_uint16_t max_redzone;
254 atomic_uint8_t alloc_dealloc_mismatch;
255
256 // ------------------- Initialization ------------------------
257 explicit Allocator(LinkerInitialized)
258 : quarantine(LINKER_INITIALIZED),
259 fallback_quarantine_cache(LINKER_INITIALIZED) {}
260
261 void CheckOptions(const AllocatorOptions &options) const {
262 CHECK_GE(options.min_redzone, 16);
263 CHECK_GE(options.max_redzone, options.min_redzone);
264 CHECK_LE(options.max_redzone, 2048);
265 CHECK(IsPowerOfTwo(options.min_redzone));
266 CHECK(IsPowerOfTwo(options.max_redzone));
267 }
268
269 void SharedInitCode(const AllocatorOptions &options) {
270 CheckOptions(options);
271 quarantine.Init((uptr)options.quarantine_size_mb << 20,
272 (uptr)options.thread_local_quarantine_size_kb << 10);
273 atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
274 memory_order_release);
275 atomic_store(&min_redzone, options.min_redzone, memory_order_release);
276 atomic_store(&max_redzone, options.max_redzone, memory_order_release);
277 }
278
279 void InitLinkerInitialized(const AllocatorOptions &options) {
280 SetAllocatorMayReturnNull(options.may_return_null);
281 allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
282 SharedInitCode(options);
283 }
284
285 bool RssLimitExceeded() {
286 return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
287 }
288
289 void SetRssLimitExceeded(bool limit_exceeded) {
290 atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
291 }
292
293 void RePoisonChunk(uptr chunk) {
294 // This could be a user-facing chunk (with redzones), or some internal
295 // housekeeping chunk, like TransferBatch. Start by assuming the former.
296 AsanChunk *ac = GetAsanChunk((void *)chunk);
297 uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
298 uptr beg = ac->Beg();
299 uptr end = ac->Beg() + ac->UsedSize(true);
300 uptr chunk_end = chunk + allocated_size;
301 if (chunk < beg && beg < end && end <= chunk_end &&
302 ac->chunk_state == CHUNK_ALLOCATED) {
303 // Looks like a valid AsanChunk in use, poison redzones only.
304 PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
305 uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY(1ULL << kDefaultShadowScale));
306 FastPoisonShadowPartialRightRedzone(
307 end_aligned_down, end - end_aligned_down,
308 chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
309 } else {
310 // This is either not an AsanChunk or freed or quarantined AsanChunk.
311 // In either case, poison everything.
312 PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
313 }
314 }
315
316 void ReInitialize(const AllocatorOptions &options) {
317 SetAllocatorMayReturnNull(options.may_return_null);
318 allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
319 SharedInitCode(options);
320
321 // Poison all existing allocation's redzones.
322 if (CanPoisonMemory()) {
323 allocator.ForceLock();
324 allocator.ForEachChunk(
325 [](uptr chunk, void *alloc) {
326 ((Allocator *)alloc)->RePoisonChunk(chunk);
327 },
328 this);
329 allocator.ForceUnlock();
330 }
331 }
332
333 void GetOptions(AllocatorOptions *options) const {
334 options->quarantine_size_mb = quarantine.GetSize() >> 20;
335 options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
336 options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
337 options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
338 options->may_return_null = AllocatorMayReturnNull();
339 options->alloc_dealloc_mismatch =
340 atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
341 options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
342 }
343
344 // -------------------- Helper methods. -------------------------
345 uptr ComputeRZLog(uptr user_requested_size) {
346 u32 rz_log =
347 user_requested_size <= 64 - 16 ? 0 :
348 user_requested_size <= 128 - 32 ? 1 :
349 user_requested_size <= 512 - 64 ? 2 :
350 user_requested_size <= 4096 - 128 ? 3 :
351 user_requested_size <= (1 << 14) - 256 ? 4 :
352 user_requested_size <= (1 << 15) - 512 ? 5 :
353 user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
354 u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
355 u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
356 return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
357 }
358
359 static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
360 if (user_requested_alignment < 8)
361 return 0;
362 if (user_requested_alignment > 512)
363 user_requested_alignment = 512;
364 return Log2(user_requested_alignment) - 2;
365 }
366
367 static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
368 if (user_requested_alignment_log == 0)
369 return 0;
370 return 1LL << (user_requested_alignment_log + 2);
371 }
372
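
Another illustrative sketch (not ASan code): the 3-bit alignment encoding implemented by ComputeUserRequestedAlignmentLog and ComputeUserAlignment above maps requested alignments below 8 to 0 ("default"), clamps larger requests to 512, and otherwise stores log2(alignment) - 2, so 8 encodes as 1 and 512 encodes as 7:

#include <cassert>
static unsigned EncodeAlign(unsigned a) {    // mirrors ComputeUserRequestedAlignmentLog
  if (a < 8) return 0;
  if (a > 512) a = 512;
  unsigned log = 0;
  while ((1u << log) < a) ++log;             // log2 for power-of-two alignments
  return log - 2;
}
static unsigned DecodeAlign(unsigned log) {  // mirrors ComputeUserAlignment
  return log == 0 ? 0 : 1u << (log + 2);
}
int main() {
  assert(EncodeAlign(8) == 1 && DecodeAlign(1) == 8);
  assert(EncodeAlign(512) == 7 && DecodeAlign(7) == 512);
  assert(EncodeAlign(4) == 0 && DecodeAlign(0) == 0);  // small alignments collapse to "default"
  return 0;
}
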
373 // We have an address between two chunks, and we want to report just one.
374 AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
375 AsanChunk *right_chunk) {
376 // Prefer an allocated chunk over freed chunk and freed chunk
377 // over available chunk.
378 if (left_chunk->chunk_state != right_chunk->chunk_state) {
379 if (left_chunk->chunk_state == CHUNK_ALLOCATED)
380 return left_chunk;
381 if (right_chunk->chunk_state == CHUNK_ALLOCATED)
382 return right_chunk;
383 if (left_chunk->chunk_state == CHUNK_QUARANTINE)
384 return left_chunk;
385 if (right_chunk->chunk_state == CHUNK_QUARANTINE)
386 return right_chunk;
387 }
388 // Same chunk_state: choose based on offset.
389 sptr l_offset = 0, r_offset = 0;
390 CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
391 CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
392 if (l_offset < r_offset)
393 return left_chunk;
394 return right_chunk;
395 }
396
397 // -------------------- Allocation/Deallocation routines ---------------
398 void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
399 AllocType alloc_type, bool can_fill) {
400 if (UNLIKELY(!asan_inited)__builtin_expect(!!(!asan_inited), 0))
1
Assuming 'asan_inited' is not equal to 0
2
Taking false branch
401 AsanInitFromRtl();
402 if (RssLimitExceeded()) {
3
Assuming the condition is false
4
Taking false branch
403 if (AllocatorMayReturnNull())
404 return nullptr;
405 ReportRssLimitExceeded(stack);
406 }
407 Flags &fl = *flags();
408 CHECK(stack)do { __sanitizer::u64 v1 = (__sanitizer::u64)((stack)); __sanitizer
::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1
!= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc"
, 408, "(" "(stack)" ") " "!=" " (" "0" ")", v1, v2); } while
(false)
;
5
Assuming 'v1' is not equal to 'v2'
6
Taking false branch
7
Loop condition is false. Exiting loop
409 const uptr min_alignment = SHADOW_GRANULARITY(1ULL << kDefaultShadowScale);
410 const uptr user_requested_alignment_log =
411 ComputeUserRequestedAlignmentLog(alignment);
412 if (alignment < min_alignment)
8
Taking false branch
413 alignment = min_alignment;
414 if (size == 0) {
9
Assuming 'size' is not equal to 0
10
Taking false branch
415 // We'd be happy to avoid allocating memory for zero-size requests, but
416 // some programs/tests depend on this behavior and assume that malloc
417 // would not return NULL even for zero-size allocations. Moreover, it
418 // looks like operator new should never return NULL, and results of
419 // consecutive "new" calls must be different even if the allocated size
420 // is zero.
421 size = 1;
422 }
423 CHECK(IsPowerOfTwo(alignment))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsPowerOfTwo(alignment
))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect
(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc"
, 423, "(" "(IsPowerOfTwo(alignment))" ") " "!=" " (" "0" ")"
, v1, v2); } while (false)
;
11
Taking false branch
12
Loop condition is false. Exiting loop
424 uptr rz_log = ComputeRZLog(size);
425 uptr rz_size = RZLog2Size(rz_log);
426 uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
427 uptr needed_size = rounded_size + rz_size;
428 if (alignment > min_alignment)
13
Assuming 'alignment' is <= 'min_alignment'
14
Taking false branch
429 needed_size += alignment;
430 bool using_primary_allocator = true;
431 // If we are allocating from the secondary allocator, there will be no
432 // automatic right redzone, so add the right redzone manually.
433 if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
15
Taking true branch
434 needed_size += rz_size;
435 using_primary_allocator = false;
436 }
437 CHECK(IsAligned(needed_size, min_alignment))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsAligned(needed_size
, min_alignment))); __sanitizer::u64 v2 = (__sanitizer::u64)(
0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed
("/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc"
, 437, "(" "(IsAligned(needed_size, min_alignment))" ") " "!="
" (" "0" ")", v1, v2); } while (false)
;
16
Taking false branch
17
Loop condition is false. Exiting loop
438 if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
18
Assuming 'size' is <= 'kMaxAllowedMallocSize'
19
Assuming 'needed_size' is <= 'kMaxAllowedMallocSize'
20
Taking false branch
439 if (AllocatorMayReturnNull()) {
440 Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
441 (void*)size);
442 return nullptr;
443 }
444 ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
445 stack);
446 }
447
448 AsanThread *t = GetCurrentThread();
449 void *allocated;
450 if (t) {
21
Assuming 't' is non-null
22
Taking true branch
451 AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
452 allocated = allocator.Allocate(cache, needed_size, 8);
23
Calling 'CombinedAllocator::Allocate'
60
Returning from 'CombinedAllocator::Allocate'
61
Value assigned to 'allocated'
453 } else {
454 SpinMutexLock l(&fallback_mutex);
455 AllocatorCache *cache = &fallback_allocator_cache;
456 allocated = allocator.Allocate(cache, needed_size, 8);
457 }
458 if (UNLIKELY(!allocated)__builtin_expect(!!(!allocated), 0)) {
62
Assuming 'allocated' is null
63
Taking true branch
459 SetAllocatorOutOfMemory();
460 if (AllocatorMayReturnNull())
64
Assuming the condition is false
65
Taking false branch
461 return nullptr;
462 ReportOutOfMemory(size, stack);
463 }
464
465 if (*(u8 *)MEM_TO_SHADOW((uptr)allocated)((((uptr)allocated) >> kDefaultShadowScale) + (kDefaultShadowOffset32
))
== 0
&& CanPoisonMemory()) {
66
Assuming the condition is false
466 // Heap poisoning is enabled, but the allocator provides an unpoisoned
467 // chunk. This is possible if CanPoisonMemory() was false for some
468 // time, for example, due to flags()->start_disabled.
469 // Anyway, poison the block before using it for anything else.
470 uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
471 PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
472 }
473
474 uptr alloc_beg = reinterpret_cast<uptr>(allocated);
67
'alloc_beg' initialized to 0
475 uptr alloc_end = alloc_beg + needed_size;
476 uptr beg_plus_redzone = alloc_beg + rz_size;
477 uptr user_beg = beg_plus_redzone;
478 if (!IsAligned(user_beg, alignment))
68
Taking false branch
479 user_beg = RoundUpTo(user_beg, alignment);
480 uptr user_end = user_beg + size;
481 CHECK_LE(user_end, alloc_end)do { __sanitizer::u64 v1 = (__sanitizer::u64)((user_end)); __sanitizer
::u64 v2 = (__sanitizer::u64)((alloc_end)); if (__builtin_expect
(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc"
, 481, "(" "(user_end)" ") " "<=" " (" "(alloc_end)" ")", v1
, v2); } while (false)
;
69
Assuming 'v1' is <= 'v2'
70
Taking false branch
71
Loop condition is false. Exiting loop
482 uptr chunk_beg = user_beg - kChunkHeaderSize;
483 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
484 m->alloc_type = alloc_type;
485 m->rz_log = rz_log;
486 u32 alloc_tid = t ? t->tid() : 0;
72
'?' condition is true
487 m->alloc_tid = alloc_tid;
488 CHECK_EQ(alloc_tid, m->alloc_tid)do { __sanitizer::u64 v1 = (__sanitizer::u64)((alloc_tid)); __sanitizer
::u64 v2 = (__sanitizer::u64)((m->alloc_tid)); if (__builtin_expect
(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc"
, 488, "(" "(alloc_tid)" ") " "==" " (" "(m->alloc_tid)" ")"
, v1, v2); } while (false)
; // Does alloc_tid fit into the bitfield?
73
Taking false branch
74
Loop condition is false. Exiting loop
489 m->free_tid = kInvalidTid;
490 m->from_memalign = user_beg != beg_plus_redzone;
491 if (alloc_beg != chunk_beg) {
75
Assuming 'alloc_beg' is not equal to 'chunk_beg'
76
Taking true branch
492 CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg)do { __sanitizer::u64 v1 = (__sanitizer::u64)((alloc_beg+ 2 *
sizeof(uptr))); __sanitizer::u64 v2 = (__sanitizer::u64)((chunk_beg
)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::
CheckFailed("/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/asan_allocator.cc"
, 492, "(" "(alloc_beg+ 2 * sizeof(uptr))" ") " "<=" " (" "(chunk_beg)"
")", v1, v2); } while (false)
;
77
Assuming 'v1' is <= 'v2'
78
Taking false branch
79
Loop condition is false. Exiting loop
493 reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
80
Array access (from variable 'alloc_beg') results in a null pointer dereference
494 reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
495 }
496 if (using_primary_allocator) {
497 CHECK(size);
498 m->user_requested_size = size;
499 CHECK(allocator.FromPrimary(allocated));
500 } else {
501 CHECK(!allocator.FromPrimary(allocated));
502 m->user_requested_size = SizeClassMap::kMaxSize;
503 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
504 meta[0] = size;
505 meta[1] = chunk_beg;
506 }
507 m->user_requested_alignment_log = user_requested_alignment_log;
508
509 m->alloc_context_id = StackDepotPut(*stack);
510
511 uptr size_rounded_down_to_granularity =
512 RoundDownTo(size, SHADOW_GRANULARITY(1ULL << kDefaultShadowScale));
513 // Unpoison the bulk of the memory region.
514 if (size_rounded_down_to_granularity)
515 PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
516 // Deal with the end of the region if size is not aligned to granularity.
517 if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
518 u8 *shadow =
519 (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
520 *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY(1ULL << kDefaultShadowScale) - 1)) : 0;
521 }
522
523 AsanStats &thread_stats = GetCurrentThreadStats();
524 thread_stats.mallocs++;
525 thread_stats.malloced += size;
526 thread_stats.malloced_redzones += needed_size - size;
527 if (needed_size > SizeClassMap::kMaxSize)
528 thread_stats.malloc_large++;
529 else
530 thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
531
532 void *res = reinterpret_cast<void *>(user_beg);
533 if (can_fill && fl.max_malloc_fill_size) {
534 uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
535 REAL(memset)__interception::real_memset(res, fl.malloc_fill_byte, fill_size);
536 }
537#if CAN_SANITIZE_LEAKS1
538 m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
539 : __lsan::kDirectlyLeaked;
540#endif
541 // Must be the last mutation of metadata in this function.
542 atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
543 ASAN_MALLOC_HOOK(res, size);
544 return res;
545 }
546
547 // Set quarantine flag if chunk is allocated, issue ASan error report on
548 // available and quarantined chunks. Return true on success, false otherwise.
549 bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
550 BufferedStackTrace *stack) {
551 u8 old_chunk_state = CHUNK_ALLOCATED;
552 // Flip the chunk_state atomically to avoid race on double-free.
553 if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
554 CHUNK_QUARANTINE,
555 memory_order_acquire)) {
556 ReportInvalidFree(ptr, old_chunk_state, stack);
557 // It's not safe to push a chunk in quarantine on invalid free.
558 return false;
559 }
560 CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
561 return true;
562 }
563
564 // Expects the chunk to already be marked as quarantined by using
565 // AtomicallySetQuarantineFlagIfAllocated.
566 void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
567 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
568 CHECK_GE(m->alloc_tid, 0);
569 if (SANITIZER_WORDSIZE32 == 64) // On 32-bits this resides in user area.
570 CHECK_EQ(m->free_tid, kInvalidTid);
571 AsanThread *t = GetCurrentThread();
572 m->free_tid = t ? t->tid() : 0;
573 m->free_context_id = StackDepotPut(*stack);
574
575 Flags &fl = *flags();
576 if (fl.max_free_fill_size > 0) {
577 // We have to skip the chunk header, it contains free_context_id.
578 uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
579 if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
580 uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
581 size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
582 REAL(memset)__interception::real_memset((void *)scribble_start, fl.free_fill_byte, size_to_fill);
583 }
584 }
585
586 // Poison the region.
587 PoisonShadow(m->Beg(),
588 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY(1ULL << kDefaultShadowScale)),
589 kAsanHeapFreeMagic);
590
591 AsanStats &thread_stats = GetCurrentThreadStats();
592 thread_stats.frees++;
593 thread_stats.freed += m->UsedSize();
594
595 // Push into quarantine.
596 if (t) {
597 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
598 AllocatorCache *ac = GetAllocatorCache(ms);
599 quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
600 m->UsedSize());
601 } else {
602 SpinMutexLock l(&fallback_mutex);
603 AllocatorCache *ac = &fallback_allocator_cache;
604 quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
605 m, m->UsedSize());
606 }
607 }
608
609 void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
610 BufferedStackTrace *stack, AllocType alloc_type) {
611 uptr p = reinterpret_cast<uptr>(ptr);
612 if (p == 0) return;
613
614 uptr chunk_beg = p - kChunkHeaderSize;
615 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
616
617 // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
618 // malloc. Don't report an invalid free in this case.
619 if (SANITIZER_WINDOWS0 &&
620 !get_allocator().PointerIsMine(ptr)) {
621 if (!IsSystemHeapAddress(p))
622 ReportFreeNotMalloced(p, stack);
623 return;
624 }
625
626 ASAN_FREE_HOOK(ptr);
627
628 // Must mark the chunk as quarantined before any changes to its metadata.
629 // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
630 if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
631
632 if (m->alloc_type != alloc_type) {
633 if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
634 ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
635 (AllocType)alloc_type);
636 }
637 } else {
638 if (flags()->new_delete_type_mismatch &&
639 (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
640 ((delete_size && delete_size != m->UsedSize()) ||
641 ComputeUserRequestedAlignmentLog(delete_alignment) !=
642 m->user_requested_alignment_log)) {
643 ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
644 }
645 }
646
647 QuarantineChunk(m, ptr, stack);
648 }
649
650 void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
651 CHECK(old_ptr && new_size);
652 uptr p = reinterpret_cast<uptr>(old_ptr);
653 uptr chunk_beg = p - kChunkHeaderSize;
654 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
655
656 AsanStats &thread_stats = GetCurrentThreadStats();
657 thread_stats.reallocs++;
658 thread_stats.realloced += new_size;
659
660 void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
661 if (new_ptr) {
662 u8 chunk_state = m->chunk_state;
663 if (chunk_state != CHUNK_ALLOCATED)
664 ReportInvalidFree(old_ptr, chunk_state, stack);
665 CHECK_NE(REAL(memcpy), nullptr);
666 uptr memcpy_size = Min(new_size, m->UsedSize());
667 // If realloc() races with free(), we may start copying freed memory.
668 // However, we will report racy double-free later anyway.
669 REAL(memcpy)__interception::real_memcpy(new_ptr, old_ptr, memcpy_size);
670 Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
671 }
672 return new_ptr;
673 }
674
675 void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
676 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))__builtin_expect(!!(CheckForCallocOverflow(size, nmemb)), 0)) {
677 if (AllocatorMayReturnNull())
678 return nullptr;
679 ReportCallocOverflow(nmemb, size, stack);
680 }
681 void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
682 // If the memory comes from the secondary allocator no need to clear it
683 // as it comes directly from mmap.
684 if (ptr && allocator.FromPrimary(ptr))
685 REAL(memset)__interception::real_memset(ptr, 0, nmemb * size);
686 return ptr;
687 }
688
689 void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
690 if (chunk_state == CHUNK_QUARANTINE)
691 ReportDoubleFree((uptr)ptr, stack);
692 else
693 ReportFreeNotMalloced((uptr)ptr, stack);
694 }
695
696 void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
697 AllocatorCache *ac = GetAllocatorCache(ms);
698 quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
699 allocator.SwallowCache(ac);
700 }
701
702 // -------------------------- Chunk lookup ----------------------
703
704 // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
705 AsanChunk *GetAsanChunk(void *alloc_beg) {
706 if (!alloc_beg) return nullptr;
707 if (!allocator.FromPrimary(alloc_beg)) {
708 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
709 AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
710 return m;
711 }
712 uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
713 if (alloc_magic[0] == kAllocBegMagic)
714 return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
715 return reinterpret_cast<AsanChunk *>(alloc_beg);
716 }
717
718 AsanChunk *GetAsanChunkByAddr(uptr p) {
719 void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
720 return GetAsanChunk(alloc_beg);
721 }
722
723 // Allocator must be locked when this function is called.
724 AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
725 void *alloc_beg =
726 allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
727 return GetAsanChunk(alloc_beg);
728 }
729
730 uptr AllocationSize(uptr p) {
731 AsanChunk *m = GetAsanChunkByAddr(p);
732 if (!m) return 0;
733 if (m->chunk_state != CHUNK_ALLOCATED) return 0;
734 if (m->Beg() != p) return 0;
735 return m->UsedSize();
736 }
737
738 AsanChunkView FindHeapChunkByAddress(uptr addr) {
739 AsanChunk *m1 = GetAsanChunkByAddr(addr);
740 if (!m1) return AsanChunkView(m1);
741 sptr offset = 0;
742 if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
743 // The address is in the chunk's left redzone, so maybe it is actually
744 // a right buffer overflow from the other chunk to the left.
745 // Search a bit to the left to see if there is another chunk.
746 AsanChunk *m2 = nullptr;
747 for (uptr l = 1; l < GetPageSizeCached(); l++) {
748 m2 = GetAsanChunkByAddr(addr - l);
749 if (m2 == m1) continue; // Still the same chunk.
750 break;
751 }
752 if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
753 m1 = ChooseChunk(addr, m2, m1);
754 }
755 return AsanChunkView(m1);
756 }
757
758 void Purge(BufferedStackTrace *stack) {
759 AsanThread *t = GetCurrentThread();
760 if (t) {
761 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
762 quarantine.DrainAndRecycle(GetQuarantineCache(ms),
763 QuarantineCallback(GetAllocatorCache(ms),
764 stack));
765 }
766 {
767 SpinMutexLock l(&fallback_mutex);
768 quarantine.DrainAndRecycle(&fallback_quarantine_cache,
769 QuarantineCallback(&fallback_allocator_cache,
770 stack));
771 }
772
773 allocator.ForceReleaseToOS();
774 }
775
776 void PrintStats() {
777 allocator.PrintStats();
778 quarantine.PrintStats();
779 }
780
781 void ForceLock() {
782 allocator.ForceLock();
783 fallback_mutex.Lock();
784 }
785
786 void ForceUnlock() {
787 fallback_mutex.Unlock();
788 allocator.ForceUnlock();
789 }
790};
791
792static Allocator instance(LINKER_INITIALIZED);
793
794static AsanAllocator &get_allocator() {
795 return instance.allocator;
796}
797
798bool AsanChunkView::IsValid() const {
799 return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
800}
801bool AsanChunkView::IsAllocated() const {
802 return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
803}
804bool AsanChunkView::IsQuarantined() const {
805 return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
806}
807uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
808uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
809uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
810u32 AsanChunkView::UserRequestedAlignment() const {
811 return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
812}
813uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
814uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
815AllocType AsanChunkView::GetAllocType() const {
816 return (AllocType)chunk_->alloc_type;
817}
818
819static StackTrace GetStackTraceFromId(u32 id) {
820 CHECK(id);
821 StackTrace res = StackDepotGet(id);
822 CHECK(res.trace);
823 return res;
824}
825
826u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
827u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
828
829StackTrace AsanChunkView::GetAllocStack() const {
830 return GetStackTraceFromId(GetAllocStackId());
831}
832
833StackTrace AsanChunkView::GetFreeStack() const {
834 return GetStackTraceFromId(GetFreeStackId());
835}
836
837void InitializeAllocator(const AllocatorOptions &options) {
838 instance.InitLinkerInitialized(options);
839}
840
841void ReInitializeAllocator(const AllocatorOptions &options) {
842 instance.ReInitialize(options);
843}
844
845void GetAllocatorOptions(AllocatorOptions *options) {
846 instance.GetOptions(options);
847}
848
849AsanChunkView FindHeapChunkByAddress(uptr addr) {
850 return instance.FindHeapChunkByAddress(addr);
851}
852AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
853 return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
854}
855
856void AsanThreadLocalMallocStorage::CommitBack() {
857 GET_STACK_TRACE_MALLOC;
858 instance.CommitBack(this, &stack);
859}
860
861void PrintInternalAllocatorStats() {
862 instance.PrintStats();
863}
864
865void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
866 instance.Deallocate(ptr, 0, 0, stack, alloc_type);
867}
868
869void asan_delete(void *ptr, uptr size, uptr alignment,
870 BufferedStackTrace *stack, AllocType alloc_type) {
871 instance.Deallocate(ptr, size, alignment, stack, alloc_type);
872}
873
874void *asan_malloc(uptr size, BufferedStackTrace *stack) {
875 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
876}
877
878void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
879 return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
880}
881
882void *asan_reallocarray(void *p, uptr nmemb, uptr size,
883 BufferedStackTrace *stack) {
884 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))__builtin_expect(!!(CheckForCallocOverflow(size, nmemb)), 0)) {
885 errno(*__errno_location()) = errno_ENOMEM12;
886 if (AllocatorMayReturnNull())
887 return nullptr;
888 ReportReallocArrayOverflow(nmemb, size, stack);
889 }
890 return asan_realloc(p, nmemb * size, stack);
891}
892
893void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
894 if (!p)
895 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
896 if (size == 0) {
897 if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
898 instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
899 return nullptr;
900 }
901 // Allocate a size of 1 if we shouldn't free() on Realloc to 0
902 size = 1;
903 }
904 return SetErrnoOnNull(instance.Reallocate(p, size, stack));
905}
906
907void *asan_valloc(uptr size, BufferedStackTrace *stack) {
908 return SetErrnoOnNull(
909 instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
910}
911
912void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
913 uptr PageSize = GetPageSizeCached();
914 if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))__builtin_expect(!!(CheckForPvallocOverflow(size, PageSize)),
0)
) {
915 errno(*__errno_location()) = errno_ENOMEM12;
916 if (AllocatorMayReturnNull())
917 return nullptr;
918 ReportPvallocOverflow(size, stack);
919 }
920 // pvalloc(0) should allocate one page.
921 size = size ? RoundUpTo(size, PageSize) : PageSize;
922 return SetErrnoOnNull(
923 instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
924}
925
926void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
927 AllocType alloc_type) {
928 if (UNLIKELY(!IsPowerOfTwo(alignment))__builtin_expect(!!(!IsPowerOfTwo(alignment)), 0)) {
929 errno(*__errno_location()) = errno_EINVAL22;
930 if (AllocatorMayReturnNull())
931 return nullptr;
932 ReportInvalidAllocationAlignment(alignment, stack);
933 }
934 return SetErrnoOnNull(
935 instance.Allocate(size, alignment, stack, alloc_type, true));
936}
937
938void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
939 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))__builtin_expect(!!(!CheckAlignedAllocAlignmentAndSize(alignment
, size)), 0)
) {
940 errno(*__errno_location()) = errno_EINVAL22;
941 if (AllocatorMayReturnNull())
942 return nullptr;
943 ReportInvalidAlignedAllocAlignment(size, alignment, stack);
944 }
945 return SetErrnoOnNull(
946 instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
947}
948
949int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
950 BufferedStackTrace *stack) {
951 if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))__builtin_expect(!!(!CheckPosixMemalignAlignment(alignment)),
0)
) {
952 if (AllocatorMayReturnNull())
953 return errno_EINVAL22;
954 ReportInvalidPosixMemalignAlignment(alignment, stack);
955 }
956 void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
957 if (UNLIKELY(!ptr)__builtin_expect(!!(!ptr), 0))
958 // OOM error is already taken care of by Allocate.
959 return errno_ENOMEM12;
960 CHECK(IsAligned((uptr)ptr, alignment));
961 *memptr = ptr;
962 return 0;
963}
964
965uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
966 if (!ptr) return 0;
967 uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
968 if (flags()->check_malloc_usable_size && (usable_size == 0)) {
969 GET_STACK_TRACE_FATAL(pc, bp);
970 ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
971 }
972 return usable_size;
973}
974
975uptr asan_mz_size(const void *ptr) {
976 return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
977}
978
979void asan_mz_force_lock() {
980 instance.ForceLock();
981}
982
983void asan_mz_force_unlock() {
984 instance.ForceUnlock();
985}
986
987void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
988 instance.SetRssLimitExceeded(limit_exceeded);
989}
990
991} // namespace __asan
992
993// --- Implementation of LSan-specific functions --- {{{1
994namespace __lsan {
995void LockAllocator() {
996 __asan::get_allocator().ForceLock();
997}
998
999void UnlockAllocator() {
1000 __asan::get_allocator().ForceUnlock();
1001}
1002
1003void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
1004 *begin = (uptr)&__asan::get_allocator();
1005 *end = *begin + sizeof(__asan::get_allocator());
1006}
1007
1008uptr PointsIntoChunk(void* p) {
1009 uptr addr = reinterpret_cast<uptr>(p);
1010 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1011 if (!m) return 0;
1012 uptr chunk = m->Beg();
1013 if (m->chunk_state != __asan::CHUNK_ALLOCATED)
1014 return 0;
1015 if (m->AddrIsInside(addr, /*locked_version=*/true))
1016 return chunk;
1017 if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
1018 addr))
1019 return chunk;
1020 return 0;
1021}
1022
1023uptr GetUserBegin(uptr chunk) {
1024 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1025 CHECK(m);
1026 return m->Beg();
1027}
1028
1029LsanMetadata::LsanMetadata(uptr chunk) {
1030 metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
1031}
1032
1033bool LsanMetadata::allocated() const {
1034 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1035 return m->chunk_state == __asan::CHUNK_ALLOCATED;
1036}
1037
1038ChunkTag LsanMetadata::tag() const {
1039 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1040 return static_cast<ChunkTag>(m->lsan_tag);
1041}
1042
1043void LsanMetadata::set_tag(ChunkTag value) {
1044 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1045 m->lsan_tag = value;
1046}
1047
1048uptr LsanMetadata::requested_size() const {
1049 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1050 return m->UsedSize(/*locked_version=*/true);
1051}
1052
1053u32 LsanMetadata::stack_trace_id() const {
1054 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1055 return m->alloc_context_id;
1056}
1057
1058void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1059 __asan::get_allocator().ForEachChunk(callback, arg);
1060}
1061
1062IgnoreObjectResult IgnoreObjectLocked(const void *p) {
1063 uptr addr = reinterpret_cast<uptr>(p);
1064 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1065 if (!m) return kIgnoreObjectInvalid;
1066 if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
1067 if (m->lsan_tag == kIgnored)
1068 return kIgnoreObjectAlreadyIgnored;
1069 m->lsan_tag = __lsan::kIgnored;
1070 return kIgnoreObjectSuccess;
1071 } else {
1072 return kIgnoreObjectInvalid;
1073 }
1074}
1075} // namespace __lsan
1076
1077// ---------------------- Interface ---------------- {{{1
1078using namespace __asan; // NOLINT
1079
1080// ASan allocator doesn't reserve extra bytes, so normally we would
1081// just return "size". We don't want to expose our redzone sizes, etc here.
1082uptr __sanitizer_get_estimated_allocated_size(uptr size) {
1083 return size;
1084}
1085
1086int __sanitizer_get_ownership(const void *p) {
1087 uptr ptr = reinterpret_cast<uptr>(p);
1088 return instance.AllocationSize(ptr) > 0;
1089}
1090
1091uptr __sanitizer_get_allocated_size(const void *p) {
1092 if (!p) return 0;
1093 uptr ptr = reinterpret_cast<uptr>(p);
1094 uptr allocated_size = instance.AllocationSize(ptr);
1095 // Die if p is not malloced or if it is already freed.
1096 if (allocated_size == 0) {
1097 GET_STACK_TRACE_FATAL_HERE;
1098 ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
1099 }
1100 return allocated_size;
1101}
1102
1103void __sanitizer_purge_allocator() {
1104 GET_STACK_TRACE_MALLOC;
1105 instance.Purge(&stack);
1106}
1107
1108#if !SANITIZER_SUPPORTS_WEAK_HOOKS
1109// Provide default (no-op) implementation of malloc hooks.
1110SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
1111 void *ptr, uptr size) {
1112 (void)ptr;
1113 (void)size;
1114}
1115
1116SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
1117 (void)ptr;
1118}
1119#endif
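The functions above form the public allocator interface of this translation unit. As a rough usage sketch (my addition, not part of the report or of asan_allocator.cc), a program built with ASan can reach these entry points through <sanitizer/allocator_interface.h>; the exact output depends on how the allocator rounds sizes internally.

#include <sanitizer/allocator_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  void *p = malloc(100);
  // Non-zero only if the pointer is owned by the sanitizer allocator
  // (routes to __sanitizer_get_ownership above).
  printf("owned: %d\n", __sanitizer_get_ownership(p));
  // Reports the originally requested size; the report above dies if p was
  // never allocated or is already freed.
  printf("size: %zu\n", __sanitizer_get_allocated_size(p));
  // ASan reserves no extra usable bytes, so the estimate is just the input.
  printf("estimate: %zu\n", __sanitizer_get_estimated_allocated_size(100));
  free(p);
  return 0;
}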

/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h

1//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16// This class implements a complete memory allocator by using two
17// internal allocators:
18// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
19// When allocating 2^x bytes it should return 2^x aligned chunk.
20// PrimaryAllocator is used via a local AllocatorCache.
21// SecondaryAllocator can allocate anything, but is not efficient.
22template <class PrimaryAllocator,
23 class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
24class CombinedAllocator {
25 public:
26 using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
27 using SecondaryAllocator =
28 LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
29 LargeMmapAllocatorPtrArray,
30 typename PrimaryAllocator::AddressSpaceView>;
31
32 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
33 stats_.InitLinkerInitialized();
34 primary_.Init(release_to_os_interval_ms);
35 secondary_.InitLinkerInitialized();
36 }
37
38 void Init(s32 release_to_os_interval_ms) {
39 stats_.Init();
40 primary_.Init(release_to_os_interval_ms);
41 secondary_.Init();
42 }
43
44 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
45 // Returning 0 on malloc(0) may break a lot of code.
46 if (size == 0)
24
Assuming 'size' is not equal to 0
25
Taking false branch
47 size = 1;
48 if (size + alignment < size) {
26
Assuming the condition is false
27
Taking false branch
49 Report("WARNING: %s: CombinedAllocator allocation overflow: "
50 "0x%zx bytes with 0x%zx alignment requested\n",
51 SanitizerToolName, size, alignment);
52 return nullptr;
53 }
54 uptr original_size = size;
55 // If alignment requirements are to be fulfilled by the frontend allocator
56 // rather than by the primary or secondary, passing an alignment lower than
57 // or equal to 8 will prevent any further rounding up, as well as the later
58 // alignment check.
59 if (alignment > 8)
28
Taking false branch
60 size = RoundUpTo(size, alignment);
61 // The primary allocator should return a 2^x aligned allocation when
62 // requested 2^x bytes, hence using the rounded up 'size' when being
63 // serviced by the primary (this is no longer true when the primary is
64 // using a non-fixed base address). The secondary takes care of the
65 // alignment without such requirement, and allocating 'size' would use
66 // extraneous memory, so we employ 'original_size'.
67 void *res;
68 if (primary_.CanAllocate(size, alignment))
29
Taking false branch
69 res = cache->Allocate(&primary_, primary_.ClassID(size));
70 else
71 res = secondary_.Allocate(&stats_, original_size, alignment);
30
Calling 'LargeMmapAllocator::Allocate'
56
Returning from 'LargeMmapAllocator::Allocate'
57
Value assigned to 'res'
72 if (alignment > 8)
58
Taking false branch
73 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
74 return res;
59
Returning pointer (loaded from 'res')
75 }
76
77 s32 ReleaseToOSIntervalMs() const {
78 return primary_.ReleaseToOSIntervalMs();
79 }
80
81 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
82 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
83 }
84
85 void ForceReleaseToOS() {
86 primary_.ForceReleaseToOS();
87 }
88
89 void Deallocate(AllocatorCache *cache, void *p) {
90 if (!p) return;
91 if (primary_.PointerIsMine(p))
92 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
93 else
94 secondary_.Deallocate(&stats_, p);
95 }
96
97 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
98 uptr alignment) {
99 if (!p)
100 return Allocate(cache, new_size, alignment);
101 if (!new_size) {
102 Deallocate(cache, p);
103 return nullptr;
104 }
105 CHECK(PointerIsMine(p));
106 uptr old_size = GetActuallyAllocatedSize(p);
107 uptr memcpy_size = Min(new_size, old_size);
108 void *new_p = Allocate(cache, new_size, alignment);
109 if (new_p)
110 internal_memcpy(new_p, p, memcpy_size);
111 Deallocate(cache, p);
112 return new_p;
113 }
114
115 bool PointerIsMine(void *p) {
116 if (primary_.PointerIsMine(p))
117 return true;
118 return secondary_.PointerIsMine(p);
119 }
120
121 bool FromPrimary(void *p) {
122 return primary_.PointerIsMine(p);
123 }
124
125 void *GetMetaData(const void *p) {
126 if (primary_.PointerIsMine(p))
127 return primary_.GetMetaData(p);
128 return secondary_.GetMetaData(p);
129 }
130
131 void *GetBlockBegin(const void *p) {
132 if (primary_.PointerIsMine(p))
133 return primary_.GetBlockBegin(p);
134 return secondary_.GetBlockBegin(p);
135 }
136
137 // This function does the same as GetBlockBegin, but is much faster.
138 // Must be called with the allocator locked.
139 void *GetBlockBeginFastLocked(void *p) {
140 if (primary_.PointerIsMine(p))
141 return primary_.GetBlockBegin(p);
142 return secondary_.GetBlockBeginFastLocked(p);
143 }
144
145 uptr GetActuallyAllocatedSize(void *p) {
146 if (primary_.PointerIsMine(p))
147 return primary_.GetActuallyAllocatedSize(p);
148 return secondary_.GetActuallyAllocatedSize(p);
149 }
150
151 uptr TotalMemoryUsed() {
152 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
153 }
154
155 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
156
157 void InitCache(AllocatorCache *cache) {
158 cache->Init(&stats_);
159 }
160
161 void DestroyCache(AllocatorCache *cache) {
162 cache->Destroy(&primary_, &stats_);
163 }
164
165 void SwallowCache(AllocatorCache *cache) {
166 cache->Drain(&primary_);
167 }
168
169 void GetStats(AllocatorStatCounters s) const {
170 stats_.Get(s);
171 }
172
173 void PrintStats() {
174 primary_.PrintStats();
175 secondary_.PrintStats();
176 }
177
178 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
179 // introspection API.
180 void ForceLock() {
181 primary_.ForceLock();
182 secondary_.ForceLock();
183 }
184
185 void ForceUnlock() {
186 secondary_.ForceUnlock();
187 primary_.ForceUnlock();
188 }
189
190 // Iterate over all existing chunks.
191 // The allocator must be locked when calling this function.
192 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
193 primary_.ForEachChunk(callback, arg);
194 secondary_.ForEachChunk(callback, arg);
195 }
196
197 private:
198 PrimaryAllocator primary_;
199 SecondaryAllocator secondary_;
200 AllocatorGlobalStats stats_;
201};
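As a compact standalone sketch of the front-end size handling in CombinedAllocator::Allocate above (my addition; ComputeAllocSize and RoundUpToPow2 are placeholder names, not symbols from this header): malloc(0) is promoted to one byte, size plus alignment is checked for wrap-around, and the size is only rounded up when the requested alignment exceeds 8.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Round size up to a power-of-two boundary (same idea as RoundUpTo above).
static uptr RoundUpToPow2(uptr size, uptr boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}

// Mirrors the checks at the top of CombinedAllocator::Allocate; returns 0
// where the real code would report an overflow and return nullptr.
static uptr ComputeAllocSize(uptr size, uptr alignment) {
  if (size == 0) size = 1;                // Returning 0 on malloc(0) may break code.
  if (size + alignment < size) return 0;  // size + alignment wrapped around.
  if (alignment > 8) size = RoundUpToPow2(size, alignment);
  return size;
}

int main() {
  printf("%zu\n", (size_t)ComputeAllocSize(0, 8));         // 1
  printf("%zu\n", (size_t)ComputeAllocSize(100, 64));      // 128
  printf("%zu\n", (size_t)ComputeAllocSize(~(uptr)0, 64)); // 0 (overflow)
  return 0;
}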

/build/llvm-toolchain-snapshot-9~svn362543/projects/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_secondary.h

1//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
17// allocated chunks. To be used in memory constrained or not memory hungry cases
18// (currently, 32 bits and internal allocator).
19class LargeMmapAllocatorPtrArrayStatic {
20 public:
21 INLINE void *Init() { return &p_[0]; }
22 INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
23 private:
24 static const int kMaxNumChunks = 1 << 15;
25 uptr p_[kMaxNumChunks];
26};
27
28// Much less restricted LargeMmapAllocator chunks list (comparing to
29// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
30// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the
31// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
32class LargeMmapAllocatorPtrArrayDynamic {
33 public:
34 INLINE void *Init() {
35 uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
36 SecondaryAllocatorName);
37 CHECK(p);
38 return reinterpret_cast<void*>(p);
39 }
40
41 INLINE void EnsureSpace(uptr n) {
42 CHECK_LT(n, kMaxNumChunks);
43 DCHECK(n <= n_reserved_);
44 if (UNLIKELY(n == n_reserved_)) {
45 address_range_.MapOrDie(
46 reinterpret_cast<uptr>(address_range_.base()) +
47 n_reserved_ * sizeof(uptr),
48 kChunksBlockCount * sizeof(uptr));
49 n_reserved_ += kChunksBlockCount;
50 }
51 }
52
53 private:
54 static const int kMaxNumChunks = 1 << 20;
55 static const int kChunksBlockCount = 1 << 14;
56 ReservedAddressRange address_range_;
57 uptr n_reserved_;
58};
59
60#if SANITIZER_WORDSIZE == 32
61typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
62#else
63typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
64#endif
65
66// This class can (de)allocate only large chunks of memory using mmap/unmap.
67// The main purpose of this allocator is to cover large and rare allocation
68// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
69template <class MapUnmapCallback = NoOpMapUnmapCallback,
70 class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
71 class AddressSpaceViewTy = LocalAddressSpaceView>
72class LargeMmapAllocator {
73 public:
74 using AddressSpaceView = AddressSpaceViewTy;
75 void InitLinkerInitialized() {
76 page_size_ = GetPageSizeCached();
77 chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
78 }
79
80 void Init() {
81 internal_memset(this, 0, sizeof(*this));
82 InitLinkerInitialized();
83 }
84
85 void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
86 CHECK(IsPowerOfTwo(alignment));
31
Taking false branch
32
Loop condition is false. Exiting loop
87 uptr map_size = RoundUpMapSize(size);
88 if (alignment > page_size_)
33
Assuming the condition is false
34
Taking false branch
89 map_size += alignment;
90 // Overflow.
91 if (map_size < size) {
35
Assuming 'map_size' is >= 'size'
36
Taking false branch
92 Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
93 "0x%zx bytes with 0x%zx alignment requested\n",
94 SanitizerToolName, map_size, alignment);
95 return nullptr;
96 }
97 uptr map_beg = reinterpret_cast<uptr>(
98 MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
99 if (!map_beg)
37
Assuming 'map_beg' is not equal to 0
38
Taking false branch
100 return nullptr;
101 CHECK(IsAligned(map_beg, page_size_));
39
Taking false branch
40
Loop condition is false. Exiting loop
102 MapUnmapCallback().OnMap(map_beg, map_size);
103 uptr map_end = map_beg + map_size;
104 uptr res = map_beg + page_size_;
105 if (res & (alignment - 1)) // Align.
41
Assuming the condition is false
42
Taking false branch
106 res += alignment - (res & (alignment - 1));
107 CHECK(IsAligned(res, alignment));
43
Taking false branch
44
Loop condition is false. Exiting loop
108 CHECK(IsAligned(res, page_size_));
45
Taking false branch
46
Loop condition is false. Exiting loop
109 CHECK_GE(res + size, map_beg);
47
Assuming 'v1' is >= 'v2'
48
Taking false branch
49
Loop condition is false. Exiting loop
110 CHECK_LE(res + size, map_end);
50
Assuming 'v1' is <= 'v2'
51
Taking false branch
52
Loop condition is false. Exiting loop
111 Header *h = GetHeader(res);
112 h->size = size;
113 h->map_beg = map_beg;
114 h->map_size = map_size;
115 uptr size_log = MostSignificantSetBitIndex(map_size);
116 CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
53
Assuming 'v1' is < 'v2'
54
Taking false branch
55
Loop condition is false. Exiting loop
117 {
118 SpinMutexLock l(&mutex_);
119 ptr_array_.EnsureSpace(n_chunks_);
120 uptr idx = n_chunks_++;
121 h->chunk_idx = idx;
122 chunks_[idx] = h;
123 chunks_sorted_ = false;
124 stats.n_allocs++;
125 stats.currently_allocated += map_size;
126 stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
127 stats.by_size_log[size_log]++;
128 stat->Add(AllocatorStatAllocated, map_size);
129 stat->Add(AllocatorStatMapped, map_size);
130 }
131 return reinterpret_cast<void*>(res);
132 }
133
134 void Deallocate(AllocatorStats *stat, void *p) {
135 Header *h = GetHeader(p);
136 {
137 SpinMutexLock l(&mutex_);
138 uptr idx = h->chunk_idx;
139 CHECK_EQ(chunks_[idx], h);
140 CHECK_LT(idx, n_chunks_);
141 chunks_[idx] = chunks_[--n_chunks_];
142 chunks_[idx]->chunk_idx = idx;
143 chunks_sorted_ = false;
144 stats.n_frees++;
145 stats.currently_allocated -= h->map_size;
146 stat->Sub(AllocatorStatAllocated, h->map_size);
147 stat->Sub(AllocatorStatMapped, h->map_size);
148 }
149 MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
150 UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
151 }
152
153 uptr TotalMemoryUsed() {
154 SpinMutexLock l(&mutex_);
155 uptr res = 0;
156 for (uptr i = 0; i < n_chunks_; i++) {
157 Header *h = chunks_[i];
158 CHECK_EQ(h->chunk_idx, i);
159 res += RoundUpMapSize(h->size);
160 }
161 return res;
162 }
163
164 bool PointerIsMine(const void *p) {
165 return GetBlockBegin(p) != nullptr;
166 }
167
168 uptr GetActuallyAllocatedSize(void *p) {
169 return RoundUpTo(GetHeader(p)->size, page_size_);
170 }
171
172 // At least page_size_/2 metadata bytes is available.
173 void *GetMetaData(const void *p) {
174 // Too slow: CHECK_EQ(p, GetBlockBegin(p));
175 if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
176 Printf("%s: bad pointer %p\n", SanitizerToolName, p);
177 CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
178 }
179 return GetHeader(p) + 1;
180 }
181
182 void *GetBlockBegin(const void *ptr) {
183 uptr p = reinterpret_cast<uptr>(ptr);
184 SpinMutexLock l(&mutex_);
185 uptr nearest_chunk = 0;
186 Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
187 // Cache-friendly linear search.
188 for (uptr i = 0; i < n_chunks_; i++) {
189 uptr ch = reinterpret_cast<uptr>(chunks[i]);
190 if (p < ch) continue; // p is at left to this chunk, skip it.
191 if (p - ch < p - nearest_chunk)
192 nearest_chunk = ch;
193 }
194 if (!nearest_chunk)
195 return nullptr;
196 const Header *h =
197 AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
198 Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
199 CHECK_GE(nearest_chunk, h->map_beg);
200 CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
201 CHECK_LE(nearest_chunk, p);
202 if (h->map_beg + h->map_size <= p)
203 return nullptr;
204 return GetUser(h_ptr);
205 }
206
207 void EnsureSortedChunks() {
208 if (chunks_sorted_) return;
209 Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
210 Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
211 for (uptr i = 0; i < n_chunks_; i++)
212 AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
213 chunks_sorted_ = true;
214 }
215
216 // This function does the same as GetBlockBegin, but is much faster.
217 // Must be called with the allocator locked.
218 void *GetBlockBeginFastLocked(void *ptr) {
219 mutex_.CheckLocked();
220 uptr p = reinterpret_cast<uptr>(ptr);
221 uptr n = n_chunks_;
222 if (!n) return nullptr;
223 EnsureSortedChunks();
224 Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
225 auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
226 auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
227 AddressSpaceView::Load(chunks[n - 1])->map_size;
228 if (p < min_mmap_ || p >= max_mmap_)
229 return nullptr;
230 uptr beg = 0, end = n - 1;
231 // This loop is a log(n) lower_bound. It does not check for the exact match
232 // to avoid expensive cache-thrashing loads.
233 while (end - beg >= 2) {
234 uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
235 if (p < reinterpret_cast<uptr>(chunks[mid]))
236 end = mid - 1; // We are not interested in chunks[mid].
237 else
238 beg = mid; // chunks[mid] may still be what we want.
239 }
240
241 if (beg < end) {
242 CHECK_EQ(beg + 1, end);
243 // There are 2 chunks left, choose one.
244 if (p >= reinterpret_cast<uptr>(chunks[end]))
245 beg = end;
246 }
247
248 const Header *h = AddressSpaceView::Load(chunks[beg]);
249 Header *h_ptr = chunks[beg];
250 if (h->map_beg + h->map_size <= p || p < h->map_beg)
251 return nullptr;
252 return GetUser(h_ptr);
253 }
254
255 void PrintStats() {
256 Printf("Stats: LargeMmapAllocator: allocated %zd times, "
257 "remains %zd (%zd K) max %zd M; by size logs: ",
258 stats.n_allocs, stats.n_allocs - stats.n_frees,
259 stats.currently_allocated >> 10, stats.max_allocated >> 20);
260 for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
261 uptr c = stats.by_size_log[i];
262 if (!c) continue;
263 Printf("%zd:%zd; ", i, c);
264 }
265 Printf("\n");
266 }
267
268 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
269 // introspection API.
270 void ForceLock() {
271 mutex_.Lock();
272 }
273
274 void ForceUnlock() {
275 mutex_.Unlock();
276 }
277
278 // Iterate over all existing chunks.
279 // The allocator must be locked when calling this function.
280 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
281 EnsureSortedChunks(); // Avoid doing the sort while iterating.
282 const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
283 for (uptr i = 0; i < n_chunks_; i++) {
284 const Header *t = chunks[i];
285 callback(reinterpret_cast<uptr>(GetUser(t)), arg);
286 // Consistency check: verify that the array did not change.
287 CHECK_EQ(chunks[i], t);
288 CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
289 }
290 }
291
292 private:
293 struct Header {
294 uptr map_beg;
295 uptr map_size;
296 uptr size;
297 uptr chunk_idx;
298 };
299
300 Header *GetHeader(uptr p) {
301 CHECK(IsAligned(p, page_size_));
302 return reinterpret_cast<Header*>(p - page_size_);
303 }
304 Header *GetHeader(const void *p) {
305 return GetHeader(reinterpret_cast<uptr>(p));
306 }
307
308 void *GetUser(const Header *h) {
309 CHECK(IsAligned((uptr)h, page_size_));
310 return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
311 }
312
313 uptr RoundUpMapSize(uptr size) {
314 return RoundUpTo(size, page_size_) + page_size_;
315 }
316
317 uptr page_size_;
318 Header **chunks_;
319 PtrArrayT ptr_array_;
320 uptr n_chunks_;
321 bool chunks_sorted_;
322 struct Stats {
323 uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
324 } stats;
325 StaticSpinMutex mutex_;
326};
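To make the search in GetBlockBeginFastLocked easier to follow, here is a self-contained sketch (my addition; LowerBoundNoExactMatch is a made-up name) of the same log(n) lower_bound: the loop narrows [beg, end] without ever testing for an exact match, and a final comparison decides between the last two candidates.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Returns the index of the last element <= p. Assumes chunks is sorted
// ascending, n >= 1, and chunks[0] <= p (mirrors the min_mmap_/max_mmap_
// range check done before the loop above).
static size_t LowerBoundNoExactMatch(const uint64_t *chunks, size_t n,
                                     uint64_t p) {
  size_t beg = 0, end = n - 1;
  while (end - beg >= 2) {
    size_t mid = (beg + end) / 2;  // Invariant: mid >= beg + 1.
    if (p < chunks[mid])
      end = mid - 1;               // chunks[mid] is already too large.
    else
      beg = mid;                   // chunks[mid] may still be the answer.
  }
  if (beg < end && p >= chunks[end])  // Two candidates left, pick the larger.
    beg = end;
  return beg;
}

int main() {
  const uint64_t chunks[] = {0x1000, 0x5000, 0x9000, 0x20000};
  assert(LowerBoundNoExactMatch(chunks, 4, 0x1000) == 0);
  assert(LowerBoundNoExactMatch(chunks, 4, 0x8fff) == 1);
  assert(LowerBoundNoExactMatch(chunks, 4, 0x9000) == 2);
  assert(LowerBoundNoExactMatch(chunks, 4, 0x30000) == 3);
  return 0;
}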