Bug Summary

File: compiler-rt/lib/asan/asan_allocator.cpp
Warning: line 510, column 46
Array access (from variable 'alloc_beg') results in a null pointer dereference

Annotated Source Code


clang -cc1 -triple i386-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name asan_allocator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu i686 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D ASAN_DYNAMIC=1 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/projects/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0/32 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/i386-pc-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/projects/compiler-rt/lib/asan -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -ftls-model=initial-exec -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-11-181444-25759-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/asan_allocator.cpp

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/asan_allocator.cpp

1//===-- asan_allocator.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address sanity checker.
10//
11// Implementation of ASan's memory allocator, 2-nd version.
12// This variant uses the allocator from sanitizer_common, i.e. the one shared
13// with ThreadSanitizer and MemorySanitizer.
14//
15//===----------------------------------------------------------------------===//
16
17#include "asan_allocator.h"
18#include "asan_mapping.h"
19#include "asan_poisoning.h"
20#include "asan_report.h"
21#include "asan_stack.h"
22#include "asan_thread.h"
23#include "sanitizer_common/sanitizer_allocator_checks.h"
24#include "sanitizer_common/sanitizer_allocator_interface.h"
25#include "sanitizer_common/sanitizer_errno.h"
26#include "sanitizer_common/sanitizer_flags.h"
27#include "sanitizer_common/sanitizer_internal_defs.h"
28#include "sanitizer_common/sanitizer_list.h"
29#include "sanitizer_common/sanitizer_stackdepot.h"
30#include "sanitizer_common/sanitizer_quarantine.h"
31#include "lsan/lsan_common.h"
32
33namespace __asan {
34
35// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
36// We use adaptive redzones: for larger allocation larger redzones are used.
37static u32 RZLog2Size(u32 rz_log) {
38 CHECK_LT(rz_log, 8);
39 return 16 << rz_log;
40}
41
42static u32 RZSize2Log(u32 rz_size) {
43 CHECK_GE(rz_size, 16);
44 CHECK_LE(rz_size, 2048);
45 CHECK(IsPowerOfTwo(rz_size));
46 u32 res = Log2(rz_size) - 4;
47 CHECK_EQ(rz_size, RZLog2Size(res));
48 return res;
49}
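
The two helpers above are inverses on the valid range. A minimal standalone sketch of the round trip (illustrative names, not the sanitizer sources):

    // The 3-bit redzone encoding: rz_log 0..7 <-> redzone size 16..2048.
    #include <cassert>
    #include <cstdint>

    static uint32_t RZLog2SizeSketch(uint32_t rz_log) { return 16u << rz_log; }

    static uint32_t RZSize2LogSketch(uint32_t rz_size) {
      uint32_t log = 0;
      while ((16u << log) < rz_size) log++;  // Log2(rz_size) - 4 for powers of two
      return log;
    }

    int main() {
      for (uint32_t rz_log = 0; rz_log < 8; rz_log++)
        assert(RZSize2LogSketch(RZLog2SizeSketch(rz_log)) == rz_log);
      return 0;
    }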
50
51static AsanAllocator &get_allocator();
52
53// The memory chunk allocated from the underlying allocator looks like this:
54// L L L L L L H H U U U U U U R R
55// L -- left redzone words (0 or more bytes)
56// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
57// U -- user memory.
58// R -- right redzone (0 or more bytes)
59// ChunkBase consists of ChunkHeader and other bytes that overlap with user
60// memory.
61
62// If the left redzone is greater than the ChunkHeader size we store a magic
63// value in the first uptr word of the memory block and store the address of
64// ChunkBase in the next uptr.
65// M B L L L L L L L L L H H U U U U U U
66// | ^
67// ---------------------|
68// M -- magic value kAllocBegMagic
69// B -- address of ChunkHeader pointing to the first 'H'
70static const uptr kAllocBegMagic = 0xCC6E96B9;
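
A minimal sketch of the handshake described in the comment above, covering both the write side (used at line 510 below) and the read side that GetAsanChunk (line 722 below) relies on; types are simplified stand-ins, not the sanitizer's:

    #include <cassert>
    #include <cstdint>

    static const uintptr_t kMagic = 0xCC6E96B9;  // kAllocBegMagic

    // Writer: when the header is not at the block start, publish 'M' and 'B'.
    static void PublishHeader(uintptr_t *block_begin, uintptr_t header_addr) {
      block_begin[0] = kMagic;       // M word
      block_begin[1] = header_addr;  // B word, address of the first 'H'
    }

    // Reader: recover the header address from the block start.
    static uintptr_t FindHeader(const uintptr_t *block_begin) {
      if (block_begin[0] == kMagic)
        return block_begin[1];
      return reinterpret_cast<uintptr_t>(block_begin);  // header at block start
    }

    int main() {
      uintptr_t block[8] = {};
      uintptr_t header = reinterpret_cast<uintptr_t>(&block[4]);
      PublishHeader(block, header);
      assert(FindHeader(block) == header);
      return 0;
    }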
71
72struct ChunkHeader {
73 // 1-st 8 bytes.
74 u32 chunk_state : 8; // Must be first.
75 u32 alloc_tid : 24;
76
77 u32 free_tid : 24;
78 u32 from_memalign : 1;
79 u32 alloc_type : 2;
80 u32 rz_log : 3;
81 u32 lsan_tag : 2;
82 // 2-nd 8 bytes
83 // This field is used for small sizes. For large sizes it is equal to
84 // SizeClassMap::kMaxSize and the actual size is stored in the
85 // SecondaryAllocator's metadata.
86 u32 user_requested_size : 29;
87 // align < 8 -> 0
88 // else -> log2(min(align, 512)) - 2
89 u32 user_requested_alignment_log : 3;
90 u32 alloc_context_id;
91};
92
93struct ChunkBase : ChunkHeader {
94 // Header2, intersects with user memory.
95 u32 free_context_id;
96};
97
98static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
99static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
100COMPILER_CHECK(kChunkHeaderSize == 16);
101COMPILER_CHECK(kChunkHeader2Size <= 16);
102
103// Every chunk of memory allocated by this allocator can be in one of 3 states:
104// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
105// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
106// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
107enum {
108 CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
109 CHUNK_ALLOCATED = 2,
110 CHUNK_QUARANTINE = 3
111};
112
113struct AsanChunk: ChunkBase {
114 uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
115 uptr UsedSize(bool locked_version = false) {
116 if (user_requested_size != SizeClassMap::kMaxSize)
117 return user_requested_size;
118 return *reinterpret_cast<uptr *>(
119 get_allocator().GetMetaData(AllocBeg(locked_version)));
120 }
121 void *AllocBeg(bool locked_version = false) {
122 if (from_memalign) {
123 if (locked_version)
124 return get_allocator().GetBlockBeginFastLocked(
125 reinterpret_cast<void *>(this));
126 return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
127 }
128 return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
129 }
130 bool AddrIsInside(uptr addr, bool locked_version = false) {
131 return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
132 }
133};
134
135struct QuarantineCallback {
136 QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
137 : cache_(cache),
138 stack_(stack) {
139 }
140
141 void Recycle(AsanChunk *m) {
142 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
143 atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
144 CHECK_NE(m->alloc_tid, kInvalidTid);
145 CHECK_NE(m->free_tid, kInvalidTid);
146 PoisonShadow(m->Beg(),
147 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
148 kAsanHeapLeftRedzoneMagic);
149 void *p = reinterpret_cast<void *>(m->AllocBeg());
150 if (p != m) {
151 uptr *alloc_magic = reinterpret_cast<uptr *>(p);
152 CHECK_EQ(alloc_magic[0], kAllocBegMagic);
153 // Clear the magic value, as allocator internals may overwrite the
154 // contents of deallocated chunk, confusing GetAsanChunk lookup.
155 alloc_magic[0] = 0;
156 CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
157 }
158
159 // Statistics.
160 AsanStats &thread_stats = GetCurrentThreadStats();
161 thread_stats.real_frees++;
162 thread_stats.really_freed += m->UsedSize();
163
164 get_allocator().Deallocate(cache_, p);
165 }
166
167 void *Allocate(uptr size) {
168 void *res = get_allocator().Allocate(cache_, size, 1);
169 // TODO(alekseys): Consider making quarantine OOM-friendly.
170 if (UNLIKELY(!res))
171 ReportOutOfMemory(size, stack_);
172 return res;
173 }
174
175 void Deallocate(void *p) {
176 get_allocator().Deallocate(cache_, p);
177 }
178
179 private:
180 AllocatorCache* const cache_;
181 BufferedStackTrace* const stack_;
182};
183
184typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
185typedef AsanQuarantine::Cache QuarantineCache;
186
187void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
188 PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
189 // Statistics.
190 AsanStats &thread_stats = GetCurrentThreadStats();
191 thread_stats.mmaps++;
192 thread_stats.mmaped += size;
193}
194void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
195 PoisonShadow(p, size, 0);
196 // We are about to unmap a chunk of user memory.
197 // Mark the corresponding shadow memory as not needed.
198 FlushUnneededASanShadowMemory(p, size);
199 // Statistics.
200 AsanStats &thread_stats = GetCurrentThreadStats();
201 thread_stats.munmaps++;
202 thread_stats.munmaped += size;
203}
204
205// We can not use THREADLOCAL because it is not supported on some of the
206// platforms we care about (OSX 10.6, Android).
207// static THREADLOCAL AllocatorCache cache;
208AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
209 CHECK(ms);
210 return &ms->allocator_cache;
211}
212
213QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
214 CHECK(ms);
215 CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
216 return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
217}
218
219void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
220 quarantine_size_mb = f->quarantine_size_mb;
221 thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
222 min_redzone = f->redzone;
223 max_redzone = f->max_redzone;
224 may_return_null = cf->allocator_may_return_null;
225 alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
226 release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
227}
228
229void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
230 f->quarantine_size_mb = quarantine_size_mb;
231 f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
232 f->redzone = min_redzone;
233 f->max_redzone = max_redzone;
234 cf->allocator_may_return_null = may_return_null;
235 f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
236 cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
237}
238
239struct Allocator {
240 static const uptr kMaxAllowedMallocSize =
241 FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
242
243 AsanAllocator allocator;
244 AsanQuarantine quarantine;
245 StaticSpinMutex fallback_mutex;
246 AllocatorCache fallback_allocator_cache;
247 QuarantineCache fallback_quarantine_cache;
248
249 uptr max_user_defined_malloc_size;
250 atomic_uint8_t rss_limit_exceeded;
251
252 // ------------------- Options --------------------------
253 atomic_uint16_t min_redzone;
254 atomic_uint16_t max_redzone;
255 atomic_uint8_t alloc_dealloc_mismatch;
256
257 // ------------------- Initialization ------------------------
258 explicit Allocator(LinkerInitialized)
259 : quarantine(LINKER_INITIALIZED),
260 fallback_quarantine_cache(LINKER_INITIALIZED) {}
261
262 void CheckOptions(const AllocatorOptions &options) const {
263 CHECK_GE(options.min_redzone, 16);
264 CHECK_GE(options.max_redzone, options.min_redzone);
265 CHECK_LE(options.max_redzone, 2048);
266 CHECK(IsPowerOfTwo(options.min_redzone));
267 CHECK(IsPowerOfTwo(options.max_redzone));
268 }
269
270 void SharedInitCode(const AllocatorOptions &options) {
271 CheckOptions(options);
272 quarantine.Init((uptr)options.quarantine_size_mb << 20,
273 (uptr)options.thread_local_quarantine_size_kb << 10);
274 atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
275 memory_order_release);
276 atomic_store(&min_redzone, options.min_redzone, memory_order_release);
277 atomic_store(&max_redzone, options.max_redzone, memory_order_release);
278 }
279
280 void InitLinkerInitialized(const AllocatorOptions &options) {
281 SetAllocatorMayReturnNull(options.may_return_null);
282 allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
283 SharedInitCode(options);
284 max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
285 ? common_flags()->max_allocation_size_mb
286 << 20
287 : kMaxAllowedMallocSize;
288 }
289
290 bool RssLimitExceeded() {
291 return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
[4] Calling 'atomic_load<__sanitizer::atomic_uint8_t>'
[7] Returning from 'atomic_load<__sanitizer::atomic_uint8_t>'
[8] Returning value, which participates in a condition later
292 }
293
294 void SetRssLimitExceeded(bool limit_exceeded) {
295 atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
296 }
297
298 void RePoisonChunk(uptr chunk) {
299 // This could be a user-facing chunk (with redzones), or some internal
300 // housekeeping chunk, like TransferBatch. Start by assuming the former.
301 AsanChunk *ac = GetAsanChunk((void *)chunk);
302 uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
303 uptr beg = ac->Beg();
304 uptr end = ac->Beg() + ac->UsedSize(true);
305 uptr chunk_end = chunk + allocated_size;
306 if (chunk < beg && beg < end && end <= chunk_end &&
307 ac->chunk_state == CHUNK_ALLOCATED) {
308 // Looks like a valid AsanChunk in use, poison redzones only.
309 PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
310 uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
311 FastPoisonShadowPartialRightRedzone(
312 end_aligned_down, end - end_aligned_down,
313 chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
314 } else {
315 // This is either not an AsanChunk or freed or quarantined AsanChunk.
316 // In either case, poison everything.
317 PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
318 }
319 }
320
321 void ReInitialize(const AllocatorOptions &options) {
322 SetAllocatorMayReturnNull(options.may_return_null);
323 allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
324 SharedInitCode(options);
325
326 // Poison all existing allocation's redzones.
327 if (CanPoisonMemory()) {
328 allocator.ForceLock();
329 allocator.ForEachChunk(
330 [](uptr chunk, void *alloc) {
331 ((Allocator *)alloc)->RePoisonChunk(chunk);
332 },
333 this);
334 allocator.ForceUnlock();
335 }
336 }
337
338 void GetOptions(AllocatorOptions *options) const {
339 options->quarantine_size_mb = quarantine.GetSize() >> 20;
340 options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
341 options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
342 options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
343 options->may_return_null = AllocatorMayReturnNull();
344 options->alloc_dealloc_mismatch =
345 atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
346 options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
347 }
348
349 // -------------------- Helper methods. -------------------------
350 uptr ComputeRZLog(uptr user_requested_size) {
351 u32 rz_log =
352 user_requested_size <= 64 - 16 ? 0 :
353 user_requested_size <= 128 - 32 ? 1 :
354 user_requested_size <= 512 - 64 ? 2 :
355 user_requested_size <= 4096 - 128 ? 3 :
356 user_requested_size <= (1 << 14) - 256 ? 4 :
357 user_requested_size <= (1 << 15) - 512 ? 5 :
358 user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
359 u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
360 u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
361 return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
362 }
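
For example, a 100-byte request fails the first two tests (100 > 64 - 16 and 100 > 128 - 32) but passes the third (100 <= 512 - 64), giving rz_log = 2, i.e. a 64-byte redzone; the final Min/Max line then clamps that into the configured [min_redzone, max_redzone] window.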
363
364 static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
365 if (user_requested_alignment < 8)
366 return 0;
367 if (user_requested_alignment > 512)
368 user_requested_alignment = 512;
369 return Log2(user_requested_alignment) - 2;
370 }
371
372 static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
373 if (user_requested_alignment_log == 0)
374 return 0;
375 return 1LL << (user_requested_alignment_log + 2);
376 }
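
A worked round trip of the two helpers above: alignments below 8 encode as 0; otherwise the 3-bit field stores log2(min(align, 512)) - 2. A minimal sketch (illustrative names, not the sanitizer sources):

    // Illustration of the 3-bit user-alignment encoding above.
    #include <cassert>
    #include <cstdint>

    static uint64_t EncodeAlignLog(uint64_t align) {
      if (align < 8) return 0;
      if (align > 512) align = 512;
      uint64_t log = 0;
      while ((1ull << log) < align) log++;  // Log2 for power-of-two inputs
      return log - 2;                       // 8 -> 1, 16 -> 2, ..., 512 -> 7
    }

    static uint64_t DecodeAlign(uint64_t align_log) {
      return align_log == 0 ? 0 : 1ull << (align_log + 2);
    }

    int main() {
      assert(EncodeAlignLog(4) == 0);     // below 8: not recorded
      assert(EncodeAlignLog(32) == 3);    // log2(32) - 2
      assert(DecodeAlign(3) == 32);       // round-trips for align in [8, 512]
      assert(EncodeAlignLog(4096) == 7);  // clamped to 512
      return 0;
    }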
377
378 // We have an address between two chunks, and we want to report just one.
379 AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
380 AsanChunk *right_chunk) {
381 // Prefer an allocated chunk over freed chunk and freed chunk
382 // over available chunk.
383 if (left_chunk->chunk_state != right_chunk->chunk_state) {
384 if (left_chunk->chunk_state == CHUNK_ALLOCATED)
385 return left_chunk;
386 if (right_chunk->chunk_state == CHUNK_ALLOCATED)
387 return right_chunk;
388 if (left_chunk->chunk_state == CHUNK_QUARANTINE)
389 return left_chunk;
390 if (right_chunk->chunk_state == CHUNK_QUARANTINE)
391 return right_chunk;
392 }
393 // Same chunk_state: choose based on offset.
394 sptr l_offset = 0, r_offset = 0;
395 CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
396 CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
397 if (l_offset < r_offset)
398 return left_chunk;
399 return right_chunk;
400 }
401
402 bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
403 AsanChunk *m = GetAsanChunkByAddr(addr);
404 if (!m) return false;
405 if (m->chunk_state != CHUNK_ALLOCATED) return false;
406 if (m->Beg() != addr) return false;
407 atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
408 memory_order_relaxed);
409 return true;
410 }
411
412 // -------------------- Allocation/Deallocation routines ---------------
413 void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
414 AllocType alloc_type, bool can_fill) {
415 if (UNLIKELY(!asan_inited))
[1] Assuming 'asan_inited' is not equal to 0
[2] Taking false branch
416 AsanInitFromRtl();
417 if (RssLimitExceeded()) {
[3] Calling 'Allocator::RssLimitExceeded'
[9] Returning from 'Allocator::RssLimitExceeded'
[10] Assuming the condition is false
[11] Taking false branch
418 if (AllocatorMayReturnNull())
419 return nullptr;
420 ReportRssLimitExceeded(stack);
421 }
422 Flags &fl = *flags();
423 CHECK(stack);
[12] Assuming 'v1' is not equal to 'v2'
[13] Taking false branch
[14] Loop condition is false. Exiting loop
424 const uptr min_alignment = SHADOW_GRANULARITY;
425 const uptr user_requested_alignment_log =
426 ComputeUserRequestedAlignmentLog(alignment);
427 if (alignment < min_alignment)
[14.1] 'alignment' is >= 'min_alignment'
[15] Taking false branch
428 alignment = min_alignment;
429 if (size == 0) {
[16] Assuming 'size' is not equal to 0
[17] Taking false branch
430 // We'd be happy to avoid allocating memory for zero-size requests, but
431 // some programs/tests depend on this behavior and assume that malloc
432 // would not return NULL even for zero-size allocations. Moreover, it
433 // looks like operator new should never return NULL, and results of
434 // consecutive "new" calls must be different even if the allocated size
435 // is zero.
436 size = 1;
437 }
438 CHECK(IsPowerOfTwo(alignment));
[18] Taking false branch
[19] Loop condition is false. Exiting loop
439 uptr rz_log = ComputeRZLog(size);
440 uptr rz_size = RZLog2Size(rz_log);
441 uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
442 uptr needed_size = rounded_size + rz_size;
443 if (alignment > min_alignment)
[20] Assuming 'alignment' is <= 'min_alignment'
[21] Taking false branch
444 needed_size += alignment;
445 bool using_primary_allocator = true;
446 // If we are allocating from the secondary allocator, there will be no
447 // automatic right redzone, so add the right redzone manually.
448 if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
[22] Taking true branch
449 needed_size += rz_size;
450 using_primary_allocator = false;
451 }
452 CHECK(IsAligned(needed_size, min_alignment));
[23] Taking false branch
[24] Loop condition is false. Exiting loop
453 if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
[25] Assuming 'size' is <= 'kMaxAllowedMallocSize'
[26] Assuming 'needed_size' is <= 'kMaxAllowedMallocSize'
[28] Taking false branch
454 size > max_user_defined_malloc_size) {
[27] Assuming 'size' is <= field 'max_user_defined_malloc_size'
455 if (AllocatorMayReturnNull()) {
456 Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
457 (void*)size);
458 return nullptr;
459 }
460 uptr malloc_limit =
461 Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
462 ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
463 }
464
465 AsanThread *t = GetCurrentThread();
466 void *allocated;
467 if (t) {
[29] Assuming 't' is non-null
[30] Taking true branch
468 AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
469 allocated = allocator.Allocate(cache, needed_size, 8);
[31] Calling 'CombinedAllocator::Allocate'
[45] Returning from 'CombinedAllocator::Allocate'
[46] Value assigned to 'allocated'
470 } else {
471 SpinMutexLock l(&fallback_mutex);
472 AllocatorCache *cache = &fallback_allocator_cache;
473 allocated = allocator.Allocate(cache, needed_size, 8);
474 }
475 if (UNLIKELY(!allocated)) {
[47] Assuming 'allocated' is null
[48] Taking true branch
476 SetAllocatorOutOfMemory();
477 if (AllocatorMayReturnNull())
[49] Assuming the condition is false
[50] Taking false branch
478 return nullptr;
479 ReportOutOfMemory(size, stack);
480 }
481
482 if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
[51] Assuming the condition is false
483 // Heap poisoning is enabled, but the allocator provides an unpoisoned
484 // chunk. This is possible if CanPoisonMemory() was false for some
485 // time, for example, due to flags()->start_disabled.
486 // Anyway, poison the block before using it for anything else.
487 uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
488 PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
489 }
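
MEM_TO_SHADOW at line 482 is the core shadow translation: on this i386 build it expands to (addr >> kDefaultShadowScale) + kDefaultShadowOffset32, so one shadow byte describes SHADOW_GRANULARITY = 1 << kDefaultShadowScale application bytes. A standalone sketch of that arithmetic with assumed constants (scale 3, offset 0x20000000; asan_mapping.h is authoritative):

    // Sketch of the application-address -> shadow-byte mapping.
    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kShadowScale = 3;            // kDefaultShadowScale (assumed)
    static const uintptr_t kShadowOffset = 0x20000000;  // kDefaultShadowOffset32 (assumed)

    static uintptr_t MemToShadowSketch(uintptr_t addr) {
      return (addr >> kShadowScale) + kShadowOffset;    // one byte per 8 app bytes
    }

    int main() {
      uintptr_t a = 0x40000000;
      std::printf("app %#zx -> shadow %#zx\n", (size_t)a, (size_t)MemToShadowSketch(a));
      return 0;
    }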
490
491 uptr alloc_beg = reinterpret_cast<uptr>(allocated);
[52] 'alloc_beg' initialized to 0
492 uptr alloc_end = alloc_beg + needed_size;
493 uptr beg_plus_redzone = alloc_beg + rz_size;
494 uptr user_beg = beg_plus_redzone;
495 if (!IsAligned(user_beg, alignment))
[53] Taking false branch
496 user_beg = RoundUpTo(user_beg, alignment);
497 uptr user_end = user_beg + size;
498 CHECK_LE(user_end, alloc_end);
[54] Assuming 'v1' is <= 'v2'
[55] Taking false branch
[56] Loop condition is false. Exiting loop
499 uptr chunk_beg = user_beg - kChunkHeaderSize;
500 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
501 m->alloc_type = alloc_type;
502 m->rz_log = rz_log;
503 u32 alloc_tid = t ? t->tid() : 0;
[56.1] 't' is non-null
[57] '?' condition is true
504 m->alloc_tid = alloc_tid;
505 CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
[58] Taking false branch
[59] Loop condition is false. Exiting loop
506 m->free_tid = kInvalidTid;
507 m->from_memalign = user_beg != beg_plus_redzone;
508 if (alloc_beg != chunk_beg) {
[60] Assuming 'alloc_beg' is not equal to 'chunk_beg'
[61] Taking true branch
509 CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
[62] Assuming 'v1' is <= 'v2'
[63] Taking false branch
[64] Loop condition is false. Exiting loop
510 reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
[65] Array access (from variable 'alloc_beg') results in a null pointer dereference
511 reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
512 }
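
Putting the path together: the analyzer assumed allocator.Allocate returned null ([47]), took AllocatorMayReturnNull() as false ([49], [50]), and then fell through ReportOutOfMemory(size, stack) at line 479 as if that call could return. From there alloc_beg is 0 from [52] onward, the CHECK_LE at line 509 is satisfiable ([62]-[64]), and the store at line 510 becomes a null-pointer write. If ReportOutOfMemory is visibly noreturn to the analyzer, the null branch dies before line 491 and this report disappears; a minimal sketch of that shape (illustrative names, not the sanitizer sources):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // A [[noreturn]] diagnostic helper: the analyzer prunes every path that
    // reaches it, so 'allocated' is provably non-null at the store below.
    [[noreturn]] static void ReportOutOfMemorySketch(size_t size) {
      std::fprintf(stderr, "out of memory: %zu bytes requested\n", size);
      std::abort();
    }

    static void StoreMagic(void *allocated, size_t size) {
      if (!allocated)
        ReportOutOfMemorySketch(size);  // null path ends here, not at [52]
      uintptr_t *words = static_cast<uintptr_t *>(allocated);
      words[0] = 0xCC6E96B9;  // the kAllocBegMagic store from line 510
    }

    int main() {
      uintptr_t block[2] = {};
      StoreMagic(block, sizeof(block));
      return 0;
    }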
513 if (using_primary_allocator) {
514 CHECK(size);
515 m->user_requested_size = size;
516 CHECK(allocator.FromPrimary(allocated));
517 } else {
518 CHECK(!allocator.FromPrimary(allocated));
519 m->user_requested_size = SizeClassMap::kMaxSize;
520 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
521 meta[0] = size;
522 meta[1] = chunk_beg;
523 }
524 m->user_requested_alignment_log = user_requested_alignment_log;
525
526 m->alloc_context_id = StackDepotPut(*stack);
527
528 uptr size_rounded_down_to_granularity =
529 RoundDownTo(size, SHADOW_GRANULARITY);
530 // Unpoison the bulk of the memory region.
531 if (size_rounded_down_to_granularity)
532 PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
533 // Deal with the end of the region if size is not aligned to granularity.
534 if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
535 u8 *shadow =
536 (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
537 *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
538 }
539
540 AsanStats &thread_stats = GetCurrentThreadStats();
541 thread_stats.mallocs++;
542 thread_stats.malloced += size;
543 thread_stats.malloced_redzones += needed_size - size;
544 if (needed_size > SizeClassMap::kMaxSize)
545 thread_stats.malloc_large++;
546 else
547 thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
548
549 void *res = reinterpret_cast<void *>(user_beg);
550 if (can_fill && fl.max_malloc_fill_size) {
551 uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
552 REAL(memset)(res, fl.malloc_fill_byte, fill_size);
553 }
554#if CAN_SANITIZE_LEAKS
555 m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
556 : __lsan::kDirectlyLeaked;
557#endif
558 // Must be the last mutation of metadata in this function.
559 atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
560 ASAN_MALLOC_HOOK(res, size);
561 return res;
562 }
563
564 // Set quarantine flag if chunk is allocated, issue ASan error report on
565 // available and quarantined chunks. Return true on success, false otherwise.
566 bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
567 BufferedStackTrace *stack) {
568 u8 old_chunk_state = CHUNK_ALLOCATED;
569 // Flip the chunk_state atomically to avoid race on double-free.
570 if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
571 CHUNK_QUARANTINE,
572 memory_order_acquire)) {
573 ReportInvalidFree(ptr, old_chunk_state, stack);
574 // It's not safe to push a chunk in quarantine on invalid free.
575 return false;
576 }
577 CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
578 return true;
579 }
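
The compare-exchange above is the entire double-free defense: of two racing frees, exactly one wins the CHUNK_ALLOCATED -> CHUNK_QUARANTINE transition, and the loser reads back the stale state and reports. A standalone sketch of that pattern (simplified std atomics, not the sanitizer's wrappers):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    enum : uint8_t { ALLOCATED = 2, QUARANTINE = 3 };

    // Returns true for the single caller that wins the transition.
    static bool TryQuarantine(std::atomic<uint8_t> &state) {
      uint8_t expected = ALLOCATED;
      return state.compare_exchange_strong(expected, QUARANTINE,
                                           std::memory_order_acquire);
    }

    int main() {
      std::atomic<uint8_t> state{ALLOCATED};
      assert(TryQuarantine(state));   // first free wins
      assert(!TryQuarantine(state));  // second free fails -> invalid-free report
      return 0;
    }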
580
581 // Expects the chunk to already be marked as quarantined by using
582 // AtomicallySetQuarantineFlagIfAllocated.
583 void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
584 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
585 CHECK_GE(m->alloc_tid, 0);
586 if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
587 CHECK_EQ(m->free_tid, kInvalidTid);
588 AsanThread *t = GetCurrentThread();
589 m->free_tid = t ? t->tid() : 0;
590 m->free_context_id = StackDepotPut(*stack);
591
592 Flags &fl = *flags();
593 if (fl.max_free_fill_size > 0) {
594 // We have to skip the chunk header, it contains free_context_id.
595 uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
596 if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
597 uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
598 size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
599 REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
600 }
601 }
602
603 // Poison the region.
604 PoisonShadow(m->Beg(),
605 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
606 kAsanHeapFreeMagic);
607
608 AsanStats &thread_stats = GetCurrentThreadStats();
609 thread_stats.frees++;
610 thread_stats.freed += m->UsedSize();
611
612 // Push into quarantine.
613 if (t) {
614 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
615 AllocatorCache *ac = GetAllocatorCache(ms);
616 quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
617 m->UsedSize());
618 } else {
619 SpinMutexLock l(&fallback_mutex);
620 AllocatorCache *ac = &fallback_allocator_cache;
621 quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
622 m, m->UsedSize());
623 }
624 }
625
626 void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
627 BufferedStackTrace *stack, AllocType alloc_type) {
628 uptr p = reinterpret_cast<uptr>(ptr);
629 if (p == 0) return;
630
631 uptr chunk_beg = p - kChunkHeaderSize;
632 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
633
634 // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
635 // malloc. Don't report an invalid free in this case.
636 if (SANITIZER_WINDOWS &&
637 !get_allocator().PointerIsMine(ptr)) {
638 if (!IsSystemHeapAddress(p))
639 ReportFreeNotMalloced(p, stack);
640 return;
641 }
642
643 ASAN_FREE_HOOK(ptr);
644
645 // Must mark the chunk as quarantined before any changes to its metadata.
646 // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
647 if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
648
649 if (m->alloc_type != alloc_type) {
650 if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
651 ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
652 (AllocType)alloc_type);
653 }
654 } else {
655 if (flags()->new_delete_type_mismatch &&
656 (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
657 ((delete_size && delete_size != m->UsedSize()) ||
658 ComputeUserRequestedAlignmentLog(delete_alignment) !=
659 m->user_requested_alignment_log)) {
660 ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
661 }
662 }
663
664 QuarantineChunk(m, ptr, stack);
665 }
666
667 void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
668 CHECK(old_ptr && new_size);
669 uptr p = reinterpret_cast<uptr>(old_ptr);
670 uptr chunk_beg = p - kChunkHeaderSize;
671 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
672
673 AsanStats &thread_stats = GetCurrentThreadStats();
674 thread_stats.reallocs++;
675 thread_stats.realloced += new_size;
676
677 void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
678 if (new_ptr) {
679 u8 chunk_state = m->chunk_state;
680 if (chunk_state != CHUNK_ALLOCATED)
681 ReportInvalidFree(old_ptr, chunk_state, stack);
682 CHECK_NE(REAL(memcpy), nullptr);
683 uptr memcpy_size = Min(new_size, m->UsedSize());
684 // If realloc() races with free(), we may start copying freed memory.
685 // However, we will report racy double-free later anyway.
686 REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
687 Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
688 }
689 return new_ptr;
690 }
691
692 void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
693 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
694 if (AllocatorMayReturnNull())
695 return nullptr;
696 ReportCallocOverflow(nmemb, size, stack);
697 }
698 void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
699 // If the memory comes from the secondary allocator no need to clear it
700 // as it comes directly from mmap.
701 if (ptr && allocator.FromPrimary(ptr))
702 REAL(memset)(ptr, 0, nmemb * size);
703 return ptr;
704 }
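
CheckForCallocOverflow above rejects products nmemb * size that wrap; a minimal sketch of an equivalent test (an assumed shape, the real check lives in sanitizer_common's allocator checks):

    // Sketch of a calloc-style multiplication overflow check.
    #include <cassert>
    #include <cstdint>

    static bool CallocOverflows(size_t size, size_t n) {
      if (size == 0) return false;    // 0 * n never overflows
      return n > SIZE_MAX / size;     // true iff size * n wraps
    }

    int main() {
      assert(!CallocOverflows(8, 100));
      assert(CallocOverflows(SIZE_MAX / 2, 3));  // would wrap -> report path
      return 0;
    }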
705
706 void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
707 if (chunk_state == CHUNK_QUARANTINE)
708 ReportDoubleFree((uptr)ptr, stack);
709 else
710 ReportFreeNotMalloced((uptr)ptr, stack);
711 }
712
713 void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
714 AllocatorCache *ac = GetAllocatorCache(ms);
715 quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
716 allocator.SwallowCache(ac);
717 }
718
719 // -------------------------- Chunk lookup ----------------------
720
721 // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
722 AsanChunk *GetAsanChunk(void *alloc_beg) {
723 if (!alloc_beg) return nullptr;
724 if (!allocator.FromPrimary(alloc_beg)) {
725 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
726 AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
727 return m;
728 }
729 uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
730 if (alloc_magic[0] == kAllocBegMagic)
731 return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
732 return reinterpret_cast<AsanChunk *>(alloc_beg);
733 }
734
735 AsanChunk *GetAsanChunkByAddr(uptr p) {
736 void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
737 return GetAsanChunk(alloc_beg);
738 }
739
740 // Allocator must be locked when this function is called.
741 AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
742 void *alloc_beg =
743 allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
744 return GetAsanChunk(alloc_beg);
745 }
746
747 uptr AllocationSize(uptr p) {
748 AsanChunk *m = GetAsanChunkByAddr(p);
749 if (!m) return 0;
750 if (m->chunk_state != CHUNK_ALLOCATED) return 0;
751 if (m->Beg() != p) return 0;
752 return m->UsedSize();
753 }
754
755 AsanChunkView FindHeapChunkByAddress(uptr addr) {
756 AsanChunk *m1 = GetAsanChunkByAddr(addr);
757 if (!m1) return AsanChunkView(m1);
758 sptr offset = 0;
759 if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
760 // The address is in the chunk's left redzone, so maybe it is actually
761 // a right buffer overflow from the other chunk to the left.
762 // Search a bit to the left to see if there is another chunk.
763 AsanChunk *m2 = nullptr;
764 for (uptr l = 1; l < GetPageSizeCached(); l++) {
765 m2 = GetAsanChunkByAddr(addr - l);
766 if (m2 == m1) continue; // Still the same chunk.
767 break;
768 }
769 if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
770 m1 = ChooseChunk(addr, m2, m1);
771 }
772 return AsanChunkView(m1);
773 }
774
775 void Purge(BufferedStackTrace *stack) {
776 AsanThread *t = GetCurrentThread();
777 if (t) {
778 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
779 quarantine.DrainAndRecycle(GetQuarantineCache(ms),
780 QuarantineCallback(GetAllocatorCache(ms),
781 stack));
782 }
783 {
784 SpinMutexLock l(&fallback_mutex);
785 quarantine.DrainAndRecycle(&fallback_quarantine_cache,
786 QuarantineCallback(&fallback_allocator_cache,
787 stack));
788 }
789
790 allocator.ForceReleaseToOS();
791 }
792
793 void PrintStats() {
794 allocator.PrintStats();
795 quarantine.PrintStats();
796 }
797
798 void ForceLock() {
799 allocator.ForceLock();
800 fallback_mutex.Lock();
801 }
802
803 void ForceUnlock() {
804 fallback_mutex.Unlock();
805 allocator.ForceUnlock();
806 }
807};
808
809static Allocator instance(LINKER_INITIALIZED);
810
811static AsanAllocator &get_allocator() {
812 return instance.allocator;
813}
814
815bool AsanChunkView::IsValid() const {
816 return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
817}
818bool AsanChunkView::IsAllocated() const {
819 return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
820}
821bool AsanChunkView::IsQuarantined() const {
822 return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
823}
824uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
825uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
826uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
827u32 AsanChunkView::UserRequestedAlignment() const {
828 return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
829}
830uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
831uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
832AllocType AsanChunkView::GetAllocType() const {
833 return (AllocType)chunk_->alloc_type;
834}
835
836static StackTrace GetStackTraceFromId(u32 id) {
837 CHECK(id);
838 StackTrace res = StackDepotGet(id);
839 CHECK(res.trace);
840 return res;
841}
842
843u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
844u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
845
846StackTrace AsanChunkView::GetAllocStack() const {
847 return GetStackTraceFromId(GetAllocStackId());
848}
849
850StackTrace AsanChunkView::GetFreeStack() const {
851 return GetStackTraceFromId(GetFreeStackId());
852}
853
854void InitializeAllocator(const AllocatorOptions &options) {
855 instance.InitLinkerInitialized(options);
856}
857
858void ReInitializeAllocator(const AllocatorOptions &options) {
859 instance.ReInitialize(options);
860}
861
862void GetAllocatorOptions(AllocatorOptions *options) {
863 instance.GetOptions(options);
864}
865
866AsanChunkView FindHeapChunkByAddress(uptr addr) {
867 return instance.FindHeapChunkByAddress(addr);
868}
869AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
870 return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
871}
872
873void AsanThreadLocalMallocStorage::CommitBack() {
874 GET_STACK_TRACE_MALLOC;
875 instance.CommitBack(this, &stack);
876}
877
878void PrintInternalAllocatorStats() {
879 instance.PrintStats();
880}
881
882void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
883 instance.Deallocate(ptr, 0, 0, stack, alloc_type);
884}
885
886void asan_delete(void *ptr, uptr size, uptr alignment,
887 BufferedStackTrace *stack, AllocType alloc_type) {
888 instance.Deallocate(ptr, size, alignment, stack, alloc_type);
889}
890
891void *asan_malloc(uptr size, BufferedStackTrace *stack) {
892 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
893}
894
895void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
896 return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
897}
898
899void *asan_reallocarray(void *p, uptr nmemb, uptr size,
900 BufferedStackTrace *stack) {
901 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
902 errno = errno_ENOMEM;
903 if (AllocatorMayReturnNull())
904 return nullptr;
905 ReportReallocArrayOverflow(nmemb, size, stack);
906 }
907 return asan_realloc(p, nmemb * size, stack);
908}
909
910void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
911 if (!p)
912 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
913 if (size == 0) {
914 if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
915 instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
916 return nullptr;
917 }
918 // Allocate a size of 1 if we shouldn't free() on Realloc to 0
919 size = 1;
920 }
921 return SetErrnoOnNull(instance.Reallocate(p, size, stack));
922}
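
The zero-size branch above is flag-controlled: when allocator_frees_and_returns_null_on_realloc_zero is set, realloc(p, 0) frees p and returns null; when it is clear, the request is bumped to one byte (line 919) so the caller gets a live pointer back, as the comment at line 918 notes.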
923
924void *asan_valloc(uptr size, BufferedStackTrace *stack) {
925 return SetErrnoOnNull(
926 instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
927}
928
929void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
930 uptr PageSize = GetPageSizeCached();
931 if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
932 errno = errno_ENOMEM;
933 if (AllocatorMayReturnNull())
934 return nullptr;
935 ReportPvallocOverflow(size, stack);
936 }
937 // pvalloc(0) should allocate one page.
938 size = size ? RoundUpTo(size, PageSize) : PageSize;
939 return SetErrnoOnNull(
940 instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
941}
942
943void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
944 AllocType alloc_type) {
945 if (UNLIKELY(!IsPowerOfTwo(alignment))) {
946 errno = errno_EINVAL;
947 if (AllocatorMayReturnNull())
948 return nullptr;
949 ReportInvalidAllocationAlignment(alignment, stack);
950 }
951 return SetErrnoOnNull(
952 instance.Allocate(size, alignment, stack, alloc_type, true));
953}
954
955void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
956 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
957 errno(*__errno_location()) = errno_EINVAL22;
958 if (AllocatorMayReturnNull())
959 return nullptr;
960 ReportInvalidAlignedAllocAlignment(size, alignment, stack);
961 }
962 return SetErrnoOnNull(
963 instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
964}
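
CheckAlignedAllocAlignmentAndSize enforces the C11 aligned_alloc contract:
alignment is a power of two and size is a multiple of alignment. A
caller-side sketch:

  void *ok  = aligned_alloc(64, 128);  // 128 % 64 == 0, accepted
  void *bad = aligned_alloc(64, 100);  // 100 % 64 != 0, rejected with EINVAL
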
965
966int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
967 BufferedStackTrace *stack) {
968 if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
969 if (AllocatorMayReturnNull())
970 return errno_EINVAL;
971 ReportInvalidPosixMemalignAlignment(alignment, stack);
972 }
973 void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
974 if (UNLIKELY(!ptr))
975 // OOM error is already taken care of by Allocate.
976 return errno_ENOMEM;
977 CHECK(IsAligned((uptr)ptr, alignment));
978 *memptr = ptr;
979 return 0;
980}
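
Unlike the malloc-family entry points above, posix_memalign reports failure
through its return value rather than through errno, which is why this
function returns errno_EINVAL/errno_ENOMEM directly instead of going
through SetErrnoOnNull. A caller-side sketch:

  void *p = nullptr;
  int rc = posix_memalign(&p, 64, 1024);  // 64: power of two, multiple of sizeof(void *)
  if (rc == 0)
    free(p);
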
981
982uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
983 if (!ptr) return 0;
984 uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
985 if (flags()->check_malloc_usable_size && (usable_size == 0)) {
986 GET_STACK_TRACE_FATAL(pc, bp);
987 ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
988 }
989 return usable_size;
990}
991
992uptr asan_mz_size(const void *ptr) {
993 return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
994}
995
996void asan_mz_force_lock() {
997 instance.ForceLock();
998}
999
1000void asan_mz_force_unlock() {
1001 instance.ForceUnlock();
1002}
1003
1004void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
1005 instance.SetRssLimitExceeded(limit_exceeded);
1006}
1007
1008} // namespace __asan
1009
1010// --- Implementation of LSan-specific functions --- {{{1
1011namespace __lsan {
1012void LockAllocator() {
1013 __asan::get_allocator().ForceLock();
1014}
1015
1016void UnlockAllocator() {
1017 __asan::get_allocator().ForceUnlock();
1018}
1019
1020void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
1021 *begin = (uptr)&__asan::get_allocator();
1022 *end = *begin + sizeof(__asan::get_allocator());
1023}
1024
1025uptr PointsIntoChunk(void* p) {
1026 uptr addr = reinterpret_cast<uptr>(p);
1027 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1028 if (!m) return 0;
1029 uptr chunk = m->Beg();
1030 if (m->chunk_state != __asan::CHUNK_ALLOCATED)
1031 return 0;
1032 if (m->AddrIsInside(addr, /*locked_version=*/true))
1033 return chunk;
1034 if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
1035 addr))
1036 return chunk;
1037 return 0;
1038}
1039
1040uptr GetUserBegin(uptr chunk) {
1041 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1042 CHECK(m);
1043 return m->Beg();
1044}
1045
1046LsanMetadata::LsanMetadata(uptr chunk) {
1047 metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
1048}
1049
1050bool LsanMetadata::allocated() const {
1051 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1052 return m->chunk_state == __asan::CHUNK_ALLOCATED;
1053}
1054
1055ChunkTag LsanMetadata::tag() const {
1056 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1057 return static_cast<ChunkTag>(m->lsan_tag);
1058}
1059
1060void LsanMetadata::set_tag(ChunkTag value) {
1061 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1062 m->lsan_tag = value;
1063}
1064
1065uptr LsanMetadata::requested_size() const {
1066 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1067 return m->UsedSize(/*locked_version=*/true);
1068}
1069
1070u32 LsanMetadata::stack_trace_id() const {
1071 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1072 return m->alloc_context_id;
1073}
1074
1075void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1076 __asan::get_allocator().ForEachChunk(callback, arg);
1077}
1078
1079IgnoreObjectResult IgnoreObjectLocked(const void *p) {
1080 uptr addr = reinterpret_cast<uptr>(p);
1081 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1082 if (!m) return kIgnoreObjectInvalid;
1083 if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
1084 if (m->lsan_tag == kIgnored)
1085 return kIgnoreObjectAlreadyIgnored;
1086 m->lsan_tag = __lsan::kIgnored;
1087 return kIgnoreObjectSuccess;
1088 } else {
1089 return kIgnoreObjectInvalid;
1090 }
1091}
1092} // namespace __lsan
1093
1094// ---------------------- Interface ---------------- {{{1
1095using namespace __asan;
1096
1097// ASan allocator doesn't reserve extra bytes, so normally we would
1098// just return "size". We don't want to expose our redzone sizes, etc., here.
1099uptr __sanitizer_get_estimated_allocated_size(uptr size) {
1100 return size;
1101}
1102
1103int __sanitizer_get_ownership(const void *p) {
1104 uptr ptr = reinterpret_cast<uptr>(p);
1105 return instance.AllocationSize(ptr) > 0;
1106}
1107
1108uptr __sanitizer_get_allocated_size(const void *p) {
1109 if (!p) return 0;
1110 uptr ptr = reinterpret_cast<uptr>(p);
1111 uptr allocated_size = instance.AllocationSize(ptr);
1112 // Die if p is not malloced or if it is already freed.
1113 if (allocated_size == 0) {
1114 GET_STACK_TRACE_FATAL_HERE;
1115 ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
1116 }
1117 return allocated_size;
1118}
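
The interface functions above are declared in
<sanitizer/allocator_interface.h>. A usage sketch, assuming the program is
built with -fsanitize=address:

  #include <sanitizer/allocator_interface.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main() {
    void *p = malloc(100);
    printf("estimate: %zu\n", __sanitizer_get_estimated_allocated_size(100));
    printf("owned: %d, size: %zu\n", __sanitizer_get_ownership(p),
           __sanitizer_get_allocated_size(p));
    free(p);
    return 0;
  }
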
1119
1120void __sanitizer_purge_allocator() {
1121 GET_STACK_TRACE_MALLOC;
1122 instance.Purge(&stack);
1123}
1124
1125int __asan_update_allocation_context(void* addr) {
1126 GET_STACK_TRACE_MALLOC;
1127 return instance.UpdateAllocationStack((uptr)addr, &stack);
1128}
1129
1130#if !SANITIZER_SUPPORTS_WEAK_HOOKS
1131// Provide default (no-op) implementation of malloc hooks.
1132SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
1133                             void *ptr, uptr size) {
1134 (void)ptr;
1135 (void)size;
1136}
1137
1138SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
1139 (void)ptr;
1140}
1141#endif

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/../sanitizer_common/sanitizer_atomic_clang_x86.h

1//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10// Not intended for direct inclusion. Include sanitizer_atomic.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef SANITIZER_ATOMIC_CLANG_X86_H
15#define SANITIZER_ATOMIC_CLANG_X86_H
16
17namespace __sanitizer {
18
19INLINE void proc_yield(int cnt) {
20 __asm__ __volatile__("" ::: "memory");
21 for (int i = 0; i < cnt; i++)
22 __asm__ __volatile__("pause");
23 __asm__ __volatile__("" ::: "memory");
24}
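
proc_yield is a spin-wait hint: "pause" tells the core that a busy loop is
in progress. A sketch of the intended use (the polling function is
illustrative, not part of this header):

  void SpinUntilSet(const volatile int *flag) {
    while (!*flag)
      proc_yield(10);  // back off between polls
  }
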
25
26template<typename T>
27INLINE typename T::Type atomic_load(
28 const volatile T *a, memory_order mo) {
29 DCHECK(mo & (memory_order_relaxed | memory_order_consume
30 | memory_order_acquire | memory_order_seq_cst));
31 DCHECK(!((uptr)a % sizeof(*a)));
32 typename T::Type v;
33
34 if (sizeof(*a) < 8 || sizeof(void*) == 8) {
35 // Assume that aligned loads are atomic.
36 if (mo == memory_order_relaxed) {
4.1
'mo' is equal to memory_order_relaxed
5
Taking true branch
37 v = a->val_dont_use;
38 } else if (mo == memory_order_consume) {
39 // Assume that processor respects data dependencies
40 // (and that compiler won't break them).
41 __asm__ __volatile__("" ::: "memory");
42 v = a->val_dont_use;
43 __asm__ __volatile__("" ::: "memory");
44 } else if (mo == memory_order_acquire) {
45 __asm__ __volatile__("" ::: "memory");
46 v = a->val_dont_use;
47 // On x86 loads are implicitly acquire.
48 __asm__ __volatile__("" ::: "memory");
49 } else { // seq_cst
50 // On x86 a plain MOV is enough for a seq_cst load.
51 __asm__ __volatile__("" ::: "memory");
52 v = a->val_dont_use;
53 __asm__ __volatile__("" ::: "memory");
54 }
55 } else {
56 // 64-bit load on 32-bit platform.
57 __asm__ __volatile__(
58 "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
59 "movq %%mm0, %0;" // (ptr could be read-only)
60 "emms;" // Empty mmx state/Reset FP regs
61 : "=m" (v)
62 : "m" (a->val_dont_use)
63 : // mark the mmx registers as clobbered
64#ifdef __MMX__
65 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
66#endif // #ifdef __MMX__
67 "memory");
68 }
69 return v;
6
Returning value (loaded from 'v'), which participates in a condition later
70}
71
72template<typename T>
73INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
74 DCHECK(mo & (memory_order_relaxed | memory_order_release
75 | memory_order_seq_cst));
76 DCHECK(!((uptr)a % sizeof(*a)));
77
78 if (sizeof(*a) < 8 || sizeof(void*) == 8) {
79 // Assume that aligned stores are atomic.
80 if (mo == memory_order_relaxed) {
81 a->val_dont_use = v;
82 } else if (mo == memory_order_release) {
83 // On x86 stores are implicitly release.
84 __asm__ __volatile__("" ::: "memory");
85 a->val_dont_use = v;
86 __asm__ __volatile__("" ::: "memory");
87 } else { // seq_cst
88 // On x86 stores are implicitly release.
89 __asm__ __volatile__("" ::: "memory");
90 a->val_dont_use = v;
91 __sync_synchronize();
92 }
93 } else {
94 // 64-bit store on 32-bit platform.
95 __asm__ __volatile__(
96 "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
97 "movq %%mm0, %0;"
98 "emms;" // Empty mmx state/Reset FP regs
99 : "=m" (a->val_dont_use)
100 : "m" (v)
101 : // mark the mmx registers as clobbered
102#ifdef __MMX__
103 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
104#endif // #ifdef __MMX__
105 "memory");
106 if (mo == memory_order_seq_cst)
107 __sync_synchronize();
108 }
109}
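
A usage sketch for the two primitives above (the counter is illustrative;
the types and helpers are those defined via sanitizer_atomic.h). On the
i386 target analyzed here, the u64 case round-trips through the MMX paths
shown above:

  __sanitizer::atomic_uint64_t counter;

  void Bump() {  // load + store, not an atomic read-modify-write
    __sanitizer::u64 v =
        __sanitizer::atomic_load(&counter, __sanitizer::memory_order_acquire);
    __sanitizer::atomic_store(&counter, v + 1,
                              __sanitizer::memory_order_release);
  }
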
110
111} // namespace __sanitizer
112
113#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h

1//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16// This class implements a complete memory allocator by using two
17// internal allocators:
18// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
19// When allocating 2^x bytes it should return 2^x aligned chunk.
20// PrimaryAllocator is used via a local AllocatorCache.
21// SecondaryAllocator can allocate anything, but is not efficient.
22template <class PrimaryAllocator,
23 class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
24class CombinedAllocator {
25 public:
26 using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
27 using SecondaryAllocator =
28 LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
29 LargeMmapAllocatorPtrArray,
30 typename PrimaryAllocator::AddressSpaceView>;
31
32 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
33 stats_.InitLinkerInitialized();
34 primary_.Init(release_to_os_interval_ms);
35 secondary_.InitLinkerInitialized();
36 }
37
38 void Init(s32 release_to_os_interval_ms) {
39 stats_.Init();
40 primary_.Init(release_to_os_interval_ms);
41 secondary_.Init();
42 }
43
44 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
45 // Returning 0 on malloc(0) may break a lot of code.
46 if (size == 0)
32
Assuming 'size' is not equal to 0
33
Taking false branch
47 size = 1;
48 if (size + alignment < size) {
34
Assuming the condition is false
35
Taking false branch
49 Report("WARNING: %s: CombinedAllocator allocation overflow: "
50 "0x%zx bytes with 0x%zx alignment requested\n",
51 SanitizerToolName, size, alignment);
52 return nullptr;
53 }
54 uptr original_size = size;
55 // If alignment requirements are to be fulfilled by the frontend allocator
56 // rather than by the primary or secondary, passing an alignment lower than
57 // or equal to 8 will prevent any further rounding up, as well as the later
58 // alignment check.
59 if (alignment > 8)
35.1
'alignment' is <= 8
36
Taking false branch
60 size = RoundUpTo(size, alignment);
61 // The primary allocator should return a 2^x aligned allocation when
62 // requested 2^x bytes, hence using the rounded up 'size' when being
63 // serviced by the primary (this is no longer true when the primary is
64 // using a non-fixed base address). The secondary takes care of the
65 // alignment without such requirement, and allocating 'size' would use
66 // extraneous memory, so we employ 'original_size'.
67 void *res;
68 if (primary_.CanAllocate(size, alignment))
37
Calling 'SizeClassAllocator32::CanAllocate'
40
Returning from 'SizeClassAllocator32::CanAllocate'
41
Taking false branch
69 res = cache->Allocate(&primary_, primary_.ClassID(size));
70 else
71 res = secondary_.Allocate(&stats_, original_size, alignment);
42
Value assigned to 'res'
72 if (alignment > 8)
42.1
'alignment' is <= 8
43
Taking false branch
73 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
74 return res;
44
Returning pointer (loaded from 'res')
75 }
76
77 s32 ReleaseToOSIntervalMs() const {
78 return primary_.ReleaseToOSIntervalMs();
79 }
80
81 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
82 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
83 }
84
85 void ForceReleaseToOS() {
86 primary_.ForceReleaseToOS();
87 }
88
89 void Deallocate(AllocatorCache *cache, void *p) {
90 if (!p) return;
91 if (primary_.PointerIsMine(p))
92 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
93 else
94 secondary_.Deallocate(&stats_, p);
95 }
96
97 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
98 uptr alignment) {
99 if (!p)
100 return Allocate(cache, new_size, alignment);
101 if (!new_size) {
102 Deallocate(cache, p);
103 return nullptr;
104 }
105 CHECK(PointerIsMine(p));
106 uptr old_size = GetActuallyAllocatedSize(p);
107 uptr memcpy_size = Min(new_size, old_size);
108 void *new_p = Allocate(cache, new_size, alignment);
109 if (new_p)
110 internal_memcpy(new_p, p, memcpy_size);
111 Deallocate(cache, p);
112 return new_p;
113 }
114
115 bool PointerIsMine(void *p) {
116 if (primary_.PointerIsMine(p))
117 return true;
118 return secondary_.PointerIsMine(p);
119 }
120
121 bool FromPrimary(void *p) {
122 return primary_.PointerIsMine(p);
123 }
124
125 void *GetMetaData(const void *p) {
126 if (primary_.PointerIsMine(p))
127 return primary_.GetMetaData(p);
128 return secondary_.GetMetaData(p);
129 }
130
131 void *GetBlockBegin(const void *p) {
132 if (primary_.PointerIsMine(p))
133 return primary_.GetBlockBegin(p);
134 return secondary_.GetBlockBegin(p);
135 }
136
137 // This function does the same as GetBlockBegin, but is much faster.
138 // Must be called with the allocator locked.
139 void *GetBlockBeginFastLocked(void *p) {
140 if (primary_.PointerIsMine(p))
141 return primary_.GetBlockBegin(p);
142 return secondary_.GetBlockBeginFastLocked(p);
143 }
144
145 uptr GetActuallyAllocatedSize(void *p) {
146 if (primary_.PointerIsMine(p))
147 return primary_.GetActuallyAllocatedSize(p);
148 return secondary_.GetActuallyAllocatedSize(p);
149 }
150
151 uptr TotalMemoryUsed() {
152 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
153 }
154
155 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
156
157 void InitCache(AllocatorCache *cache) {
158 cache->Init(&stats_);
159 }
160
161 void DestroyCache(AllocatorCache *cache) {
162 cache->Destroy(&primary_, &stats_);
163 }
164
165 void SwallowCache(AllocatorCache *cache) {
166 cache->Drain(&primary_);
167 }
168
169 void GetStats(AllocatorStatCounters s) const {
170 stats_.Get(s);
171 }
172
173 void PrintStats() {
174 primary_.PrintStats();
175 secondary_.PrintStats();
176 }
177
178 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
179 // introspection API.
180 void ForceLock() {
181 primary_.ForceLock();
182 secondary_.ForceLock();
183 }
184
185 void ForceUnlock() {
186 secondary_.ForceUnlock();
187 primary_.ForceUnlock();
188 }
189
190 // Iterate over all existing chunks.
191 // The allocator must be locked when calling this function.
192 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
193 primary_.ForEachChunk(callback, arg);
194 secondary_.ForEachChunk(callback, arg);
195 }
196
197 private:
198 PrimaryAllocator primary_;
199 SecondaryAllocator secondary_;
200 AllocatorGlobalStats stats_;
201};
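
A wiring sketch for the class above (the primary type is a placeholder;
ASan's real aliases live in asan_allocator.h):

  // CombinedAllocator<PrimaryAllocator> allocator;
  // CombinedAllocator<PrimaryAllocator>::AllocatorCache cache;
  // allocator.Init(kReleaseToOSIntervalNever);
  // allocator.InitCache(&cache);
  // void *p = allocator.Allocate(&cache, 100, 8);      // served by the primary
  // void *q = allocator.Allocate(&cache, 1 << 24, 8);  // too large: secondary
  // allocator.Deallocate(&cache, p);
  // allocator.Deallocate(&cache, q);
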

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary32.h

1//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
17
18// SizeClassAllocator32 -- allocator for 32-bit address space.
19// This allocator can theoretically be used on 64-bit arch, but there it is less
20// efficient than SizeClassAllocator64.
21//
22// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
23// be returned by MmapOrDie().
24//
25// Region:
26// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
27// kRegionSize).
28// Since the regions are aligned by kRegionSize, there are exactly
29// kNumPossibleRegions possible regions in the address space and so we keep
30// a ByteMap possible_regions to store the size classes of each Region.
31// 0 size class means the region is not used by the allocator.
32//
33// One Region is used to allocate chunks of a single size class.
34// A Region looks like this:
35// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
36//
37// In order to avoid false sharing the objects of this class should be
38// cache-line aligned.
39
40struct SizeClassAllocator32FlagMasks { // Bit masks.
41 enum {
42 kRandomShuffleChunks = 1,
43 kUseSeparateSizeClassForBatch = 2,
44 };
45};
46
47template <class Params>
48class SizeClassAllocator32 {
49 private:
50 static const u64 kTwoLevelByteMapSize1 =
51 (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
52 static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;
53
54 public:
55 using AddressSpaceView = typename Params::AddressSpaceView;
56 static const uptr kSpaceBeg = Params::kSpaceBeg;
57 static const u64 kSpaceSize = Params::kSpaceSize;
58 static const uptr kMetadataSize = Params::kMetadataSize;
59 typedef typename Params::SizeClassMap SizeClassMap;
60 static const uptr kRegionSizeLog = Params::kRegionSizeLog;
61 typedef typename Params::MapUnmapCallback MapUnmapCallback;
62 using ByteMap = typename conditional<
63 (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
64 FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
65 AddressSpaceView>,
66 TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
67
68 COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
69                (kSpaceSize & (kSpaceSize - 1)) == 0);
70
71 static const bool kRandomShuffleChunks = Params::kFlags &
72 SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
73 static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
74 SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
75
76 struct TransferBatch {
77 static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
78 void SetFromArray(void *batch[], uptr count) {
79 DCHECK_LE(count, kMaxNumCached);
80 count_ = count;
81 for (uptr i = 0; i < count; i++)
82 batch_[i] = batch[i];
83 }
84 uptr Count() const { return count_; }
85 void Clear() { count_ = 0; }
86 void Add(void *ptr) {
87 batch_[count_++] = ptr;
88 DCHECK_LE(count_, kMaxNumCached);
89 }
90 void CopyToArray(void *to_batch[]) const {
91 for (uptr i = 0, n = Count(); i < n; i++)
92 to_batch[i] = batch_[i];
93 }
94
95 // How much memory do we need for a batch containing n elements.
96 static uptr AllocationSizeRequiredForNElements(uptr n) {
97 return sizeof(uptr) * 2 + sizeof(void *) * n;
98 }
99 static uptr MaxCached(uptr size) {
100 return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
101 }
102
103 TransferBatch *next;
104
105 private:
106 uptr count_;
107 void *batch_[kMaxNumCached];
108 };
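
A quick size check for the struct above on the 32-bit target analyzed here
(assuming sizeof(uptr) == sizeof(void *) == 4):
AllocationSizeRequiredForNElements(10) = 2*4 + 10*4 = 48 bytes, the two
leading uptr-sized slots covering 'next' and 'count_'.
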
109
110 static const uptr kBatchSize = sizeof(TransferBatch);
111 COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
112 COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
113
114 static uptr ClassIdToSize(uptr class_id) {
115 return (class_id == SizeClassMap::kBatchClassID) ?
116 kBatchSize : SizeClassMap::Size(class_id);
117 }
118
119 typedef SizeClassAllocator32<Params> ThisT;
120 typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
121
122 void Init(s32 release_to_os_interval_ms) {
123 possible_regions.Init();
124 internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
125 }
126
127 s32 ReleaseToOSIntervalMs() const {
128 return kReleaseToOSIntervalNever;
129 }
130
131 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
132 // This is empty here. Currently only implemented in 64-bit allocator.
133 }
134
135 void ForceReleaseToOS() {
136 // Currently implemented in 64-bit allocator only.
137 }
138
139 void *MapWithCallback(uptr size) {
140 void *res = MmapOrDie(size, PrimaryAllocatorName);
141 MapUnmapCallback().OnMap((uptr)res, size);
142 return res;
143 }
144
145 void UnmapWithCallback(uptr beg, uptr size) {
146 MapUnmapCallback().OnUnmap(beg, size);
147 UnmapOrDie(reinterpret_cast<void *>(beg), size);
148 }
149
150 static bool CanAllocate(uptr size, uptr alignment) {
151 return size <= SizeClassMap::kMaxSize &&
38
Assuming 'size' is > 'kMaxSize'
39
Returning zero, which participates in a condition later
152 alignment <= SizeClassMap::kMaxSize;
153 }
154
155 void *GetMetaData(const void *p) {
156 CHECK(PointerIsMine(p));
157 uptr mem = reinterpret_cast<uptr>(p);
158 uptr beg = ComputeRegionBeg(mem);
159 uptr size = ClassIdToSize(GetSizeClass(p));
160 u32 offset = mem - beg;
161 uptr n = offset / (u32)size; // 32-bit division
162 uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
163 return reinterpret_cast<void*>(meta);
164 }
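
A worked example of the metadata math above, with assumed parameters
(kRegionSize = 1 MiB, kMetadataSize = 4, a 128-byte size class): for
p = beg + 3*128, n = 3 and meta = (beg + kRegionSize) - 4*4. Metadata slots
count down from the region end, matching the
"UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1" layout described
at the top of this header.
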
165
166 NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
167 uptr class_id) {
168 DCHECK_LT(class_id, kNumClasses);
169 SizeClassInfo *sci = GetSizeClassInfo(class_id);
170 SpinMutexLock l(&sci->mutex);
171 if (sci->free_list.empty()) {
172 if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
173 return nullptr;
174 DCHECK(!sci->free_list.empty());
175 }
176 TransferBatch *b = sci->free_list.front();
177 sci->free_list.pop_front();
178 return b;
179 }
180
181 NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
182 TransferBatch *b) {
183 DCHECK_LT(class_id, kNumClasses);
184 CHECK_GT(b->Count(), 0);
185 SizeClassInfo *sci = GetSizeClassInfo(class_id);
186 SpinMutexLock l(&sci->mutex);
187 sci->free_list.push_front(b);
188 }
189
190 bool PointerIsMine(const void *p) {
191 uptr mem = reinterpret_cast<uptr>(p);
192 if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
193 mem &= (kSpaceSize - 1);
194 if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
195 return false;
196 return GetSizeClass(p) != 0;
197 }
198
199 uptr GetSizeClass(const void *p) {
200 return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
201 }
202
203 void *GetBlockBegin(const void *p) {
204 CHECK(PointerIsMine(p));
205 uptr mem = reinterpret_cast<uptr>(p);
206 uptr beg = ComputeRegionBeg(mem);
207 uptr size = ClassIdToSize(GetSizeClass(p));
208 u32 offset = mem - beg;
209 u32 n = offset / (u32)size; // 32-bit division
210 uptr res = beg + (n * (u32)size);
211 return reinterpret_cast<void*>(res);
212 }
213
214 uptr GetActuallyAllocatedSize(void *p) {
215 CHECK(PointerIsMine(p));
216 return ClassIdToSize(GetSizeClass(p));
217 }
218
219 static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
220
221 uptr TotalMemoryUsed() {
222 // No need to lock here.
223 uptr res = 0;
224 for (uptr i = 0; i < kNumPossibleRegions; i++)
225 if (possible_regions[i])
226 res += kRegionSize;
227 return res;
228 }
229
230 void TestOnlyUnmap() {
231 for (uptr i = 0; i < kNumPossibleRegions; i++)
232 if (possible_regions[i])
233 UnmapWithCallback((i * kRegionSize), kRegionSize);
234 }
235
236 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
237 // introspection API.
238 void ForceLock() {
239 for (uptr i = 0; i < kNumClasses; i++) {
240 GetSizeClassInfo(i)->mutex.Lock();
241 }
242 }
243
244 void ForceUnlock() {
245 for (int i = kNumClasses - 1; i >= 0; i--) {
246 GetSizeClassInfo(i)->mutex.Unlock();
247 }
248 }
249
250 // Iterate over all existing chunks.
251 // The allocator must be locked when calling this function.
252 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
253 for (uptr region = 0; region < kNumPossibleRegions; region++)
254 if (possible_regions[region]) {
255 uptr chunk_size = ClassIdToSize(possible_regions[region]);
256 uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
257 uptr region_beg = region * kRegionSize;
258 for (uptr chunk = region_beg;
259 chunk < region_beg + max_chunks_in_region * chunk_size;
260 chunk += chunk_size) {
261 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
262 callback(chunk, arg);
263 }
264 }
265 }
266
267 void PrintStats() {}
268
269 static uptr AdditionalSize() { return 0; }
270
271 typedef SizeClassMap SizeClassMapT;
272 static const uptr kNumClasses = SizeClassMap::kNumClasses;
273
274 private:
275 static const uptr kRegionSize = 1 << kRegionSizeLog;
276 static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
277
278 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
279 StaticSpinMutex mutex;
280 IntrusiveList<TransferBatch> free_list;
281 u32 rand_state;
282 };
283 COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
284
285 uptr ComputeRegionId(uptr mem) const {
286 if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
287 mem &= (kSpaceSize - 1);
288 const uptr res = mem >> kRegionSizeLog;
289 CHECK_LT(res, kNumPossibleRegions);
290 return res;
291 }
292
293 uptr ComputeRegionBeg(uptr mem) {
294 return mem & ~(kRegionSize - 1);
295 }
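
Worked example for the two helpers above (assuming kRegionSizeLog = 20,
i.e. 1 MiB regions): mem = 0x4012345 has region id 0x40 (mem >> 20) and
region base 0x4000000 (mem & ~(kRegionSize - 1)).
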
296
297 uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
298 DCHECK_LT(class_id, kNumClasses);
299 const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
300 kRegionSize, kRegionSize, PrimaryAllocatorName));
301 if (UNLIKELY(!res))
302 return 0;
303 MapUnmapCallback().OnMap(res, kRegionSize);
304 stat->Add(AllocatorStatMapped, kRegionSize);
305 CHECK(IsAligned(res, kRegionSize));
306 possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
307 return res;
308 }
309
310 SizeClassInfo *GetSizeClassInfo(uptr class_id) {
311 DCHECK_LT(class_id, kNumClasses);
312 return &size_class_info_array[class_id];
313 }
314
315 bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
316 TransferBatch **current_batch, uptr max_count,
317 uptr *pointers_array, uptr count) {
318 // If using a separate class for batches, we do not need to shuffle it.
319 if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
320 class_id != SizeClassMap::kBatchClassID))
321 RandomShuffle(pointers_array, count, &sci->rand_state);
322 TransferBatch *b = *current_batch;
323 for (uptr i = 0; i < count; i++) {
324 if (!b) {
325 b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
326 if (UNLIKELY(!b))
327 return false;
328 b->Clear();
329 }
330 b->Add((void*)pointers_array[i]);
331 if (b->Count() == max_count) {
332 sci->free_list.push_back(b);
333 b = nullptr;
334 }
335 }
336 *current_batch = b;
337 return true;
338 }
339
340 bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
341 SizeClassInfo *sci, uptr class_id) {
342 const uptr region = AllocateRegion(stat, class_id);
343 if (UNLIKELY(!region))
344 return false;
345 if (kRandomShuffleChunks)
346 if (UNLIKELY(sci->rand_state == 0))
347 // The random state is initialized from ASLR (PIE) and time.
348 sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
349 const uptr size = ClassIdToSize(class_id);
350 const uptr n_chunks = kRegionSize / (size + kMetadataSize);
351 const uptr max_count = TransferBatch::MaxCached(size);
352 DCHECK_GT(max_count, 0);
353 TransferBatch *b = nullptr;
354 constexpr uptr kShuffleArraySize = 48;
355 uptr shuffle_array[kShuffleArraySize];
356 uptr count = 0;
357 for (uptr i = region; i < region + n_chunks * size; i += size) {
358 shuffle_array[count++] = i;
359 if (count == kShuffleArraySize) {
360 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
361 shuffle_array, count)))
362 return false;
363 count = 0;
364 }
365 }
366 if (count) {
367 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
368 shuffle_array, count)))
369 return false;
370 }
371 if (b) {
372 CHECK_GT(b->Count(), 0);
373 sci->free_list.push_back(b);
374 }
375 return true;
376 }
377
378 ByteMap possible_regions;
379 SizeClassInfo size_class_info_array[kNumClasses];
380};