Bug Summary

File: compiler-rt/lib/asan/asan_allocator.cpp
Warning: line 511, column 46
Array access (from variable 'alloc_beg') results in a null pointer dereference
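
For orientation before the annotated path, here is a minimal, self-contained C++ sketch of the shape the checker reports. It is not taken from asan_allocator.cpp: MaybeAllocate, ReportOOM and WriteMagicHeader are hypothetical stand-ins, and the sketch assumes, as the path below suggests, that the allocation is modeled as returning null and that the error-reporting call is not visible to the analyzer as noreturn.

#include <cstddef>
#include <cstdint>

void *MaybeAllocate(std::size_t size);  // may return nullptr (assumption)
void ReportOOM(std::size_t size);       // halts at runtime, but without a
                                        // [[noreturn]] annotation the analyzer
                                        // models a normal return

void WriteMagicHeader(std::size_t needed_size) {
  void *allocated = MaybeAllocate(needed_size);
  if (!allocated) {
    // No early return here: the code relies on the reporter never returning.
    ReportOOM(needed_size);
  }
  std::uintptr_t alloc_beg = reinterpret_cast<std::uintptr_t>(allocated);
  // If the null path is carried past ReportOOM, alloc_beg is 0 here and the
  // array access below is what the checker flags as a null dereference.
  reinterpret_cast<std::uintptr_t *>(alloc_beg)[0] = 0xCC6E96B9;
  reinterpret_cast<std::uintptr_t *>(alloc_beg)[1] = alloc_beg + 16;
}

In the real function the corresponding pieces are allocator.Allocate() at line 470, the AllocatorMayReturnNull()/ReportOutOfMemory() handling at lines 476-481, and the kAllocBegMagic stores at lines 511-512.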

Annotated Source Code

clang -cc1 -cc1 -triple i386-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name asan_allocator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu i686 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D ASAN_DYNAMIC=1 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/build-llvm/projects/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan -I /build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/llvm/include -I /build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0/32 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/i386-pc-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/build-llvm/projects/compiler-rt/lib/asan -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -ftls-model=initial-exec -fno-builtin -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-04-233312-30236-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/asan_allocator.cpp

/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/asan_allocator.cpp

1//===-- asan_allocator.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address sanity checker.
10//
11// Implementation of ASan's memory allocator, 2-nd version.
12// This variant uses the allocator from sanitizer_common, i.e. the one shared
13// with ThreadSanitizer and MemorySanitizer.
14//
15//===----------------------------------------------------------------------===//
16
17#include "asan_allocator.h"
18
19#include "asan_mapping.h"
20#include "asan_poisoning.h"
21#include "asan_report.h"
22#include "asan_stack.h"
23#include "asan_thread.h"
24#include "lsan/lsan_common.h"
25#include "sanitizer_common/sanitizer_allocator_checks.h"
26#include "sanitizer_common/sanitizer_allocator_interface.h"
27#include "sanitizer_common/sanitizer_errno.h"
28#include "sanitizer_common/sanitizer_flags.h"
29#include "sanitizer_common/sanitizer_internal_defs.h"
30#include "sanitizer_common/sanitizer_list.h"
31#include "sanitizer_common/sanitizer_quarantine.h"
32#include "sanitizer_common/sanitizer_stackdepot.h"
33
34namespace __asan {
35
36// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
37// We use adaptive redzones: for larger allocation larger redzones are used.
38static u32 RZLog2Size(u32 rz_log) {
39 CHECK_LT(rz_log, 8);
40 return 16 << rz_log;
41}
42
43static u32 RZSize2Log(u32 rz_size) {
44 CHECK_GE(rz_size, 16);
45 CHECK_LE(rz_size, 2048);
46 CHECK(IsPowerOfTwo(rz_size));
47 u32 res = Log2(rz_size) - 4;
48 CHECK_EQ(rz_size, RZLog2Size(res));
49 return res;
50}
51
52static AsanAllocator &get_allocator();
53
54// The memory chunk allocated from the underlying allocator looks like this:
55// L L L L L L H H U U U U U U R R
56// L -- left redzone words (0 or more bytes)
57// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
58// U -- user memory.
59// R -- right redzone (0 or more bytes)
60// ChunkBase consists of ChunkHeader and other bytes that overlap with user
61// memory.
62
63// If the left redzone is greater than the ChunkHeader size we store a magic
64// value in the first uptr word of the memory block and store the address of
65// ChunkBase in the next uptr.
66// M B L L L L L L L L L H H U U U U U U
67// | ^
68// ---------------------|
69// M -- magic value kAllocBegMagic
70// B -- address of ChunkHeader pointing to the first 'H'
71static const uptr kAllocBegMagic = 0xCC6E96B9;
72
73struct ChunkHeader {
74 // 1-st 8 bytes.
75 u32 chunk_state : 8; // Must be first.
76 u32 alloc_tid : 24;
77
78 u32 free_tid : 24;
79 u32 from_memalign : 1;
80 u32 alloc_type : 2;
81 u32 rz_log : 3;
82 u32 lsan_tag : 2;
83 // 2-nd 8 bytes
84 // This field is used for small sizes. For large sizes it is equal to
85 // SizeClassMap::kMaxSize and the actual size is stored in the
86 // SecondaryAllocator's metadata.
87 u32 user_requested_size : 29;
88 // align < 8 -> 0
89 // else -> log2(min(align, 512)) - 2
90 u32 user_requested_alignment_log : 3;
91 u32 alloc_context_id;
92};
93
94struct ChunkBase : ChunkHeader {
95 // Header2, intersects with user memory.
96 u32 free_context_id;
97};
98
99static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
100static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
101COMPILER_CHECK(kChunkHeaderSize == 16);
102COMPILER_CHECK(kChunkHeader2Size <= 16);
103
104// Every chunk of memory allocated by this allocator can be in one of 3 states:
105// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
106// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
107// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
108enum {
109 CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
110 CHUNK_ALLOCATED = 2,
111 CHUNK_QUARANTINE = 3
112};
113
114struct AsanChunk: ChunkBase {
115 uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
116 uptr UsedSize(bool locked_version = false) {
117 if (user_requested_size != SizeClassMap::kMaxSize)
118 return user_requested_size;
119 return *reinterpret_cast<uptr *>(
120 get_allocator().GetMetaData(AllocBeg(locked_version)));
121 }
122 void *AllocBeg(bool locked_version = false) {
123 if (from_memalign) {
124 if (locked_version)
125 return get_allocator().GetBlockBeginFastLocked(
126 reinterpret_cast<void *>(this));
127 return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
128 }
129 return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
130 }
131 bool AddrIsInside(uptr addr, bool locked_version = false) {
132 return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
133 }
134};
135
136struct QuarantineCallback {
137 QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
138 : cache_(cache),
139 stack_(stack) {
140 }
141
142 void Recycle(AsanChunk *m) {
143 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
144 atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
145 CHECK_NE(m->alloc_tid, kInvalidTid);
146 CHECK_NE(m->free_tid, kInvalidTid);
147 PoisonShadow(m->Beg(),
148 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
149 kAsanHeapLeftRedzoneMagic);
150 void *p = reinterpret_cast<void *>(m->AllocBeg());
151 if (p != m) {
152 uptr *alloc_magic = reinterpret_cast<uptr *>(p);
153 CHECK_EQ(alloc_magic[0], kAllocBegMagic);
154 // Clear the magic value, as allocator internals may overwrite the
155 // contents of deallocated chunk, confusing GetAsanChunk lookup.
156 alloc_magic[0] = 0;
157 CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
158 }
159
160 // Statistics.
161 AsanStats &thread_stats = GetCurrentThreadStats();
162 thread_stats.real_frees++;
163 thread_stats.really_freed += m->UsedSize();
164
165 get_allocator().Deallocate(cache_, p);
166 }
167
168 void *Allocate(uptr size) {
169 void *res = get_allocator().Allocate(cache_, size, 1);
170 // TODO(alekseys): Consider making quarantine OOM-friendly.
171 if (UNLIKELY(!res))
172 ReportOutOfMemory(size, stack_);
173 return res;
174 }
175
176 void Deallocate(void *p) {
177 get_allocator().Deallocate(cache_, p);
178 }
179
180 private:
181 AllocatorCache* const cache_;
182 BufferedStackTrace* const stack_;
183};
184
185typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
186typedef AsanQuarantine::Cache QuarantineCache;
187
188void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
189 PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
190 // Statistics.
191 AsanStats &thread_stats = GetCurrentThreadStats();
192 thread_stats.mmaps++;
193 thread_stats.mmaped += size;
194}
195void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
196 PoisonShadow(p, size, 0);
197 // We are about to unmap a chunk of user memory.
198 // Mark the corresponding shadow memory as not needed.
199 FlushUnneededASanShadowMemory(p, size);
200 // Statistics.
201 AsanStats &thread_stats = GetCurrentThreadStats();
202 thread_stats.munmaps++;
203 thread_stats.munmaped += size;
204}
205
206// We can not use THREADLOCAL because it is not supported on some of the
207// platforms we care about (OSX 10.6, Android).
208// static THREADLOCAL AllocatorCache cache;
209AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
210 CHECK(ms);
211 return &ms->allocator_cache;
212}
213
214QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
215 CHECK(ms);
216 CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
217 return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
218}
219
220void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
221 quarantine_size_mb = f->quarantine_size_mb;
222 thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
223 min_redzone = f->redzone;
224 max_redzone = f->max_redzone;
225 may_return_null = cf->allocator_may_return_null;
226 alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
227 release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
228}
229
230void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
231 f->quarantine_size_mb = quarantine_size_mb;
232 f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
233 f->redzone = min_redzone;
234 f->max_redzone = max_redzone;
235 cf->allocator_may_return_null = may_return_null;
236 f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
237 cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
238}
239
240struct Allocator {
241 static const uptr kMaxAllowedMallocSize =
242 FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
243
244 AsanAllocator allocator;
245 AsanQuarantine quarantine;
246 StaticSpinMutex fallback_mutex;
247 AllocatorCache fallback_allocator_cache;
248 QuarantineCache fallback_quarantine_cache;
249
250 uptr max_user_defined_malloc_size;
251 atomic_uint8_t rss_limit_exceeded;
252
253 // ------------------- Options --------------------------
254 atomic_uint16_t min_redzone;
255 atomic_uint16_t max_redzone;
256 atomic_uint8_t alloc_dealloc_mismatch;
257
258 // ------------------- Initialization ------------------------
259 explicit Allocator(LinkerInitialized)
260 : quarantine(LINKER_INITIALIZED),
261 fallback_quarantine_cache(LINKER_INITIALIZED) {}
262
263 void CheckOptions(const AllocatorOptions &options) const {
264 CHECK_GE(options.min_redzone, 16);
265 CHECK_GE(options.max_redzone, options.min_redzone);
266 CHECK_LE(options.max_redzone, 2048);
267 CHECK(IsPowerOfTwo(options.min_redzone));
268 CHECK(IsPowerOfTwo(options.max_redzone));
269 }
270
271 void SharedInitCode(const AllocatorOptions &options) {
272 CheckOptions(options);
273 quarantine.Init((uptr)options.quarantine_size_mb << 20,
274 (uptr)options.thread_local_quarantine_size_kb << 10);
275 atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
276 memory_order_release);
277 atomic_store(&min_redzone, options.min_redzone, memory_order_release);
278 atomic_store(&max_redzone, options.max_redzone, memory_order_release);
279 }
280
281 void InitLinkerInitialized(const AllocatorOptions &options) {
282 SetAllocatorMayReturnNull(options.may_return_null);
283 allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
284 SharedInitCode(options);
285 max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
286 ? common_flags()->max_allocation_size_mb
287 << 20
288 : kMaxAllowedMallocSize;
289 }
290
291 bool RssLimitExceeded() {
292 return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
4
Calling 'atomic_load<__sanitizer::atomic_uint8_t>'
7
Returning from 'atomic_load<__sanitizer::atomic_uint8_t>'
8
Returning value, which participates in a condition later
293 }
294
295 void SetRssLimitExceeded(bool limit_exceeded) {
296 atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
297 }
298
299 void RePoisonChunk(uptr chunk) {
300 // This could be a user-facing chunk (with redzones), or some internal
301 // housekeeping chunk, like TransferBatch. Start by assuming the former.
302 AsanChunk *ac = GetAsanChunk((void *)chunk);
303 uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
304 uptr beg = ac->Beg();
305 uptr end = ac->Beg() + ac->UsedSize(true);
306 uptr chunk_end = chunk + allocated_size;
307 if (chunk < beg && beg < end && end <= chunk_end &&
308 ac->chunk_state == CHUNK_ALLOCATED) {
309 // Looks like a valid AsanChunk in use, poison redzones only.
310 PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
311 uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
312 FastPoisonShadowPartialRightRedzone(
313 end_aligned_down, end - end_aligned_down,
314 chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
315 } else {
316 // This is either not an AsanChunk or freed or quarantined AsanChunk.
317 // In either case, poison everything.
318 PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
319 }
320 }
321
322 void ReInitialize(const AllocatorOptions &options) {
323 SetAllocatorMayReturnNull(options.may_return_null);
324 allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
325 SharedInitCode(options);
326
327 // Poison all existing allocation's redzones.
328 if (CanPoisonMemory()) {
329 allocator.ForceLock();
330 allocator.ForEachChunk(
331 [](uptr chunk, void *alloc) {
332 ((Allocator *)alloc)->RePoisonChunk(chunk);
333 },
334 this);
335 allocator.ForceUnlock();
336 }
337 }
338
339 void GetOptions(AllocatorOptions *options) const {
340 options->quarantine_size_mb = quarantine.GetSize() >> 20;
341 options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
342 options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
343 options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
344 options->may_return_null = AllocatorMayReturnNull();
345 options->alloc_dealloc_mismatch =
346 atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
347 options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
348 }
349
350 // -------------------- Helper methods. -------------------------
351 uptr ComputeRZLog(uptr user_requested_size) {
352 u32 rz_log =
353 user_requested_size <= 64 - 16 ? 0 :
354 user_requested_size <= 128 - 32 ? 1 :
355 user_requested_size <= 512 - 64 ? 2 :
356 user_requested_size <= 4096 - 128 ? 3 :
357 user_requested_size <= (1 << 14) - 256 ? 4 :
358 user_requested_size <= (1 << 15) - 512 ? 5 :
359 user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
360 u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
361 u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
362 return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
363 }
364
365 static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
366 if (user_requested_alignment < 8)
367 return 0;
368 if (user_requested_alignment > 512)
369 user_requested_alignment = 512;
370 return Log2(user_requested_alignment) - 2;
371 }
372
373 static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
374 if (user_requested_alignment_log == 0)
375 return 0;
376 return 1LL << (user_requested_alignment_log + 2);
377 }
378
379 // We have an address between two chunks, and we want to report just one.
380 AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
381 AsanChunk *right_chunk) {
382 // Prefer an allocated chunk over freed chunk and freed chunk
383 // over available chunk.
384 if (left_chunk->chunk_state != right_chunk->chunk_state) {
385 if (left_chunk->chunk_state == CHUNK_ALLOCATED)
386 return left_chunk;
387 if (right_chunk->chunk_state == CHUNK_ALLOCATED)
388 return right_chunk;
389 if (left_chunk->chunk_state == CHUNK_QUARANTINE)
390 return left_chunk;
391 if (right_chunk->chunk_state == CHUNK_QUARANTINE)
392 return right_chunk;
393 }
394 // Same chunk_state: choose based on offset.
395 sptr l_offset = 0, r_offset = 0;
396 CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
397 CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
398 if (l_offset < r_offset)
399 return left_chunk;
400 return right_chunk;
401 }
402
403 bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
404 AsanChunk *m = GetAsanChunkByAddr(addr);
405 if (!m) return false;
406 if (m->chunk_state != CHUNK_ALLOCATED) return false;
407 if (m->Beg() != addr) return false;
408 atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
409 memory_order_relaxed);
410 return true;
411 }
412
413 // -------------------- Allocation/Deallocation routines ---------------
414 void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
415 AllocType alloc_type, bool can_fill) {
416 if (UNLIKELY(!asan_inited))
1
Assuming 'asan_inited' is not equal to 0
2
Taking false branch
417 AsanInitFromRtl();
418 if (RssLimitExceeded()) {
3
Calling 'Allocator::RssLimitExceeded'
9
Returning from 'Allocator::RssLimitExceeded'
10
Assuming the condition is false
11
Taking false branch
419 if (AllocatorMayReturnNull())
420 return nullptr;
421 ReportRssLimitExceeded(stack);
422 }
423 Flags &fl = *flags();
424 CHECK(stack);
12
Assuming 'v1' is not equal to 'v2'
13
Taking false branch
14
Loop condition is false. Exiting loop
425 const uptr min_alignment = SHADOW_GRANULARITY;
426 const uptr user_requested_alignment_log =
427 ComputeUserRequestedAlignmentLog(alignment);
428 if (alignment < min_alignment)
14.1
'alignment' is >= 'min_alignment'
15
Taking false branch
429 alignment = min_alignment;
430 if (size == 0) {
16
Assuming 'size' is not equal to 0
17
Taking false branch
431 // We'd be happy to avoid allocating memory for zero-size requests, but
432 // some programs/tests depend on this behavior and assume that malloc
433 // would not return NULL even for zero-size allocations. Moreover, it
434 // looks like operator new should never return NULL, and results of
435 // consecutive "new" calls must be different even if the allocated size
436 // is zero.
437 size = 1;
438 }
439 CHECK(IsPowerOfTwo(alignment));
18
Taking false branch
19
Loop condition is false. Exiting loop
440 uptr rz_log = ComputeRZLog(size);
441 uptr rz_size = RZLog2Size(rz_log);
442 uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
443 uptr needed_size = rounded_size + rz_size;
444 if (alignment > min_alignment)
20
Assuming 'alignment' is <= 'min_alignment'
21
Taking false branch
445 needed_size += alignment;
446 bool using_primary_allocator = true;
447 // If we are allocating from the secondary allocator, there will be no
448 // automatic right redzone, so add the right redzone manually.
449 if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
22
Taking true branch
450 needed_size += rz_size;
451 using_primary_allocator = false;
452 }
453 CHECK(IsAligned(needed_size, min_alignment));
23
Taking false branch
24
Loop condition is false. Exiting loop
454 if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
25
Assuming 'size' is <= 'kMaxAllowedMallocSize'
26
Assuming 'needed_size' is <= 'kMaxAllowedMallocSize'
28
Taking false branch
455 size > max_user_defined_malloc_size) {
27
Assuming 'size' is <= field 'max_user_defined_malloc_size'
456 if (AllocatorMayReturnNull()) {
457 Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
458 (void*)size);
459 return nullptr;
460 }
461 uptr malloc_limit =
462 Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
463 ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
464 }
465
466 AsanThread *t = GetCurrentThread();
467 void *allocated;
468 if (t) {
29
Assuming 't' is non-null
30
Taking true branch
469 AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
470 allocated = allocator.Allocate(cache, needed_size, 8);
31
Calling 'CombinedAllocator::Allocate'
45
Returning from 'CombinedAllocator::Allocate'
46
Value assigned to 'allocated'
471 } else {
472 SpinMutexLock l(&fallback_mutex);
473 AllocatorCache *cache = &fallback_allocator_cache;
474 allocated = allocator.Allocate(cache, needed_size, 8);
475 }
476 if (UNLIKELY(!allocated)) {
47
Assuming 'allocated' is null
48
Taking true branch
477 SetAllocatorOutOfMemory();
478 if (AllocatorMayReturnNull())
49
Assuming the condition is false
50
Taking false branch
479 return nullptr;
480 ReportOutOfMemory(size, stack);
481 }
482
483 if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
51
Assuming the condition is false
484 // Heap poisoning is enabled, but the allocator provides an unpoisoned
485 // chunk. This is possible if CanPoisonMemory() was false for some
486 // time, for example, due to flags()->start_disabled.
487 // Anyway, poison the block before using it for anything else.
488 uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
489 PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
490 }
491
492 uptr alloc_beg = reinterpret_cast<uptr>(allocated);
52
'alloc_beg' initialized to 0
493 uptr alloc_end = alloc_beg + needed_size;
494 uptr beg_plus_redzone = alloc_beg + rz_size;
495 uptr user_beg = beg_plus_redzone;
496 if (!IsAligned(user_beg, alignment))
53
Taking false branch
497 user_beg = RoundUpTo(user_beg, alignment);
498 uptr user_end = user_beg + size;
499 CHECK_LE(user_end, alloc_end);
54
Assuming 'v1' is <= 'v2'
55
Taking false branch
56
Loop condition is false. Exiting loop
500 uptr chunk_beg = user_beg - kChunkHeaderSize;
501 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
502 m->alloc_type = alloc_type;
503 m->rz_log = rz_log;
504 u32 alloc_tid = t ? t->tid() : 0;
56.1
't' is non-null
57
'?' condition is true
505 m->alloc_tid = alloc_tid;
506 CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
58
Taking false branch
59
Loop condition is false. Exiting loop
507 m->free_tid = kInvalidTid;
508 m->from_memalign = user_beg != beg_plus_redzone;
509 if (alloc_beg != chunk_beg) {
60
Assuming 'alloc_beg' is not equal to 'chunk_beg'
61
Taking true branch
510 CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
62
Assuming 'v1' is <= 'v2'
63
Taking false branch
64
Loop condition is false. Exiting loop
511 reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
65
Array access (from variable 'alloc_beg') results in a null pointer dereference
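// Review note (not part of the source): on the modeled path, step 47 assumed
// 'allocated' to be null and step 50 took the false branch of
// AllocatorMayReturnNull(), so the analysis continues past the
// out-of-memory report; with 'alloc_beg' still 0, the stores on lines
// 511-512 are reported as a null pointer dereference.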
512 reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
513 }
514 if (using_primary_allocator) {
515 CHECK(size);
516 m->user_requested_size = size;
517 CHECK(allocator.FromPrimary(allocated));
518 } else {
519 CHECK(!allocator.FromPrimary(allocated));
520 m->user_requested_size = SizeClassMap::kMaxSize;
521 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
522 meta[0] = size;
523 meta[1] = chunk_beg;
524 }
525 m->user_requested_alignment_log = user_requested_alignment_log;
526
527 m->alloc_context_id = StackDepotPut(*stack);
528
529 uptr size_rounded_down_to_granularity =
530 RoundDownTo(size, SHADOW_GRANULARITY);
531 // Unpoison the bulk of the memory region.
532 if (size_rounded_down_to_granularity)
533 PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
534 // Deal with the end of the region if size is not aligned to granularity.
535 if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
536 u8 *shadow =
537 (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
538 *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
539 }
540
541 AsanStats &thread_stats = GetCurrentThreadStats();
542 thread_stats.mallocs++;
543 thread_stats.malloced += size;
544 thread_stats.malloced_redzones += needed_size - size;
545 if (needed_size > SizeClassMap::kMaxSize)
546 thread_stats.malloc_large++;
547 else
548 thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
549
550 void *res = reinterpret_cast<void *>(user_beg);
551 if (can_fill && fl.max_malloc_fill_size) {
552 uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
553 REAL(memset)(res, fl.malloc_fill_byte, fill_size);
554 }
555#if CAN_SANITIZE_LEAKS
556 m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
557 : __lsan::kDirectlyLeaked;
558#endif
559 // Must be the last mutation of metadata in this function.
560 atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
561 ASAN_MALLOC_HOOK(res, size);
562 return res;
563 }
564
565 // Set quarantine flag if chunk is allocated, issue ASan error report on
566 // available and quarantined chunks. Return true on success, false otherwise.
567 bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
568 BufferedStackTrace *stack) {
569 u8 old_chunk_state = CHUNK_ALLOCATED;
570 // Flip the chunk_state atomically to avoid race on double-free.
571 if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
572 CHUNK_QUARANTINE,
573 memory_order_acquire)) {
574 ReportInvalidFree(ptr, old_chunk_state, stack);
575 // It's not safe to push a chunk in quarantine on invalid free.
576 return false;
577 }
578 CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
579 return true;
580 }
581
582 // Expects the chunk to already be marked as quarantined by using
583 // AtomicallySetQuarantineFlagIfAllocated.
584 void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
585 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
586 CHECK_GE(m->alloc_tid, 0);
587 if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
588 CHECK_EQ(m->free_tid, kInvalidTid);
589 AsanThread *t = GetCurrentThread();
590 m->free_tid = t ? t->tid() : 0;
591 m->free_context_id = StackDepotPut(*stack);
592
593 Flags &fl = *flags();
594 if (fl.max_free_fill_size > 0) {
595 // We have to skip the chunk header, it contains free_context_id.
596 uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
597 if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
598 uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
599 size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
600 REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
601 }
602 }
603
604 // Poison the region.
605 PoisonShadow(m->Beg(),
606 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
607 kAsanHeapFreeMagic);
608
609 AsanStats &thread_stats = GetCurrentThreadStats();
610 thread_stats.frees++;
611 thread_stats.freed += m->UsedSize();
612
613 // Push into quarantine.
614 if (t) {
615 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
616 AllocatorCache *ac = GetAllocatorCache(ms);
617 quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
618 m->UsedSize());
619 } else {
620 SpinMutexLock l(&fallback_mutex);
621 AllocatorCache *ac = &fallback_allocator_cache;
622 quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
623 m, m->UsedSize());
624 }
625 }
626
627 void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
628 BufferedStackTrace *stack, AllocType alloc_type) {
629 uptr p = reinterpret_cast<uptr>(ptr);
630 if (p == 0) return;
631
632 uptr chunk_beg = p - kChunkHeaderSize;
633 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
634
635 // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
636 // malloc. Don't report an invalid free in this case.
637 if (SANITIZER_WINDOWS &&
638 !get_allocator().PointerIsMine(ptr)) {
639 if (!IsSystemHeapAddress(p))
640 ReportFreeNotMalloced(p, stack);
641 return;
642 }
643
644 ASAN_FREE_HOOK(ptr);
645
646 // Must mark the chunk as quarantined before any changes to its metadata.
647 // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
648 if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
649
650 if (m->alloc_type != alloc_type) {
651 if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
652 ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
653 (AllocType)alloc_type);
654 }
655 } else {
656 if (flags()->new_delete_type_mismatch &&
657 (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
658 ((delete_size && delete_size != m->UsedSize()) ||
659 ComputeUserRequestedAlignmentLog(delete_alignment) !=
660 m->user_requested_alignment_log)) {
661 ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
662 }
663 }
664
665 QuarantineChunk(m, ptr, stack);
666 }
667
668 void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
669 CHECK(old_ptr && new_size);
670 uptr p = reinterpret_cast<uptr>(old_ptr);
671 uptr chunk_beg = p - kChunkHeaderSize;
672 AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
673
674 AsanStats &thread_stats = GetCurrentThreadStats();
675 thread_stats.reallocs++;
676 thread_stats.realloced += new_size;
677
678 void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
679 if (new_ptr) {
680 u8 chunk_state = m->chunk_state;
681 if (chunk_state != CHUNK_ALLOCATED)
682 ReportInvalidFree(old_ptr, chunk_state, stack);
683 CHECK_NE(REAL(memcpy), nullptr);
684 uptr memcpy_size = Min(new_size, m->UsedSize());
685 // If realloc() races with free(), we may start copying freed memory.
686 // However, we will report racy double-free later anyway.
687 REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
688 Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
689 }
690 return new_ptr;
691 }
692
693 void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
694 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
695 if (AllocatorMayReturnNull())
696 return nullptr;
697 ReportCallocOverflow(nmemb, size, stack);
698 }
699 void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
700 // If the memory comes from the secondary allocator no need to clear it
701 // as it comes directly from mmap.
702 if (ptr && allocator.FromPrimary(ptr))
703 REAL(memset)(ptr, 0, nmemb * size);
704 return ptr;
705 }
706
707 void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
708 if (chunk_state == CHUNK_QUARANTINE)
709 ReportDoubleFree((uptr)ptr, stack);
710 else
711 ReportFreeNotMalloced((uptr)ptr, stack);
712 }
713
714 void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
715 AllocatorCache *ac = GetAllocatorCache(ms);
716 quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
717 allocator.SwallowCache(ac);
718 }
719
720 // -------------------------- Chunk lookup ----------------------
721
722 // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
723 AsanChunk *GetAsanChunk(void *alloc_beg) {
724 if (!alloc_beg) return nullptr;
725 if (!allocator.FromPrimary(alloc_beg)) {
726 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
727 AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
728 return m;
729 }
730 uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
731 if (alloc_magic[0] == kAllocBegMagic)
732 return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
733 // FIXME: This is either valid small chunk with tiny redzone or invalid
734 // chunk which is beeing allocated/deallocated. The latter case should
735 // return nullptr like secondary allocator does.
736 return reinterpret_cast<AsanChunk *>(alloc_beg);
737 }
738
739 AsanChunk *GetAsanChunkDebug(void *alloc_beg) {
740 if (!alloc_beg) return nullptr;
741 if (!allocator.FromPrimary(alloc_beg)) {
742 uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
743 AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
744 Printf("GetAsanChunkDebug1 alloc_beg %p meta %p m %p\n", alloc_beg, meta, m);
745 return m;
746 }
747 uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
748 Printf(
749 "GetAsanChunkDebug2 alloc_beg %p alloc_magic %p alloc_magic[0] %p "
750 "alloc_magic[1] %p\n",
751 alloc_beg, alloc_magic, alloc_magic[0], alloc_magic[1]);
752 if (alloc_magic[0] == kAllocBegMagic)
753 return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
754 return reinterpret_cast<AsanChunk *>(alloc_beg);
755 }
756
757
758 AsanChunk *GetAsanChunkByAddr(uptr p) {
759 void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
760 return GetAsanChunk(alloc_beg);
761 }
762
763 // Allocator must be locked when this function is called.
764 AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
765 void *alloc_beg =
766 allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
767 return GetAsanChunk(alloc_beg);
768 }
769
770 AsanChunk *GetAsanChunkByAddrFastLockedDebug(uptr p) {
771 void *alloc_beg =
772 allocator.GetBlockBeginFastLockedDebug(reinterpret_cast<void *>(p));
773 Printf("GetAsanChunkByAddrFastLockedDebug p %p alloc_beg %p\n", p, alloc_beg);
774 return GetAsanChunkDebug(alloc_beg);
775 }
776
777 uptr AllocationSize(uptr p) {
778 AsanChunk *m = GetAsanChunkByAddr(p);
779 if (!m) return 0;
780 if (m->chunk_state != CHUNK_ALLOCATED) return 0;
781 if (m->Beg() != p) return 0;
782 return m->UsedSize();
783 }
784
785 AsanChunkView FindHeapChunkByAddress(uptr addr) {
786 AsanChunk *m1 = GetAsanChunkByAddr(addr);
787 if (!m1) return AsanChunkView(m1);
788 sptr offset = 0;
789 if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
790 // The address is in the chunk's left redzone, so maybe it is actually
791 // a right buffer overflow from the other chunk to the left.
792 // Search a bit to the left to see if there is another chunk.
793 AsanChunk *m2 = nullptr;
794 for (uptr l = 1; l < GetPageSizeCached(); l++) {
795 m2 = GetAsanChunkByAddr(addr - l);
796 if (m2 == m1) continue; // Still the same chunk.
797 break;
798 }
799 if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
800 m1 = ChooseChunk(addr, m2, m1);
801 }
802 return AsanChunkView(m1);
803 }
804
805 void Purge(BufferedStackTrace *stack) {
806 AsanThread *t = GetCurrentThread();
807 if (t) {
808 AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
809 quarantine.DrainAndRecycle(GetQuarantineCache(ms),
810 QuarantineCallback(GetAllocatorCache(ms),
811 stack));
812 }
813 {
814 SpinMutexLock l(&fallback_mutex);
815 quarantine.DrainAndRecycle(&fallback_quarantine_cache,
816 QuarantineCallback(&fallback_allocator_cache,
817 stack));
818 }
819
820 allocator.ForceReleaseToOS();
821 }
822
823 void PrintStats() {
824 allocator.PrintStats();
825 quarantine.PrintStats();
826 }
827
828 void ForceLock() {
829 allocator.ForceLock();
830 fallback_mutex.Lock();
831 }
832
833 void ForceUnlock() {
834 fallback_mutex.Unlock();
835 allocator.ForceUnlock();
836 }
837};
838
839static Allocator instance(LINKER_INITIALIZED);
840
841static AsanAllocator &get_allocator() {
842 return instance.allocator;
843}
844
845bool AsanChunkView::IsValid() const {
846 return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
847}
848bool AsanChunkView::IsAllocated() const {
849 return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
850}
851bool AsanChunkView::IsQuarantined() const {
852 return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
853}
854uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
855uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
856uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
857u32 AsanChunkView::UserRequestedAlignment() const {
858 return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
859}
860uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
861uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
862AllocType AsanChunkView::GetAllocType() const {
863 return (AllocType)chunk_->alloc_type;
864}
865
866static StackTrace GetStackTraceFromId(u32 id) {
867 CHECK(id);
868 StackTrace res = StackDepotGet(id);
869 CHECK(res.trace);
870 return res;
871}
872
873u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
874u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
875
876StackTrace AsanChunkView::GetAllocStack() const {
877 return GetStackTraceFromId(GetAllocStackId());
878}
879
880StackTrace AsanChunkView::GetFreeStack() const {
881 return GetStackTraceFromId(GetFreeStackId());
882}
883
884void InitializeAllocator(const AllocatorOptions &options) {
885 instance.InitLinkerInitialized(options);
886}
887
888void ReInitializeAllocator(const AllocatorOptions &options) {
889 instance.ReInitialize(options);
890}
891
892void GetAllocatorOptions(AllocatorOptions *options) {
893 instance.GetOptions(options);
894}
895
896AsanChunkView FindHeapChunkByAddress(uptr addr) {
897 return instance.FindHeapChunkByAddress(addr);
898}
899AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
900 return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
901}
902
903void AsanThreadLocalMallocStorage::CommitBack() {
904 GET_STACK_TRACE_MALLOC;
905 instance.CommitBack(this, &stack);
906}
907
908void PrintInternalAllocatorStats() {
909 instance.PrintStats();
910}
911
912void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
913 instance.Deallocate(ptr, 0, 0, stack, alloc_type);
914}
915
916void asan_delete(void *ptr, uptr size, uptr alignment,
917 BufferedStackTrace *stack, AllocType alloc_type) {
918 instance.Deallocate(ptr, size, alignment, stack, alloc_type);
919}
920
921void *asan_malloc(uptr size, BufferedStackTrace *stack) {
922 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
923}
924
925void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
926 return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
927}
928
929void *asan_reallocarray(void *p, uptr nmemb, uptr size,
930 BufferedStackTrace *stack) {
931 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
932 errno = errno_ENOMEM;
933 if (AllocatorMayReturnNull())
934 return nullptr;
935 ReportReallocArrayOverflow(nmemb, size, stack);
936 }
937 return asan_realloc(p, nmemb * size, stack);
938}
939
940void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
941 if (!p)
942 return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
943 if (size == 0) {
944 if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
945 instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
946 return nullptr;
947 }
948 // Allocate a size of 1 if we shouldn't free() on Realloc to 0
949 size = 1;
950 }
951 return SetErrnoOnNull(instance.Reallocate(p, size, stack));
952}
953
954void *asan_valloc(uptr size, BufferedStackTrace *stack) {
955 return SetErrnoOnNull(
956 instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
957}
958
959void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
960 uptr PageSize = GetPageSizeCached();
961 if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
962 errno = errno_ENOMEM;
963 if (AllocatorMayReturnNull())
964 return nullptr;
965 ReportPvallocOverflow(size, stack);
966 }
967 // pvalloc(0) should allocate one page.
968 size = size ? RoundUpTo(size, PageSize) : PageSize;
969 return SetErrnoOnNull(
970 instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
971}
972
973void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
974 AllocType alloc_type) {
975 if (UNLIKELY(!IsPowerOfTwo(alignment))) {
976 errno = errno_EINVAL;
977 if (AllocatorMayReturnNull())
978 return nullptr;
979 ReportInvalidAllocationAlignment(alignment, stack);
980 }
981 return SetErrnoOnNull(
982 instance.Allocate(size, alignment, stack, alloc_type, true));
983}
984
985void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
986 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
987 errno = errno_EINVAL;
988 if (AllocatorMayReturnNull())
989 return nullptr;
990 ReportInvalidAlignedAllocAlignment(size, alignment, stack);
991 }
992 return SetErrnoOnNull(
993 instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
994}
995
996int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
997 BufferedStackTrace *stack) {
998 if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
999 if (AllocatorMayReturnNull())
1000 return errno_EINVAL;
1001 ReportInvalidPosixMemalignAlignment(alignment, stack);
1002 }
1003 void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
1004 if (UNLIKELY(!ptr))
1005 // OOM error is already taken care of by Allocate.
1006 return errno_ENOMEM;
1007 CHECK(IsAligned((uptr)ptr, alignment));
1008 *memptr = ptr;
1009 return 0;
1010}
1011
1012uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
1013 if (!ptr) return 0;
1014 uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
1015 if (flags()->check_malloc_usable_size && (usable_size == 0)) {
1016 GET_STACK_TRACE_FATAL(pc, bp);
1017 ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
1018 }
1019 return usable_size;
1020}
1021
1022uptr asan_mz_size(const void *ptr) {
1023 return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
1024}
1025
1026void asan_mz_force_lock() {
1027 instance.ForceLock();
1028}
1029
1030void asan_mz_force_unlock() {
1031 instance.ForceUnlock();
1032}
1033
1034void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
1035 instance.SetRssLimitExceeded(limit_exceeded);
1036}
1037
1038} // namespace __asan
1039
1040// --- Implementation of LSan-specific functions --- {{{1
1041namespace __lsan {
1042void LockAllocator() {
1043 __asan::get_allocator().ForceLock();
1044}
1045
1046void UnlockAllocator() {
1047 __asan::get_allocator().ForceUnlock();
1048}
1049
1050void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
1051 *begin = (uptr)&__asan::get_allocator();
1052 *end = *begin + sizeof(__asan::get_allocator());
1053}
1054
1055uptr PointsIntoChunk(void* p) {
1056 uptr addr = reinterpret_cast<uptr>(p);
1057 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1058 if (!m) return 0;
1059 uptr chunk = m->Beg();
1060 if (m->chunk_state != __asan::CHUNK_ALLOCATED)
1061 return 0;
1062 if (m->AddrIsInside(addr, /*locked_version=*/true))
1063 return chunk;
1064 if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
1065 addr))
1066 return chunk;
1067 return 0;
1068}
1069
1070// Debug code. Delete once issue #1193 is chased down.
1071extern "C" SANITIZER_WEAK_ATTRIBUTE__attribute__((weak)) const char *__lsan_current_stage;
1072
1073void GetUserBeginDebug(uptr chunk) {
1074 Printf("GetUserBeginDebug1 chunk %p\n", chunk);
1075 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLockedDebug(chunk);
1076 Printf("GetUserBeginDebug2 m %p\n", m);
1077}
1078
1079uptr GetUserBegin(uptr chunk) {
1080 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1081 if (!m) {
1082 Printf(
1083 "ASAN is about to crash with a CHECK failure.\n"
1084 "The ASAN developers are trying to chase down this bug,\n"
1085 "so if you've encountered this bug please let us know.\n"
1086 "See also: https://github.com/google/sanitizers/issues/1193\n"
1087 "Internal ref b/149237057\n"
1088 "chunk: %p caller %p __lsan_current_stage %s\n",
1089 chunk, GET_CALLER_PC(), __lsan_current_stage);
1090 GetUserBeginDebug(chunk);
1091 }
1092 CHECK(m);
1093 return m->Beg();
1094}
1095
1096LsanMetadata::LsanMetadata(uptr chunk) {
1097 metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
1098}
1099
1100bool LsanMetadata::allocated() const {
1101 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1102 return m->chunk_state == __asan::CHUNK_ALLOCATED;
1103}
1104
1105ChunkTag LsanMetadata::tag() const {
1106 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1107 return static_cast<ChunkTag>(m->lsan_tag);
1108}
1109
1110void LsanMetadata::set_tag(ChunkTag value) {
1111 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1112 m->lsan_tag = value;
1113}
1114
1115uptr LsanMetadata::requested_size() const {
1116 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1117 return m->UsedSize(/*locked_version=*/true);
1118}
1119
1120u32 LsanMetadata::stack_trace_id() const {
1121 __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1122 return m->alloc_context_id;
1123}
1124
1125void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1126 __asan::get_allocator().ForEachChunk(callback, arg);
1127}
1128
1129IgnoreObjectResult IgnoreObjectLocked(const void *p) {
1130 uptr addr = reinterpret_cast<uptr>(p);
1131 __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1132 if (!m) return kIgnoreObjectInvalid;
1133 if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
1134 if (m->lsan_tag == kIgnored)
1135 return kIgnoreObjectAlreadyIgnored;
1136 m->lsan_tag = __lsan::kIgnored;
1137 return kIgnoreObjectSuccess;
1138 } else {
1139 return kIgnoreObjectInvalid;
1140 }
1141}
1142} // namespace __lsan
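
The LsanMetadata accessors above recover the chunk header by stepping back kChunkHeaderSize bytes from the user-visible chunk address and reading its fields in place. Below is a minimal, self-contained sketch of that header-before-payload layout; the struct, its field names, and the sizes are hypothetical stand-ins for illustration, not the real AsanChunk definition.

// Sketch only (hypothetical header, not AsanChunk): a header stored
// immediately before the user payload, recovered by subtracting the header
// size, as the LsanMetadata constructor above does with kChunkHeaderSize.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

namespace sketch {

struct ChunkHeader {           // hypothetical fields, for illustration only
  uint32_t alloc_context_id;
  uint32_t used_size;
  uint8_t state;               // e.g. allocated vs. freed
  uint8_t lsan_tag;
};

constexpr std::size_t kChunkHeaderSize = sizeof(ChunkHeader);

// Allocate header + payload in one block and hand out the payload address.
void *AllocateWithHeader(std::size_t size) {
  char *raw = static_cast<char *>(std::malloc(kChunkHeaderSize + size));
  ChunkHeader *h = reinterpret_cast<ChunkHeader *>(raw);
  h->alloc_context_id = 0;
  h->used_size = static_cast<uint32_t>(size);
  h->state = 1;  // "allocated"
  h->lsan_tag = 0;
  return raw + kChunkHeaderSize;
}

// Mirror of LsanMetadata(uptr chunk): step back over the header.
ChunkHeader *HeaderFromUserBegin(void *user_begin) {
  return reinterpret_cast<ChunkHeader *>(static_cast<char *>(user_begin) -
                                         kChunkHeaderSize);
}

}  // namespace sketch

int main() {
  void *p = sketch::AllocateWithHeader(32);
  std::printf("used_size = %u\n", sketch::HeaderFromUserBegin(p)->used_size);
  std::free(static_cast<char *>(p) - sketch::kChunkHeaderSize);
}
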
1143
1144// ---------------------- Interface ---------------- {{{1
1145using namespace __asan;
1146
1147// ASan allocator doesn't reserve extra bytes, so normally we would
1148// just return "size". We don't want to expose our redzone sizes, etc here.
1149uptr __sanitizer_get_estimated_allocated_size(uptr size) {
1150 return size;
1151}
1152
1153int __sanitizer_get_ownership(const void *p) {
1154 uptr ptr = reinterpret_cast<uptr>(p);
1155 return instance.AllocationSize(ptr) > 0;
1156}
1157
1158uptr __sanitizer_get_allocated_size(const void *p) {
1159 if (!p) return 0;
1160 uptr ptr = reinterpret_cast<uptr>(p);
1161 uptr allocated_size = instance.AllocationSize(ptr);
1162 // Die if p is not malloced or if it is already freed.
1163 if (allocated_size == 0) {
1164 GET_STACK_TRACE_FATAL_HERE;
1165 ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
1166 }
1167 return allocated_size;
1168}
1169
1170void __sanitizer_purge_allocator() {
1171 GET_STACK_TRACE_MALLOC;
1172 instance.Purge(&stack);
1173}
1174
1175int __asan_update_allocation_context(void* addr) {
1176 GET_STACK_TRACE_MALLOC;
1177 return instance.UpdateAllocationStack((uptr)addr, &stack);
1178}
1179
1180#if !SANITIZER_SUPPORTS_WEAK_HOOKS
1181// Provide default (no-op) implementation of malloc hooks.
1182SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
1183 void *ptr, uptr size) {
1184 (void)ptr;
1185 (void)size;
1186}
1187
1188SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
1189 (void)ptr;
1190}
1191#endif
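
The functions above form the allocator's public surface. The sketch below shows how an ASan-instrumented program might call them through <sanitizer/allocator_interface.h> (which declares them with size_t rather than the internal uptr), and how defining __sanitizer_malloc_hook / __sanitizer_free_hook overrides the weak no-op defaults at lines 1182-1190. This is an illustrative usage sketch, not part of the runtime.

// Usage sketch: assumes the program is built with -fsanitize=address and that
// <sanitizer/allocator_interface.h> is on the include path.
#include <sanitizer/allocator_interface.h>
#include <cstdio>
#include <cstdlib>

// These definitions replace the weak no-op hooks above; the runtime calls
// them on every allocation / deallocation that goes through the ASan
// allocator. fprintf to stderr is used here purely for illustration.
extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
  std::fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
}
extern "C" void __sanitizer_free_hook(void *ptr) {
  std::fprintf(stderr, "free  %p\n", ptr);
}

int main() {
  void *p = std::malloc(100);
  // Both queries are ultimately answered by instance.AllocationSize().
  std::printf("owned: %d, allocated: %zu\n", __sanitizer_get_ownership(p),
              __sanitizer_get_allocated_size(p));
  std::free(p);
}
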

/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_atomic_clang_x86.h

1//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10// Not intended for direct inclusion. Include sanitizer_atomic.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef SANITIZER_ATOMIC_CLANG_X86_H
15#define SANITIZER_ATOMIC_CLANG_X86_H
16
17namespace __sanitizer {
18
19INLINE void proc_yield(int cnt) {
20 __asm__ __volatile__("" ::: "memory");
21 for (int i = 0; i < cnt; i++)
22 __asm__ __volatile__("pause");
23 __asm__ __volatile__("" ::: "memory");
24}
25
26template<typename T>
27INLINE typename T::Type atomic_load(
28 const volatile T *a, memory_order mo) {
29 DCHECK(mo & (memory_order_relaxed | memory_order_consume
30 | memory_order_acquire | memory_order_seq_cst));
31 DCHECK(!((uptr)a % sizeof(*a)));
32 typename T::Type v;
33
34 if (sizeof(*a) < 8 || sizeof(void*) == 8) {
35 // Assume that aligned loads are atomic.
36 if (mo == memory_order_relaxed) {
4.1
'mo' is equal to memory_order_relaxed
5
Taking true branch
37 v = a->val_dont_use;
38 } else if (mo == memory_order_consume) {
39 // Assume that processor respects data dependencies
40 // (and that compiler won't break them).
41 __asm__ __volatile__("" ::: "memory");
42 v = a->val_dont_use;
43 __asm__ __volatile__("" ::: "memory");
44 } else if (mo == memory_order_acquire) {
45 __asm__ __volatile__("" ::: "memory");
46 v = a->val_dont_use;
47 // On x86 loads are implicitly acquire.
48 __asm__ __volatile__("" ::: "memory");
49 } else { // seq_cst
50 // On x86 plain MOV is enough for seq_cst store.
51 __asm__ __volatile__("" ::: "memory");
52 v = a->val_dont_use;
53 __asm__ __volatile__("" ::: "memory");
54 }
55 } else {
56 // 64-bit load on 32-bit platform.
57 __asm__ __volatile__(
58 "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
59 "movq %%mm0, %0;" // (ptr could be read-only)
60 "emms;" // Empty mmx state/Reset FP regs
61 : "=m" (v)
62 : "m" (a->val_dont_use)
63 : // mark the mmx registers as clobbered
64#ifdef __MMX__
65 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
66#endif // #ifdef __MMX__
67 "memory");
68 }
69 return v;
6
Returning value (loaded from 'v'), which participates in a condition later
70}
71
72template<typename T>
73INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
74 DCHECK(mo & (memory_order_relaxed | memory_order_release
75 | memory_order_seq_cst));
76 DCHECK(!((uptr)a % sizeof(*a)));
77
78 if (sizeof(*a) < 8 || sizeof(void*) == 8) {
79 // Assume that aligned loads are atomic.
80 if (mo == memory_order_relaxed) {
81 a->val_dont_use = v;
82 } else if (mo == memory_order_release) {
83 // On x86 stores are implicitly release.
84 __asm__ __volatile__("" ::: "memory");
85 a->val_dont_use = v;
86 __asm__ __volatile__("" ::: "memory");
87 } else { // seq_cst
88 // On x86 stores are implicitly release.
89 __asm__ __volatile__("" ::: "memory");
90 a->val_dont_use = v;
91 __sync_synchronize();
92 }
93 } else {
94 // 64-bit store on 32-bit platform.
95 __asm__ __volatile__(
96 "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
97 "movq %%mm0, %0;"
98 "emms;" // Empty mmx state/Reset FP regs
99 : "=m" (a->val_dont_use)
100 : "m" (v)
101 : // mark the mmx registers as clobbered
102#ifdef __MMX__
103 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
104#endif // #ifdef __MMX__
105 "memory");
106 if (mo == memory_order_seq_cst)
107 __sync_synchronize();
108 }
109}
110
111} // namespace __sanitizer
112
113#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
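
For comparison, the same load/store discipline can be written with std::atomic, which on x86 compiles down to the plain MOVs plus compiler-level ordering that the header above relies on. This is an illustrative standalone sketch in standard C++, not the sanitizer's atomic API; the pause-based spin mirrors proc_yield().

// Standard-C++ counterpart (illustration only): acquire/release loads and
// stores via std::atomic, plus a spin hint in the spirit of proc_yield().
#include <atomic>
#include <cstdio>

static void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++) {
#if defined(__x86_64__) || defined(__i386__)
    __asm__ __volatile__("pause");  // same spin hint as the inline asm above
#endif
  }
}

int main() {
  // 64-bit value: the header above needs MMX moves for this on 32-bit x86;
  // std::atomic handles the same case portably (possibly via cmpxchg8b).
  std::atomic<unsigned long long> v{0};
  v.store(42, std::memory_order_release);        // plain MOV on x86
  while (v.load(std::memory_order_acquire) != 42)  // plain MOV on x86
    proc_yield(10);
  std::printf("%llu\n", v.load(std::memory_order_relaxed));
}
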

/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_combined.h

1//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16// This class implements a complete memory allocator by using two
17// internal allocators:
18// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
19// When allocating 2^x bytes it should return 2^x aligned chunk.
20// PrimaryAllocator is used via a local AllocatorCache.
21// SecondaryAllocator can allocate anything, but is not efficient.
22template <class PrimaryAllocator,
23 class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
24class CombinedAllocator {
25 public:
26 using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
27 using SecondaryAllocator =
28 LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
29 LargeMmapAllocatorPtrArray,
30 typename PrimaryAllocator::AddressSpaceView>;
31
32 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
33 stats_.InitLinkerInitialized();
34 primary_.Init(release_to_os_interval_ms);
35 secondary_.InitLinkerInitialized();
36 }
37
38 void Init(s32 release_to_os_interval_ms) {
39 stats_.Init();
40 primary_.Init(release_to_os_interval_ms);
41 secondary_.Init();
42 }
43
44 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
45 // Returning 0 on malloc(0) may break a lot of code.
46 if (size == 0)
32
Assuming 'size' is not equal to 0
33
Taking false branch
47 size = 1;
48 if (size + alignment < size) {
34
Assuming the condition is false
35
Taking false branch
49 Report("WARNING: %s: CombinedAllocator allocation overflow: "
50 "0x%zx bytes with 0x%zx alignment requested\n",
51 SanitizerToolName, size, alignment);
52 return nullptr;
53 }
54 uptr original_size = size;
55 // If alignment requirements are to be fulfilled by the frontend allocator
56 // rather than by the primary or secondary, passing an alignment lower than
57 // or equal to 8 will prevent any further rounding up, as well as the later
58 // alignment check.
59 if (alignment > 8)
35.1
'alignment' is <= 8
36
Taking false branch
60 size = RoundUpTo(size, alignment);
61 // The primary allocator should return a 2^x aligned allocation when
62 // requested 2^x bytes, hence using the rounded up 'size' when being
63 // serviced by the primary (this is no longer true when the primary is
64 // using a non-fixed base address). The secondary takes care of the
65 // alignment without such requirement, and allocating 'size' would use
66 // extraneous memory, so we employ 'original_size'.
67 void *res;
68 if (primary_.CanAllocate(size, alignment))
37
Calling 'SizeClassAllocator32::CanAllocate'
40
Returning from 'SizeClassAllocator32::CanAllocate'
41
Taking false branch
69 res = cache->Allocate(&primary_, primary_.ClassID(size));
70 else
71 res = secondary_.Allocate(&stats_, original_size, alignment);
42
Value assigned to 'res'
72 if (alignment > 8)
42.1
'alignment' is <= 8
43
Taking false branch
73 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
74 return res;
44
Returning pointer (loaded from 'res')
75 }
76
77 s32 ReleaseToOSIntervalMs() const {
78 return primary_.ReleaseToOSIntervalMs();
79 }
80
81 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
82 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
83 }
84
85 void ForceReleaseToOS() {
86 primary_.ForceReleaseToOS();
87 }
88
89 void Deallocate(AllocatorCache *cache, void *p) {
90 if (!p) return;
91 if (primary_.PointerIsMine(p))
92 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
93 else
94 secondary_.Deallocate(&stats_, p);
95 }
96
97 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
98 uptr alignment) {
99 if (!p)
100 return Allocate(cache, new_size, alignment);
101 if (!new_size) {
102 Deallocate(cache, p);
103 return nullptr;
104 }
105 CHECK(PointerIsMine(p));
106 uptr old_size = GetActuallyAllocatedSize(p);
107 uptr memcpy_size = Min(new_size, old_size);
108 void *new_p = Allocate(cache, new_size, alignment);
109 if (new_p)
110 internal_memcpy(new_p, p, memcpy_size);
111 Deallocate(cache, p);
112 return new_p;
113 }
114
115 bool PointerIsMine(void *p) {
116 if (primary_.PointerIsMine(p))
117 return true;
118 return secondary_.PointerIsMine(p);
119 }
120
121 bool FromPrimary(void *p) {
122 return primary_.PointerIsMine(p);
123 }
124
125 void *GetMetaData(const void *p) {
126 if (primary_.PointerIsMine(p))
127 return primary_.GetMetaData(p);
128 return secondary_.GetMetaData(p);
129 }
130
131 void *GetBlockBegin(const void *p) {
132 if (primary_.PointerIsMine(p))
133 return primary_.GetBlockBegin(p);
134 return secondary_.GetBlockBegin(p);
135 }
136
137 // This function does the same as GetBlockBegin, but is much faster.
138 // Must be called with the allocator locked.
139 void *GetBlockBeginFastLocked(void *p) {
140 if (primary_.PointerIsMine(p))
141 return primary_.GetBlockBegin(p);
142 return secondary_.GetBlockBeginFastLocked(p);
143 }
144
145 void *GetBlockBeginFastLockedDebug(void *p) {
146 if (primary_.PointerIsMine(p))
147 return primary_.GetBlockBeginDebug(p);
148 return secondary_.GetBlockBeginFastLocked(p);
149 }
150
151
152 uptr GetActuallyAllocatedSize(void *p) {
153 if (primary_.PointerIsMine(p))
154 return primary_.GetActuallyAllocatedSize(p);
155 return secondary_.GetActuallyAllocatedSize(p);
156 }
157
158 uptr TotalMemoryUsed() {
159 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
160 }
161
162 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
163
164 void InitCache(AllocatorCache *cache) {
165 cache->Init(&stats_);
166 }
167
168 void DestroyCache(AllocatorCache *cache) {
169 cache->Destroy(&primary_, &stats_);
170 }
171
172 void SwallowCache(AllocatorCache *cache) {
173 cache->Drain(&primary_);
174 }
175
176 void GetStats(AllocatorStatCounters s) const {
177 stats_.Get(s);
178 }
179
180 void PrintStats() {
181 primary_.PrintStats();
182 secondary_.PrintStats();
183 }
184
185 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
186 // introspection API.
187 void ForceLock() {
188 primary_.ForceLock();
189 secondary_.ForceLock();
190 }
191
192 void ForceUnlock() {
193 secondary_.ForceUnlock();
194 primary_.ForceUnlock();
195 }
196
197 // Iterate over all existing chunks.
198 // The allocator must be locked when calling this function.
199 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
200 primary_.ForEachChunk(callback, arg);
201 secondary_.ForEachChunk(callback, arg);
202 }
203
204 private:
205 PrimaryAllocator primary_;
206 SecondaryAllocator secondary_;
207 AllocatorGlobalStats stats_;
208};
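
The class above routes any request that fits a size class to the primary allocator and everything else to the secondary. The toy sketch below restates that dispatch in plain C++; ToyPrimary, ToySecondary, and the 1 << 17 size cap are hypothetical stand-ins rather than the sanitizer classes, and stats, caches, alignment handling in the secondary, and error reporting are omitted.

// Toy sketch of the primary/secondary dispatch in Allocate()/Deallocate().
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <set>

class ToyPrimary {  // stands in for the size-class (primary) allocator
 public:
  static constexpr std::size_t kMaxSize = 1 << 17;  // hypothetical size-class cap
  static bool CanAllocate(std::size_t size, std::size_t alignment) {
    return size <= kMaxSize && alignment <= kMaxSize;
  }
  void *Allocate(std::size_t size) {
    void *p = std::malloc(size);
    if (p) owned_.insert(p);
    return p;
  }
  bool PointerIsMine(void *p) const { return owned_.count(p) != 0; }
  void Deallocate(void *p) {
    owned_.erase(p);
    std::free(p);
  }

 private:
  std::set<void *> owned_;
};

class ToySecondary {  // stands in for LargeMmapAllocator
 public:
  void *Allocate(std::size_t size) { return std::malloc(size); }
  void Deallocate(void *p) { std::free(p); }
};

class ToyCombined {
 public:
  void *Allocate(std::size_t size, std::size_t alignment) {
    if (size == 0) size = 1;                      // malloc(0) still returns a pointer
    if (size + alignment < size) return nullptr;  // request-size overflow, as above
    if (alignment > 8) size = RoundUpTo(size, alignment);
    if (ToyPrimary::CanAllocate(size, alignment)) return primary_.Allocate(size);
    return secondary_.Allocate(size);
  }
  void Deallocate(void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      primary_.Deallocate(p);
    else
      secondary_.Deallocate(p);
  }

 private:
  static std::size_t RoundUpTo(std::size_t size, std::size_t boundary) {
    return (size + boundary - 1) & ~(boundary - 1);
  }
  ToyPrimary primary_;
  ToySecondary secondary_;
};

int main() {
  ToyCombined a;
  void *small_block = a.Allocate(64, 8);       // routed to the "primary"
  void *large_block = a.Allocate(1 << 20, 8);  // too big for a size class: "secondary"
  std::printf("small=%p large=%p\n", small_block, large_block);
  a.Deallocate(small_block);
  a.Deallocate(large_block);
}
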

/build/llvm-toolchain-snapshot-12~++20200904100631+97866b8de81/compiler-rt/lib/asan/../sanitizer_common/sanitizer_allocator_primary32.h

1//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
17
18// SizeClassAllocator32 -- allocator for 32-bit address space.
19// This allocator can theoretically be used on 64-bit arch, but there it is less
20// efficient than SizeClassAllocator64.
21//
22// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
23// be returned by MmapOrDie().
24//
25// Region:
26// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
27// kRegionSize).
28// Since the regions are aligned by kRegionSize, there are exactly
29// kNumPossibleRegions possible regions in the address space and so we keep
30// a ByteMap possible_regions to store the size classes of each Region.
31// 0 size class means the region is not used by the allocator.
32//
33// One Region is used to allocate chunks of a single size class.
34// A Region looks like this:
35// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
36//
37// In order to avoid false sharing the objects of this class should be
38// chache-line aligned.
39
40struct SizeClassAllocator32FlagMasks { // Bit masks.
41 enum {
42 kRandomShuffleChunks = 1,
43 kUseSeparateSizeClassForBatch = 2,
44 };
45};
46
47template <class Params>
48class SizeClassAllocator32 {
49 private:
50 static const u64 kTwoLevelByteMapSize1 =
51 (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
52 static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;
53
54 public:
55 using AddressSpaceView = typename Params::AddressSpaceView;
56 static const uptr kSpaceBeg = Params::kSpaceBeg;
57 static const u64 kSpaceSize = Params::kSpaceSize;
58 static const uptr kMetadataSize = Params::kMetadataSize;
59 typedef typename Params::SizeClassMap SizeClassMap;
60 static const uptr kRegionSizeLog = Params::kRegionSizeLog;
61 typedef typename Params::MapUnmapCallback MapUnmapCallback;
62 using ByteMap = typename conditional<
63 (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
64 FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
65 AddressSpaceView>,
66 TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
67
68 COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
69 (kSpaceSize & (kSpaceSize - 1)) == 0);
70
71 static const bool kRandomShuffleChunks = Params::kFlags &
72 SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
73 static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
74 SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
75
76 struct TransferBatch {
77 static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
78 void SetFromArray(void *batch[], uptr count) {
79 DCHECK_LE(count, kMaxNumCached);
80 count_ = count;
81 for (uptr i = 0; i < count; i++)
82 batch_[i] = batch[i];
83 }
84 uptr Count() const { return count_; }
85 void Clear() { count_ = 0; }
86 void Add(void *ptr) {
87 batch_[count_++] = ptr;
88 DCHECK_LE(count_, kMaxNumCached);
89 }
90 void CopyToArray(void *to_batch[]) const {
91 for (uptr i = 0, n = Count(); i < n; i++)
92 to_batch[i] = batch_[i];
93 }
94
95 // How much memory do we need for a batch containing n elements.
96 static uptr AllocationSizeRequiredForNElements(uptr n) {
97 return sizeof(uptr) * 2 + sizeof(void *) * n;
98 }
99 static uptr MaxCached(uptr size) {
100 return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
101 }
102
103 TransferBatch *next;
104
105 private:
106 uptr count_;
107 void *batch_[kMaxNumCached];
108 };
109
110 static const uptr kBatchSize = sizeof(TransferBatch);
111 COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
112 COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
113
114 static uptr ClassIdToSize(uptr class_id) {
115 return (class_id == SizeClassMap::kBatchClassID) ?
116 kBatchSize : SizeClassMap::Size(class_id);
117 }
118
119 typedef SizeClassAllocator32<Params> ThisT;
120 typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
121
122 void Init(s32 release_to_os_interval_ms) {
123 possible_regions.Init();
124 internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
125 }
126
127 s32 ReleaseToOSIntervalMs() const {
128 return kReleaseToOSIntervalNever;
129 }
130
131 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
132 // This is empty here. Currently only implemented in 64-bit allocator.
133 }
134
135 void ForceReleaseToOS() {
136 // Currently implemented in 64-bit allocator only.
137 }
138
139 void *MapWithCallback(uptr size) {
140 void *res = MmapOrDie(size, PrimaryAllocatorName);
141 MapUnmapCallback().OnMap((uptr)res, size);
142 return res;
143 }
144
145 void UnmapWithCallback(uptr beg, uptr size) {
146 MapUnmapCallback().OnUnmap(beg, size);
147 UnmapOrDie(reinterpret_cast<void *>(beg), size);
148 }
149
150 static bool CanAllocate(uptr size, uptr alignment) {
151 return size <= SizeClassMap::kMaxSize &&
38
Assuming 'size' is > 'kMaxSize'
39
Returning zero, which participates in a condition later
152 alignment <= SizeClassMap::kMaxSize;
153 }
154
155 void *GetMetaData(const void *p) {
156 CHECK(kMetadataSize);
157 CHECK(PointerIsMine(p));
158 uptr mem = reinterpret_cast<uptr>(p);
159 uptr beg = ComputeRegionBeg(mem);
160 uptr size = ClassIdToSize(GetSizeClass(p));
161 u32 offset = mem - beg;
162 uptr n = offset / (u32)size; // 32-bit division
163 uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
164 return reinterpret_cast<void*>(meta);
165 }
166
167 NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
168 uptr class_id) {
169 DCHECK_LT(class_id, kNumClasses);
170 SizeClassInfo *sci = GetSizeClassInfo(class_id);
171 SpinMutexLock l(&sci->mutex);
172 if (sci->free_list.empty()) {
173 if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
174 return nullptr;
175 DCHECK(!sci->free_list.empty());
176 }
177 TransferBatch *b = sci->free_list.front();
178 sci->free_list.pop_front();
179 return b;
180 }
181
182 NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
183 TransferBatch *b) {
184 DCHECK_LT(class_id, kNumClasses);
185 CHECK_GT(b->Count(), 0);
186 SizeClassInfo *sci = GetSizeClassInfo(class_id);
187 SpinMutexLock l(&sci->mutex);
188 sci->free_list.push_front(b);
189 }
190
191 bool PointerIsMine(const void *p) {
192 uptr mem = reinterpret_cast<uptr>(p);
193 if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
194 mem &= (kSpaceSize - 1);
195 if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
196 return false;
197 return GetSizeClass(p) != 0;
198 }
199
200 uptr GetSizeClass(const void *p) {
201 return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
202 }
203
204 void *GetBlockBegin(const void *p) {
205 CHECK(PointerIsMine(p));
206 uptr mem = reinterpret_cast<uptr>(p);
207 uptr beg = ComputeRegionBeg(mem);
208 uptr size = ClassIdToSize(GetSizeClass(p));
209 u32 offset = mem - beg;
210 u32 n = offset / (u32)size; // 32-bit division
211 uptr res = beg + (n * (u32)size);
212 return reinterpret_cast<void*>(res);
213 }
214 void *GetBlockBeginDebug(const void *p) { return GetBlockBegin(p); }
215
216 uptr GetActuallyAllocatedSize(void *p) {
217 CHECK(PointerIsMine(p));
218 return ClassIdToSize(GetSizeClass(p));
219 }
220
221 static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
222
223 uptr TotalMemoryUsed() {
224 // No need to lock here.
225 uptr res = 0;
226 for (uptr i = 0; i < kNumPossibleRegions; i++)
227 if (possible_regions[i])
228 res += kRegionSize;
229 return res;
230 }
231
232 void TestOnlyUnmap() {
233 for (uptr i = 0; i < kNumPossibleRegions; i++)
234 if (possible_regions[i])
235 UnmapWithCallback((i * kRegionSize), kRegionSize);
236 }
237
238 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
239 // introspection API.
240 void ForceLock() {
241 for (uptr i = 0; i < kNumClasses; i++) {
242 GetSizeClassInfo(i)->mutex.Lock();
243 }
244 }
245
246 void ForceUnlock() {
247 for (int i = kNumClasses - 1; i >= 0; i--) {
248 GetSizeClassInfo(i)->mutex.Unlock();
249 }
250 }
251
252 // Iterate over all existing chunks.
253 // The allocator must be locked when calling this function.
254 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
255 for (uptr region = 0; region < kNumPossibleRegions; region++)
256 if (possible_regions[region]) {
257 uptr chunk_size = ClassIdToSize(possible_regions[region]);
258 uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
259 uptr region_beg = region * kRegionSize;
260 for (uptr chunk = region_beg;
261 chunk < region_beg + max_chunks_in_region * chunk_size;
262 chunk += chunk_size) {
263 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
264 callback(chunk, arg);
265 }
266 }
267 }
268
269 void PrintStats() {}
270
271 static uptr AdditionalSize() { return 0; }
272
273 typedef SizeClassMap SizeClassMapT;
274 static const uptr kNumClasses = SizeClassMap::kNumClasses;
275
276 private:
277 static const uptr kRegionSize = 1 << kRegionSizeLog;
278 static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
279
280 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
281 StaticSpinMutex mutex;
282 IntrusiveList<TransferBatch> free_list;
283 u32 rand_state;
284 };
285 COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
286
287 uptr ComputeRegionId(uptr mem) const {
289 if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
289 mem &= (kSpaceSize - 1);
290 const uptr res = mem >> kRegionSizeLog;
291 CHECK_LT(res, kNumPossibleRegions);
292 return res;
293 }
294
295 uptr ComputeRegionBeg(uptr mem) {
296 return mem & ~(kRegionSize - 1);
297 }
298
299 uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
300 DCHECK_LT(class_id, kNumClasses);
301 const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
302 kRegionSize, kRegionSize, PrimaryAllocatorName));
303 if (UNLIKELY(!res))
304 return 0;
305 MapUnmapCallback().OnMap(res, kRegionSize);
306 stat->Add(AllocatorStatMapped, kRegionSize);
307 CHECK(IsAligned(res, kRegionSize));
308 possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
309 return res;
310 }
311
312 SizeClassInfo *GetSizeClassInfo(uptr class_id) {
313 DCHECK_LT(class_id, kNumClasses);
314 return &size_class_info_array[class_id];
315 }
316
317 bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
318 TransferBatch **current_batch, uptr max_count,
319 uptr *pointers_array, uptr count) {
320 // If using a separate class for batches, we do not need to shuffle it.
321 if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
322 class_id != SizeClassMap::kBatchClassID))
323 RandomShuffle(pointers_array, count, &sci->rand_state);
324 TransferBatch *b = *current_batch;
325 for (uptr i = 0; i < count; i++) {
326 if (!b) {
327 b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
328 if (UNLIKELY(!b))
329 return false;
330 b->Clear();
331 }
332 b->Add((void*)pointers_array[i]);
333 if (b->Count() == max_count) {
334 sci->free_list.push_back(b);
335 b = nullptr;
336 }
337 }
338 *current_batch = b;
339 return true;
340 }
341
342 bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
343 SizeClassInfo *sci, uptr class_id) {
344 const uptr region = AllocateRegion(stat, class_id);
345 if (UNLIKELY(!region))
346 return false;
347 if (kRandomShuffleChunks)
348 if (UNLIKELY(sci->rand_state == 0))
349 // The random state is initialized from ASLR (PIE) and time.
350 sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
351 const uptr size = ClassIdToSize(class_id);
352 const uptr n_chunks = kRegionSize / (size + kMetadataSize);
353 const uptr max_count = TransferBatch::MaxCached(size);
354 DCHECK_GT(max_count, 0);
355 TransferBatch *b = nullptr;
356 constexpr uptr kShuffleArraySize = 48;
357 uptr shuffle_array[kShuffleArraySize];
358 uptr count = 0;
359 for (uptr i = region; i < region + n_chunks * size; i += size) {
360 shuffle_array[count++] = i;
361 if (count == kShuffleArraySize) {
362 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
363 shuffle_array, count)))
364 return false;
365 count = 0;
366 }
367 }
368 if (count) {
369 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
370 shuffle_array, count)))
371 return false;
372 }
373 if (b) {
374 CHECK_GT(b->Count(), 0);
375 sci->free_list.push_back(b);
376 }
377 return true;
378 }
379
380 ByteMap possible_regions;
381 SizeClassInfo size_class_info_array[kNumClasses];
382};
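
The region math above (ComputeRegionId, GetBlockBegin, GetMetaData) is plain fixed-size arithmetic: the region is found by shifting the address right by kRegionSizeLog, the chunk start by rounding the region offset down to a multiple of the chunk size, and the metadata by indexing backwards from the region end, matching the "UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1" layout. A small worked sketch with hypothetical parameters:

// Worked sketch of the region arithmetic (parameters are hypothetical,
// chosen only to make the numbers easy to follow).
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kRegionSizeLog = 20;  // 1 MiB regions (illustrative)
constexpr uintptr_t kRegionSize = uintptr_t{1} << kRegionSizeLog;
constexpr uintptr_t kMetadataSize = 16;   // per-chunk metadata (illustrative)

uintptr_t ComputeRegionBeg(uintptr_t mem) { return mem & ~(kRegionSize - 1); }
uintptr_t ComputeRegionId(uintptr_t mem) { return mem >> kRegionSizeLog; }

// Mirrors GetBlockBegin(): round the address down to the start of its chunk.
uintptr_t BlockBegin(uintptr_t mem, uintptr_t chunk_size) {
  uintptr_t beg = ComputeRegionBeg(mem);
  uintptr_t n = (mem - beg) / chunk_size;
  return beg + n * chunk_size;
}

// Mirrors GetMetaData(): metadata slots grow downward from the region end.
uintptr_t MetaDataAddr(uintptr_t mem, uintptr_t chunk_size) {
  uintptr_t beg = ComputeRegionBeg(mem);
  uintptr_t n = (mem - beg) / chunk_size;
  return (beg + kRegionSize) - (n + 1) * kMetadataSize;
}

int main() {
  const uintptr_t chunk_size = 256;
  const uintptr_t addr = (uintptr_t{5} << kRegionSizeLog) + 3 * chunk_size + 40;
  std::printf("region id = %zu\n", (size_t)ComputeRegionId(addr));  // 5
  std::printf("block beg = %#zx\n", (size_t)BlockBegin(addr, chunk_size));
  std::printf("metadata  = %#zx\n", (size_t)MetaDataAddr(addr, chunk_size));
}
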