Bug Summary

File: projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h
Warning: line 195, column 20
Division by zero

(Analyzer path summary: in SizeClassAllocator32::GetBlockBegin, `size` comes from
ClassIdToSize, which the analyzer assumes can return 0 via SizeClassMap::Size —
step 11, "Returning zero". That zero `size` is then used as the divisor in
`offset / (u32)size` at line 195, step 14. Note the caller guards with
CHECK(PointerIsMine(p)), which requires GetSizeClass(p) != 0, so this may be a
false positive — verify against SizeClassMap::Size before changing the allocator.)

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name hwasan_allocator.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -ffreestanding -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D HWASAN_WITH_INTERCEPTORS=1 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/compiler-rt/lib/hwasan -I /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -I /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/.. 
-U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/compiler-rt/lib/hwasan -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc

1//===-- hwasan_allocator.cc ------------------------- ---------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of HWAddressSanitizer.
11//
12// HWAddressSanitizer allocator.
13//===----------------------------------------------------------------------===//
14
15#include "sanitizer_common/sanitizer_atomic.h"
16#include "sanitizer_common/sanitizer_errno.h"
17#include "sanitizer_common/sanitizer_stackdepot.h"
18#include "hwasan.h"
19#include "hwasan_allocator.h"
20#include "hwasan_mapping.h"
21#include "hwasan_thread.h"
22#include "hwasan_report.h"
23
24namespace __hwasan {
25
26static Allocator allocator;
27static AllocatorCache fallback_allocator_cache;
28static SpinMutex fallback_mutex;
29static atomic_uint8_t hwasan_allocator_tagging_enabled;
30
31static const tag_t kFallbackAllocTag = 0xBB;
32static const tag_t kFallbackFreeTag = 0xBC;
33
34bool HwasanChunkView::IsAllocated() const {
35 return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
36}
37
38uptr HwasanChunkView::Beg() const {
39 return block_;
40}
41uptr HwasanChunkView::End() const {
42 return Beg() + UsedSize();
43}
44uptr HwasanChunkView::UsedSize() const {
45 return metadata_->requested_size;
46}
47u32 HwasanChunkView::GetAllocStackId() const {
48 return metadata_->alloc_context_id;
49}
50
51uptr HwasanChunkView::ActualSize() const {
52 return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
53}
54
55bool HwasanChunkView::FromSmallHeap() const {
56 return allocator.FromPrimary(reinterpret_cast<void *>(block_));
57}
58
59void GetAllocatorStats(AllocatorStatCounters s) {
60 allocator.GetStats(s);
61}
62
63void HwasanAllocatorInit() {
64 atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
65 !flags()->disable_allocator_tagging);
66 SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
67 allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
68}
69
70void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
71 allocator.SwallowCache(cache);
72}
73
74static uptr TaggedSize(uptr size) {
75 if (!size) size = 1;
76 uptr new_size = RoundUpTo(size, kShadowAlignment);
77 CHECK_GE(new_size, size)do { __sanitizer::u64 v1 = (__sanitizer::u64)((new_size)); __sanitizer
::u64 v2 = (__sanitizer::u64)((size)); if (__builtin_expect(!
!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc"
, 77, "(" "(new_size)" ") " ">=" " (" "(size)" ")", v1, v2
); } while (false)
;
78 return new_size;
79}
80
81static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
82 bool zeroise) {
83 if (orig_size > kMaxAllowedMallocSize) {
84 if (AllocatorMayReturnNull()) {
85 Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
86 orig_size);
87 return nullptr;
88 }
89 ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
90 }
91
92 alignment = Max(alignment, kShadowAlignment);
93 uptr size = TaggedSize(orig_size);
94 Thread *t = GetCurrentThread();
95 void *allocated;
96 if (t) {
97 allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
98 } else {
99 SpinMutexLock l(&fallback_mutex);
100 AllocatorCache *cache = &fallback_allocator_cache;
101 allocated = allocator.Allocate(cache, size, alignment);
102 }
103 if (UNLIKELY(!allocated)__builtin_expect(!!(!allocated), 0)) {
104 SetAllocatorOutOfMemory();
105 if (AllocatorMayReturnNull())
106 return nullptr;
107 ReportOutOfMemory(size, stack);
108 }
109 Metadata *meta =
110 reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
111 meta->requested_size = static_cast<u32>(orig_size);
112 meta->alloc_context_id = StackDepotPut(*stack);
113 if (zeroise) {
114 internal_memset(allocated, 0, size);
115 } else if (flags()->max_malloc_fill_size > 0) {
116 uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
117 internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
118 }
119
120 void *user_ptr = allocated;
121 if (flags()->tag_in_malloc &&
122 atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
123 user_ptr = (void *)TagMemoryAligned(
124 (uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag);
125
126 HWASAN_MALLOC_HOOK(user_ptr, size)do { if (&__sanitizer_malloc_hook) { __sanitizer_malloc_hook
(user_ptr, size); } RunMallocHooks(user_ptr, size); } while (
false)
;
127 return user_ptr;
128}
129
130static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
131 CHECK(tagged_ptr)do { __sanitizer::u64 v1 = (__sanitizer::u64)((tagged_ptr)); __sanitizer
::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1
!= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc"
, 131, "(" "(tagged_ptr)" ") " "!=" " (" "0" ")", v1, v2); } while
(false)
;
132 tag_t ptr_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
133 tag_t mem_tag = *reinterpret_cast<tag_t *>(
134 MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
135 return ptr_tag == mem_tag;
136}
137
138void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
139 CHECK(tagged_ptr)do { __sanitizer::u64 v1 = (__sanitizer::u64)((tagged_ptr)); __sanitizer
::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect(!!(!(v1
!= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc"
, 139, "(" "(tagged_ptr)" ") " "!=" " (" "0" ")", v1, v2); } while
(false)
;
140 HWASAN_FREE_HOOK(tagged_ptr)do { if (&__sanitizer_free_hook) { __sanitizer_free_hook(
tagged_ptr); } RunFreeHooks(tagged_ptr); } while (false)
;
141
142 if (!PointerAndMemoryTagsMatch(tagged_ptr))
143 ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
144
145 void *untagged_ptr = UntagPtr(tagged_ptr);
146 Metadata *meta =
147 reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr));
148 uptr orig_size = meta->requested_size;
149 u32 free_context_id = StackDepotPut(*stack);
150 u32 alloc_context_id = meta->alloc_context_id;
151 meta->requested_size = 0;
152 meta->alloc_context_id = 0;
153 // This memory will not be reused by anyone else, so we are free to keep it
154 // poisoned.
155 Thread *t = GetCurrentThread();
156 if (flags()->max_free_fill_size > 0) {
157 uptr fill_size = Min(orig_size, (uptr)flags()->max_free_fill_size);
158 internal_memset(untagged_ptr, flags()->free_fill_byte, fill_size);
159 }
160 if (flags()->tag_in_free &&
161 atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
162 TagMemoryAligned((uptr)untagged_ptr, TaggedSize(orig_size),
163 t ? t->GenerateRandomTag() : kFallbackFreeTag);
164 if (t) {
165 allocator.Deallocate(t->allocator_cache(), untagged_ptr);
166 if (auto *ha = t->heap_allocations())
167 ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
168 free_context_id, static_cast<u32>(orig_size)});
169 } else {
170 SpinMutexLock l(&fallback_mutex);
171 AllocatorCache *cache = &fallback_allocator_cache;
172 allocator.Deallocate(cache, untagged_ptr);
173 }
174}
175
176void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old, uptr new_size,
177 uptr alignment) {
178 if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
179 ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));
180
181 void *tagged_ptr_new =
182 HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
183 if (tagged_ptr_old && tagged_ptr_new) {
184 void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
185 Metadata *meta =
186 reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
187 internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
188 Min(new_size, static_cast<uptr>(meta->requested_size)));
189 HwasanDeallocate(stack, tagged_ptr_old);
190 }
191 return tagged_ptr_new;
192}
193
194void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
195 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))__builtin_expect(!!(CheckForCallocOverflow(size, nmemb)), 0)) {
196 if (AllocatorMayReturnNull())
197 return nullptr;
198 ReportCallocOverflow(nmemb, size, stack);
199 }
200 return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
201}
202
203HwasanChunkView FindHeapChunkByAddress(uptr address) {
204 void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
1
Calling 'CombinedAllocator::GetBlockBegin'
205 if (!block)
206 return HwasanChunkView();
207 Metadata *metadata =
208 reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
209 return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
210}
211
212static uptr AllocationSize(const void *tagged_ptr) {
213 const void *untagged_ptr = UntagPtr(tagged_ptr);
214 if (!untagged_ptr) return 0;
215 const void *beg = allocator.GetBlockBegin(untagged_ptr);
216 if (beg != untagged_ptr) return 0;
217 Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
218 return b->requested_size;
219}
220
221void *hwasan_malloc(uptr size, StackTrace *stack) {
222 return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
223}
224
225void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
226 return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
227}
228
229void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
230 if (!ptr)
231 return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
232 if (size == 0) {
233 HwasanDeallocate(stack, ptr);
234 return nullptr;
235 }
236 return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
237}
238
239void *hwasan_valloc(uptr size, StackTrace *stack) {
240 return SetErrnoOnNull(
241 HwasanAllocate(stack, size, GetPageSizeCached(), false));
242}
243
244void *hwasan_pvalloc(uptr size, StackTrace *stack) {
245 uptr PageSize = GetPageSizeCached();
246 if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))__builtin_expect(!!(CheckForPvallocOverflow(size, PageSize)),
0)
) {
247 errno(*__errno_location()) = errno_ENOMEM12;
248 if (AllocatorMayReturnNull())
249 return nullptr;
250 ReportPvallocOverflow(size, stack);
251 }
252 // pvalloc(0) should allocate one page.
253 size = size ? RoundUpTo(size, PageSize) : PageSize;
254 return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
255}
256
257void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
258 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))__builtin_expect(!!(!CheckAlignedAllocAlignmentAndSize(alignment
, size)), 0)
) {
259 errno(*__errno_location()) = errno_EINVAL22;
260 if (AllocatorMayReturnNull())
261 return nullptr;
262 ReportInvalidAlignedAllocAlignment(size, alignment, stack);
263 }
264 return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
265}
266
267void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
268 if (UNLIKELY(!IsPowerOfTwo(alignment))__builtin_expect(!!(!IsPowerOfTwo(alignment)), 0)) {
269 errno(*__errno_location()) = errno_EINVAL22;
270 if (AllocatorMayReturnNull())
271 return nullptr;
272 ReportInvalidAllocationAlignment(alignment, stack);
273 }
274 return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
275}
276
277int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
278 StackTrace *stack) {
279 if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))__builtin_expect(!!(!CheckPosixMemalignAlignment(alignment)),
0)
) {
280 if (AllocatorMayReturnNull())
281 return errno_EINVAL22;
282 ReportInvalidPosixMemalignAlignment(alignment, stack);
283 }
284 void *ptr = HwasanAllocate(stack, size, alignment, false);
285 if (UNLIKELY(!ptr)__builtin_expect(!!(!ptr), 0))
286 // OOM error is already taken care of by HwasanAllocate.
287 return errno_ENOMEM12;
288 CHECK(IsAligned((uptr)ptr, alignment))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsAligned((uptr
)ptr, alignment))); __sanitizer::u64 v2 = (__sanitizer::u64)(
0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed
("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc"
, 288, "(" "(IsAligned((uptr)ptr, alignment))" ") " "!=" " ("
"0" ")", v1, v2); } while (false)
;
289 *memptr = ptr;
290 return 0;
291}
292
293} // namespace __hwasan
294
295using namespace __hwasan;
296
297void __hwasan_enable_allocator_tagging() {
298 atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
299}
300
301void __hwasan_disable_allocator_tagging() {
302 atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
303}
304
305uptr __sanitizer_get_current_allocated_bytes() {
306 uptr stats[AllocatorStatCount];
307 allocator.GetStats(stats);
308 return stats[AllocatorStatAllocated];
309}
310
311uptr __sanitizer_get_heap_size() {
312 uptr stats[AllocatorStatCount];
313 allocator.GetStats(stats);
314 return stats[AllocatorStatMapped];
315}
316
317uptr __sanitizer_get_free_bytes() { return 1; }
318
319uptr __sanitizer_get_unmapped_bytes() { return 1; }
320
321uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
322
323int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
324
325uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_combined.h

1//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17// This class implements a complete memory allocator by using two
18// internal allocators:
19// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
20// When allocating 2^x bytes it should return 2^x aligned chunk.
21// PrimaryAllocator is used via a local AllocatorCache.
22// SecondaryAllocator can allocate anything, but is not efficient.
23template <class PrimaryAllocator, class AllocatorCache,
24 class SecondaryAllocator> // NOLINT
25class CombinedAllocator {
26 public:
27 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
28 primary_.Init(release_to_os_interval_ms);
29 secondary_.InitLinkerInitialized();
30 stats_.InitLinkerInitialized();
31 }
32
33 void Init(s32 release_to_os_interval_ms) {
34 primary_.Init(release_to_os_interval_ms);
35 secondary_.Init();
36 stats_.Init();
37 }
38
39 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
40 // Returning 0 on malloc(0) may break a lot of code.
41 if (size == 0)
42 size = 1;
43 if (size + alignment < size) {
44 Report("WARNING: %s: CombinedAllocator allocation overflow: "
45 "0x%zx bytes with 0x%zx alignment requested\n",
46 SanitizerToolName, size, alignment);
47 return nullptr;
48 }
49 uptr original_size = size;
50 // If alignment requirements are to be fulfilled by the frontend allocator
51 // rather than by the primary or secondary, passing an alignment lower than
52 // or equal to 8 will prevent any further rounding up, as well as the later
53 // alignment check.
54 if (alignment > 8)
55 size = RoundUpTo(size, alignment);
56 // The primary allocator should return a 2^x aligned allocation when
57 // requested 2^x bytes, hence using the rounded up 'size' when being
58 // serviced by the primary (this is no longer true when the primary is
59 // using a non-fixed base address). The secondary takes care of the
60 // alignment without such requirement, and allocating 'size' would use
61 // extraneous memory, so we employ 'original_size'.
62 void *res;
63 if (primary_.CanAllocate(size, alignment))
64 res = cache->Allocate(&primary_, primary_.ClassID(size));
65 else
66 res = secondary_.Allocate(&stats_, original_size, alignment);
67 if (alignment > 8)
68 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((reinterpret_cast
<uptr>(res) & (alignment - 1))); __sanitizer::u64 v2
= (__sanitizer::u64)((0)); if (__builtin_expect(!!(!(v1 == v2
)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_combined.h"
, 68, "(" "(reinterpret_cast<uptr>(res) & (alignment - 1))"
") " "==" " (" "(0)" ")", v1, v2); } while (false)
;
69 return res;
70 }
71
72 s32 ReleaseToOSIntervalMs() const {
73 return primary_.ReleaseToOSIntervalMs();
74 }
75
76 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
77 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
78 }
79
80 void ForceReleaseToOS() {
81 primary_.ForceReleaseToOS();
82 }
83
84 void Deallocate(AllocatorCache *cache, void *p) {
85 if (!p) return;
86 if (primary_.PointerIsMine(p))
87 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
88 else
89 secondary_.Deallocate(&stats_, p);
90 }
91
92 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
93 uptr alignment) {
94 if (!p)
95 return Allocate(cache, new_size, alignment);
96 if (!new_size) {
97 Deallocate(cache, p);
98 return nullptr;
99 }
100 CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine(
p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect
(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_combined.h"
, 100, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2
); } while (false)
;
101 uptr old_size = GetActuallyAllocatedSize(p);
102 uptr memcpy_size = Min(new_size, old_size);
103 void *new_p = Allocate(cache, new_size, alignment);
104 if (new_p)
105 internal_memcpy(new_p, p, memcpy_size);
106 Deallocate(cache, p);
107 return new_p;
108 }
109
110 bool PointerIsMine(void *p) {
111 if (primary_.PointerIsMine(p))
112 return true;
113 return secondary_.PointerIsMine(p);
114 }
115
116 bool FromPrimary(void *p) {
117 return primary_.PointerIsMine(p);
118 }
119
120 void *GetMetaData(const void *p) {
121 if (primary_.PointerIsMine(p))
122 return primary_.GetMetaData(p);
123 return secondary_.GetMetaData(p);
124 }
125
126 void *GetBlockBegin(const void *p) {
127 if (primary_.PointerIsMine(p))
2
Taking true branch
128 return primary_.GetBlockBegin(p);
3
Calling 'SizeClassAllocator32::GetBlockBegin'
129 return secondary_.GetBlockBegin(p);
130 }
131
132 // This function does the same as GetBlockBegin, but is much faster.
133 // Must be called with the allocator locked.
134 void *GetBlockBeginFastLocked(void *p) {
135 if (primary_.PointerIsMine(p))
136 return primary_.GetBlockBegin(p);
137 return secondary_.GetBlockBeginFastLocked(p);
138 }
139
140 uptr GetActuallyAllocatedSize(void *p) {
141 if (primary_.PointerIsMine(p))
142 return primary_.GetActuallyAllocatedSize(p);
143 return secondary_.GetActuallyAllocatedSize(p);
144 }
145
146 uptr TotalMemoryUsed() {
147 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
148 }
149
150 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
151
152 void InitCache(AllocatorCache *cache) {
153 cache->Init(&stats_);
154 }
155
156 void DestroyCache(AllocatorCache *cache) {
157 cache->Destroy(&primary_, &stats_);
158 }
159
160 void SwallowCache(AllocatorCache *cache) {
161 cache->Drain(&primary_);
162 }
163
164 void GetStats(AllocatorStatCounters s) const {
165 stats_.Get(s);
166 }
167
168 void PrintStats() {
169 primary_.PrintStats();
170 secondary_.PrintStats();
171 }
172
173 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
174 // introspection API.
175 void ForceLock() {
176 primary_.ForceLock();
177 secondary_.ForceLock();
178 }
179
180 void ForceUnlock() {
181 secondary_.ForceUnlock();
182 primary_.ForceUnlock();
183 }
184
185 // Iterate over all existing chunks.
186 // The allocator must be locked when calling this function.
187 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
188 primary_.ForEachChunk(callback, arg);
189 secondary_.ForEachChunk(callback, arg);
190 }
191
192 private:
193 PrimaryAllocator primary_;
194 SecondaryAllocator secondary_;
195 AllocatorGlobalStats stats_;
196};
197

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h

1//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
18
19// SizeClassAllocator32 -- allocator for 32-bit address space.
20// This allocator can theoretically be used on 64-bit arch, but there it is less
21// efficient than SizeClassAllocator64.
22//
23// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
24// be returned by MmapOrDie().
25//
26// Region:
27// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
28// kRegionSize).
29// Since the regions are aligned by kRegionSize, there are exactly
30// kNumPossibleRegions possible regions in the address space and so we keep
31// a ByteMap possible_regions to store the size classes of each Region.
32// 0 size class means the region is not used by the allocator.
33//
34// One Region is used to allocate chunks of a single size class.
35// A Region looks like this:
36// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
37//
38// In order to avoid false sharing the objects of this class should be
39// chache-line aligned.
40
41struct SizeClassAllocator32FlagMasks { // Bit masks.
42 enum {
43 kRandomShuffleChunks = 1,
44 kUseSeparateSizeClassForBatch = 2,
45 };
46};
47
48template <class Params>
49class SizeClassAllocator32 {
50 public:
51 static const uptr kSpaceBeg = Params::kSpaceBeg;
52 static const u64 kSpaceSize = Params::kSpaceSize;
53 static const uptr kMetadataSize = Params::kMetadataSize;
54 typedef typename Params::SizeClassMap SizeClassMap;
55 static const uptr kRegionSizeLog = Params::kRegionSizeLog;
56 typedef typename Params::ByteMap ByteMap;
57 typedef typename Params::MapUnmapCallback MapUnmapCallback;
58
59 static const bool kRandomShuffleChunks = Params::kFlags &
60 SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
61 static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
62 SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
63
64 struct TransferBatch {
65 static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
66 void SetFromArray(void *batch[], uptr count) {
67 DCHECK_LE(count, kMaxNumCached);
68 count_ = count;
69 for (uptr i = 0; i < count; i++)
70 batch_[i] = batch[i];
71 }
72 uptr Count() const { return count_; }
73 void Clear() { count_ = 0; }
74 void Add(void *ptr) {
75 batch_[count_++] = ptr;
76 DCHECK_LE(count_, kMaxNumCached);
77 }
78 void CopyToArray(void *to_batch[]) const {
79 for (uptr i = 0, n = Count(); i < n; i++)
80 to_batch[i] = batch_[i];
81 }
82
83 // How much memory do we need for a batch containing n elements.
84 static uptr AllocationSizeRequiredForNElements(uptr n) {
85 return sizeof(uptr) * 2 + sizeof(void *) * n;
86 }
87 static uptr MaxCached(uptr size) {
88 return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
89 }
90
91 TransferBatch *next;
92
93 private:
94 uptr count_;
95 void *batch_[kMaxNumCached];
96 };
97
98 static const uptr kBatchSize = sizeof(TransferBatch);
99 COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0)typedef char assertion_failed__99[2*(int)((kBatchSize & (
kBatchSize - 1)) == 0)-1]
;
100 COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr))typedef char assertion_failed__100[2*(int)(kBatchSize == SizeClassMap
::kMaxNumCachedHint * sizeof(uptr))-1]
;
101
102 static uptr ClassIdToSize(uptr class_id) {
103 return (class_id == SizeClassMap::kBatchClassID) ?
5
'?' condition is false
11
Returning zero
104 kBatchSize : SizeClassMap::Size(class_id);
6
Calling 'SizeClassMap::Size'
10
Returning from 'SizeClassMap::Size'
105 }
106
107 typedef SizeClassAllocator32<Params> ThisT;
108 typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
109
110 void Init(s32 release_to_os_interval_ms) {
111 possible_regions.Init();
112 internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
113 }
114
115 s32 ReleaseToOSIntervalMs() const {
116 return kReleaseToOSIntervalNever;
117 }
118
119 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
120 // This is empty here. Currently only implemented in 64-bit allocator.
121 }
122
123 void ForceReleaseToOS() {
124 // Currently implemented in 64-bit allocator only.
125 }
126
127 void *MapWithCallback(uptr size) {
128 void *res = MmapOrDie(size, PrimaryAllocatorName);
129 MapUnmapCallback().OnMap((uptr)res, size);
130 return res;
131 }
132
133 void UnmapWithCallback(uptr beg, uptr size) {
134 MapUnmapCallback().OnUnmap(beg, size);
135 UnmapOrDie(reinterpret_cast<void *>(beg), size);
136 }
137
138 static bool CanAllocate(uptr size, uptr alignment) {
139 return size <= SizeClassMap::kMaxSize &&
140 alignment <= SizeClassMap::kMaxSize;
141 }
142
143 void *GetMetaData(const void *p) {
144 CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine(
p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect
(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h"
, 144, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2
); } while (false)
;
145 uptr mem = reinterpret_cast<uptr>(p);
146 uptr beg = ComputeRegionBeg(mem);
147 uptr size = ClassIdToSize(GetSizeClass(p));
148 u32 offset = mem - beg;
149 uptr n = offset / (u32)size; // 32-bit division
150 uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
151 return reinterpret_cast<void*>(meta);
152 }
153
154 NOINLINE__attribute__((noinline)) TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
155 uptr class_id) {
156 DCHECK_LT(class_id, kNumClasses);
157 SizeClassInfo *sci = GetSizeClassInfo(class_id);
158 SpinMutexLock l(&sci->mutex);
159 if (sci->free_list.empty()) {
160 if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id))__builtin_expect(!!(!PopulateFreeList(stat, c, sci, class_id)
), 0)
)
161 return nullptr;
162 DCHECK(!sci->free_list.empty());
163 }
164 TransferBatch *b = sci->free_list.front();
165 sci->free_list.pop_front();
166 return b;
167 }
168
169 NOINLINE__attribute__((noinline)) void DeallocateBatch(AllocatorStats *stat, uptr class_id,
170 TransferBatch *b) {
171 DCHECK_LT(class_id, kNumClasses);
172 CHECK_GT(b->Count(), 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((b->Count())
); __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect
(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h"
, 172, "(" "(b->Count())" ") " ">" " (" "(0)" ")", v1, v2
); } while (false)
;
173 SizeClassInfo *sci = GetSizeClassInfo(class_id);
174 SpinMutexLock l(&sci->mutex);
175 sci->free_list.push_front(b);
176 }
177
178 bool PointerIsMine(const void *p) {
179 uptr mem = reinterpret_cast<uptr>(p);
180 if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
181 return false;
182 return GetSizeClass(p) != 0;
183 }
184
185 uptr GetSizeClass(const void *p) {
186 return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
187 }
188
189 void *GetBlockBegin(const void *p) {
190 CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine(
p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect
(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h"
, 190, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2
); } while (false)
;
191 uptr mem = reinterpret_cast<uptr>(p);
192 uptr beg = ComputeRegionBeg(mem);
193 uptr size = ClassIdToSize(GetSizeClass(p));
4
Calling 'SizeClassAllocator32::ClassIdToSize'
12
Returning from 'SizeClassAllocator32::ClassIdToSize'
13
'size' initialized to 0
194 u32 offset = mem - beg;
195 u32 n = offset / (u32)size; // 32-bit division
14
Division by zero
196 uptr res = beg + (n * (u32)size);
197 return reinterpret_cast<void*>(res);
198 }
199
200 uptr GetActuallyAllocatedSize(void *p) {
201 CHECK(PointerIsMine(p))do { __sanitizer::u64 v1 = (__sanitizer::u64)((PointerIsMine(
p))); __sanitizer::u64 v2 = (__sanitizer::u64)(0); if (__builtin_expect
(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h"
, 201, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2
); } while (false)
;
202 return ClassIdToSize(GetSizeClass(p));
203 }
204
  // Map a requested allocation size to its size class (see SizeClassMap).
  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
206
207 uptr TotalMemoryUsed() {
208 // No need to lock here.
209 uptr res = 0;
210 for (uptr i = 0; i < kNumPossibleRegions; i++)
211 if (possible_regions[i])
212 res += kRegionSize;
213 return res;
214 }
215
216 void TestOnlyUnmap() {
217 for (uptr i = 0; i < kNumPossibleRegions; i++)
218 if (possible_regions[i])
219 UnmapWithCallback((i * kRegionSize), kRegionSize);
220 }
221
222 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
223 // introspection API.
224 void ForceLock() {
225 for (uptr i = 0; i < kNumClasses; i++) {
226 GetSizeClassInfo(i)->mutex.Lock();
227 }
228 }
229
230 void ForceUnlock() {
231 for (int i = kNumClasses - 1; i >= 0; i--) {
232 GetSizeClassInfo(i)->mutex.Unlock();
233 }
234 }
235
236 // Iterate over all existing chunks.
237 // The allocator must be locked when calling this function.
238 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
239 for (uptr region = 0; region < kNumPossibleRegions; region++)
240 if (possible_regions[region]) {
241 uptr chunk_size = ClassIdToSize(possible_regions[region]);
242 uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
243 uptr region_beg = region * kRegionSize;
244 for (uptr chunk = region_beg;
245 chunk < region_beg + max_chunks_in_region * chunk_size;
246 chunk += chunk_size) {
247 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
248 callback(chunk, arg);
249 }
250 }
251 }
252
253 void PrintStats() {}
254
255 static uptr AdditionalSize() { return 0; }
256
257 typedef SizeClassMap SizeClassMapT;
258 static const uptr kNumClasses = SizeClassMap::kNumClasses;
259
260 private:
261 static const uptr kRegionSize = 1 << kRegionSizeLog;
262 static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
263
264 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE)__attribute__((aligned(64))) SizeClassInfo {
265 StaticSpinMutex mutex;
266 IntrusiveList<TransferBatch> free_list;
267 u32 rand_state;
268 };
269 COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0)typedef char assertion_failed__269[2*(int)(sizeof(SizeClassInfo
) % kCacheLineSize == 0)-1]
;
270
271 uptr ComputeRegionId(uptr mem) {
272 const uptr res = mem >> kRegionSizeLog;
273 CHECK_LT(res, kNumPossibleRegions)do { __sanitizer::u64 v1 = (__sanitizer::u64)((res)); __sanitizer
::u64 v2 = (__sanitizer::u64)((kNumPossibleRegions)); if (__builtin_expect
(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h"
, 273, "(" "(res)" ") " "<" " (" "(kNumPossibleRegions)" ")"
, v1, v2); } while (false)
;
274 return res;
275 }
276
277 uptr ComputeRegionBeg(uptr mem) {
278 return mem & ~(kRegionSize - 1);
279 }
280
281 uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
282 DCHECK_LT(class_id, kNumClasses);
283 const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
284 kRegionSize, kRegionSize, PrimaryAllocatorName));
285 if (UNLIKELY(!res)__builtin_expect(!!(!res), 0))
286 return 0;
287 MapUnmapCallback().OnMap(res, kRegionSize);
288 stat->Add(AllocatorStatMapped, kRegionSize);
289 CHECK(IsAligned(res, kRegionSize))do { __sanitizer::u64 v1 = (__sanitizer::u64)((IsAligned(res,
kRegionSize))); __sanitizer::u64 v2 = (__sanitizer::u64)(0);
if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed
("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h"
, 289, "(" "(IsAligned(res, kRegionSize))" ") " "!=" " (" "0"
")", v1, v2); } while (false)
;
290 possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
291 return res;
292 }
293
294 SizeClassInfo *GetSizeClassInfo(uptr class_id) {
295 DCHECK_LT(class_id, kNumClasses);
296 return &size_class_info_array[class_id];
297 }
298
299 bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
300 TransferBatch **current_batch, uptr max_count,
301 uptr *pointers_array, uptr count) {
302 // If using a separate class for batches, we do not need to shuffle it.
303 if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
304 class_id != SizeClassMap::kBatchClassID))
305 RandomShuffle(pointers_array, count, &sci->rand_state);
306 TransferBatch *b = *current_batch;
307 for (uptr i = 0; i < count; i++) {
308 if (!b) {
309 b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
310 if (UNLIKELY(!b)__builtin_expect(!!(!b), 0))
311 return false;
312 b->Clear();
313 }
314 b->Add((void*)pointers_array[i]);
315 if (b->Count() == max_count) {
316 sci->free_list.push_back(b);
317 b = nullptr;
318 }
319 }
320 *current_batch = b;
321 return true;
322 }
323
324 bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
325 SizeClassInfo *sci, uptr class_id) {
326 const uptr region = AllocateRegion(stat, class_id);
327 if (UNLIKELY(!region)__builtin_expect(!!(!region), 0))
328 return false;
329 if (kRandomShuffleChunks)
330 if (UNLIKELY(sci->rand_state == 0)__builtin_expect(!!(sci->rand_state == 0), 0))
331 // The random state is initialized from ASLR (PIE) and time.
332 sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
333 const uptr size = ClassIdToSize(class_id);
334 const uptr n_chunks = kRegionSize / (size + kMetadataSize);
335 const uptr max_count = TransferBatch::MaxCached(size);
336 DCHECK_GT(max_count, 0);
337 TransferBatch *b = nullptr;
338 constexpr uptr kShuffleArraySize = 48;
339 uptr shuffle_array[kShuffleArraySize];
340 uptr count = 0;
341 for (uptr i = region; i < region + n_chunks * size; i += size) {
342 shuffle_array[count++] = i;
343 if (count == kShuffleArraySize) {
344 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,__builtin_expect(!!(!PopulateBatches(c, sci, class_id, &b
, max_count, shuffle_array, count)), 0)
345 shuffle_array, count))__builtin_expect(!!(!PopulateBatches(c, sci, class_id, &b
, max_count, shuffle_array, count)), 0)
)
346 return false;
347 count = 0;
348 }
349 }
350 if (count) {
351 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,__builtin_expect(!!(!PopulateBatches(c, sci, class_id, &b
, max_count, shuffle_array, count)), 0)
352 shuffle_array, count))__builtin_expect(!!(!PopulateBatches(c, sci, class_id, &b
, max_count, shuffle_array, count)), 0)
)
353 return false;
354 }
355 if (b) {
356 CHECK_GT(b->Count(), 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((b->Count())
); __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect
(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h"
, 356, "(" "(b->Count())" ") " ">" " (" "(0)" ")", v1, v2
); } while (false)
;
357 sci->free_list.push_back(b);
358 }
359 return true;
360 }
361
  // Maps region index -> size class id (0 = region unassigned/unmapped).
  ByteMap possible_regions;
  // Per-class free lists and shuffle state, one cache-line-aligned entry each.
  SizeClassInfo size_class_info_array[kNumClasses];
364};

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h

1//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17// SizeClassMap maps allocation sizes into size classes and back.
18// Class 0 always corresponds to size 0.
19// The other sizes are controlled by the template parameters:
20// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
21// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
22// kMidSizeLog: the classes starting from 1 increase with step
23// 2^kMinSizeLog until 2^kMidSizeLog.
24// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
25// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
26// look like 0b1xx0..0, where x is either 0 or 1.
27//
28// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
29//
30// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
31// Next 4 classes: 256 + i * 64 (i = 1 to 4).
32// Next 4 classes: 512 + i * 128 (i = 1 to 4).
33// ...
34// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
35// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
36//
37// This structure of the size class map gives us:
38// - Efficient table-free class-to-size and size-to-class functions.
39// - Difference between two consequent size classes is between 14% and 25%
40//
41// This class also gives a hint to a thread-caching allocator about the amount
42// of chunks that need to be cached per-thread:
43// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
44// The actual number is computed in TransferBatch.
45// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
46//
47// Part of output of SizeClassMap::Print():
48// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
49// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
50// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
51// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
52// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
53// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
54// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
55// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
56//
57// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
58// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
59// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
60// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
61// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
62// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
63// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
64// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
65//
66// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
67// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
68// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
69// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
70//
71// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
72// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
73// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
74// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
75//
76// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
77// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
78// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
79// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
80//
81// ...
82//
83// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
84// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
85// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
86// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
87//
88// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
89//
90//
91// Another example (kNumBits=2):
92// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
93// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
94// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
95// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
96// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
97// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
98// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
99// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
100// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
101// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
102// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
103// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
104// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
105// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
106// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
107// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
108// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
109// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
110// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
111// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
112// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
113// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
114// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
115// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
116// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
117// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
118// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
119
120template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
121 uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
122class SizeClassMap {
123 static const uptr kMinSize = 1 << kMinSizeLog;
124 static const uptr kMidSize = 1 << kMidSizeLog;
125 static const uptr kMidClass = kMidSize / kMinSize;
126 static const uptr S = kNumBits - 1;
127 static const uptr M = (1 << S) - 1;
128
129 public:
130 // kMaxNumCachedHintT is a power of two. It serves as a hint
131 // for the size of TransferBatch, the actual size could be a bit smaller.
132 static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
133 COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0)typedef char assertion_failed__133[2*(int)((kMaxNumCachedHint
& (kMaxNumCachedHint - 1)) == 0)-1]
;
134
135 static const uptr kMaxSize = 1UL << kMaxSizeLog;
136 static const uptr kNumClasses =
137 kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
138 static const uptr kLargestClassID = kNumClasses - 2;
139 static const uptr kBatchClassID = kNumClasses - 1;
140 COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256)typedef char assertion_failed__140[2*(int)(kNumClasses >= 16
&& kNumClasses <= 256)-1]
;
141 static const uptr kNumClassesRounded =
142 kNumClasses <= 32 ? 32 :
143 kNumClasses <= 64 ? 64 :
144 kNumClasses <= 128 ? 128 : 256;
145
146 static uptr Size(uptr class_id) {
147 // Estimate the result for kBatchClassID because this class does not know
148 // the exact size of TransferBatch. It's OK since we are using the actual
149 // sizeof(TransferBatch) where it matters.
150 if (UNLIKELY(class_id == kBatchClassID)__builtin_expect(!!(class_id == kBatchClassID), 0))
7
Taking false branch
151 return kMaxNumCachedHint * sizeof(uptr);
152 if (class_id <= kMidClass)
8
Taking true branch
153 return kMinSize * class_id;
9
Returning zero
154 class_id -= kMidClass;
155 uptr t = kMidSize << (class_id >> S);
156 return t + (t >> S) * (class_id & M);
157 }
158
159 static uptr ClassID(uptr size) {
160 if (UNLIKELY(size > kMaxSize)__builtin_expect(!!(size > kMaxSize), 0))
161 return 0;
162 if (size <= kMidSize)
163 return (size + kMinSize - 1) >> kMinSizeLog;
164 const uptr l = MostSignificantSetBitIndex(size);
165 const uptr hbits = (size >> (l - S)) & M;
166 const uptr lbits = size & ((1U << (l - S)) - 1);
167 const uptr l1 = l - kMidSizeLog;
168 return kMidClass + (l1 << S) + hbits + (lbits > 0);
169 }
170
171 static uptr MaxCachedHint(uptr size) {
172 DCHECK_LE(size, kMaxSize);
173 if (UNLIKELY(size == 0)__builtin_expect(!!(size == 0), 0))
174 return 0;
175 uptr n;
176 // Force a 32-bit division if the template parameters allow for it.
177 if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
178 n = (1UL << kMaxBytesCachedLog) / size;
179 else
180 n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
181 return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
182 }
183
184 static void Print() {
185 uptr prev_s = 0;
186 uptr total_cached = 0;
187 for (uptr i = 0; i < kNumClasses; i++) {
188 uptr s = Size(i);
189 if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
190 Printf("\n");
191 uptr d = s - prev_s;
192 uptr p = prev_s ? (d * 100 / prev_s) : 0;
193 uptr l = s ? MostSignificantSetBitIndex(s) : 0;
194 uptr cached = MaxCachedHint(s) * s;
195 if (i == kBatchClassID)
196 d = p = l = 0;
197 Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
198 "cached: %zd %zd; id %zd\n",
199 i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
200 total_cached += cached;
201 prev_s = s;
202 }
203 Printf("Total cached: %zd\n", total_cached);
204 }
205
206 static void Validate() {
207 for (uptr c = 1; c < kNumClasses; c++) {
208 // Printf("Validate: c%zd\n", c);
209 uptr s = Size(c);
210 CHECK_NE(s, 0U)do { __sanitizer::u64 v1 = (__sanitizer::u64)((s)); __sanitizer
::u64 v2 = (__sanitizer::u64)((0U)); if (__builtin_expect(!!(
!(v1 != v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 210, "(" "(s)" ") " "!=" " (" "(0U)" ")", v1, v2); } while (
false)
;
211 if (c == kBatchClassID)
212 continue;
213 CHECK_EQ(ClassID(s), c)do { __sanitizer::u64 v1 = (__sanitizer::u64)((ClassID(s))); __sanitizer
::u64 v2 = (__sanitizer::u64)((c)); if (__builtin_expect(!!(!
(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 213, "(" "(ClassID(s))" ") " "==" " (" "(c)" ")", v1, v2); }
while (false)
;
214 if (c < kLargestClassID)
215 CHECK_EQ(ClassID(s + 1), c + 1)do { __sanitizer::u64 v1 = (__sanitizer::u64)((ClassID(s + 1)
)); __sanitizer::u64 v2 = (__sanitizer::u64)((c + 1)); if (__builtin_expect
(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 215, "(" "(ClassID(s + 1))" ") " "==" " (" "(c + 1)" ")", v1
, v2); } while (false)
;
216 CHECK_EQ(ClassID(s - 1), c)do { __sanitizer::u64 v1 = (__sanitizer::u64)((ClassID(s - 1)
)); __sanitizer::u64 v2 = (__sanitizer::u64)((c)); if (__builtin_expect
(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 216, "(" "(ClassID(s - 1))" ") " "==" " (" "(c)" ")", v1, v2
); } while (false)
;
217 CHECK_GT(Size(c), Size(c - 1))do { __sanitizer::u64 v1 = (__sanitizer::u64)((Size(c))); __sanitizer
::u64 v2 = (__sanitizer::u64)((Size(c - 1))); if (__builtin_expect
(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 217, "(" "(Size(c))" ") " ">" " (" "(Size(c - 1))" ")", v1
, v2); } while (false)
;
218 }
219 CHECK_EQ(ClassID(kMaxSize + 1), 0)do { __sanitizer::u64 v1 = (__sanitizer::u64)((ClassID(kMaxSize
+ 1))); __sanitizer::u64 v2 = (__sanitizer::u64)((0)); if (__builtin_expect
(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 219, "(" "(ClassID(kMaxSize + 1))" ") " "==" " (" "(0)" ")"
, v1, v2); } while (false)
;
220
221 for (uptr s = 1; s <= kMaxSize; s++) {
222 uptr c = ClassID(s);
223 // Printf("s%zd => c%zd\n", s, c);
224 CHECK_LT(c, kNumClasses)do { __sanitizer::u64 v1 = (__sanitizer::u64)((c)); __sanitizer
::u64 v2 = (__sanitizer::u64)((kNumClasses)); if (__builtin_expect
(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 224, "(" "(c)" ") " "<" " (" "(kNumClasses)" ")", v1, v2
); } while (false)
;
225 CHECK_GE(Size(c), s)do { __sanitizer::u64 v1 = (__sanitizer::u64)((Size(c))); __sanitizer
::u64 v2 = (__sanitizer::u64)((s)); if (__builtin_expect(!!(!
(v1 >= v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 225, "(" "(Size(c))" ") " ">=" " (" "(s)" ")", v1, v2); }
while (false)
;
226 if (c > 0)
227 CHECK_LT(Size(c - 1), s)do { __sanitizer::u64 v1 = (__sanitizer::u64)((Size(c - 1)));
__sanitizer::u64 v2 = (__sanitizer::u64)((s)); if (__builtin_expect
(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h"
, 227, "(" "(Size(c - 1))" ") " "<" " (" "(s)" ")", v1, v2
); } while (false)
;
228 }
229 }
230};
231
// Template arguments are <kNumBits, kMinSizeLog, kMidSizeLog, kMaxSizeLog,
// kMaxNumCachedHint, kMaxBytesCachedLog>.
typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;

// The following SizeClassMap only holds a way small number of cached entries,
// allowing for denser per-class arrays, smaller memory footprint and usually
// better performances in threaded environments.
typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;