Bug Summary

File: compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h
Warning: line 154, column 23
The result of the left shift is undefined due to shifting '256' by '4611686018427387900', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr'
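Note on the warning: along the path the analyzer reports below (scudoPvalloc -> Allocator::allocate -> SizeClassAllocator64::ClassIdToSize -> SizeClassMap::Size), the modeled shift amount is not constrained to be smaller than the 64-bit width of __sanitizer::uptr, and a left shift by an amount greater than or equal to the bit width of the (promoted) left operand is undefined behavior in C++. The snippet below is a minimal, hypothetical sketch of the rule the checker enforces; shiftLeftChecked is an illustrative helper, not part of the Scudo or sanitizer_common code.

    // Hypothetical sketch: a 64-bit left shift is only well-defined when the
    // shift amount is strictly less than 64. This mirrors the rule behind the
    // warning above; it is not the actual SizeClassMap::Size() implementation.
    #include <cassert>
    #include <cstdint>

    inline uint64_t shiftLeftChecked(uint64_t Base, uint64_t Shift) {
      assert(Shift < 64 && "shift amount must be < bit width of uint64_t");
      return Base << Shift;  // well-defined because Shift < 64 holds here
    }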

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name scudo_allocator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=all -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/build-llvm/projects/compiler-rt/lib/scudo -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D clang_rt_scudo_minimal_dynamic_x86_64_EXPORTS -I /build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/build-llvm/projects/compiler-rt/lib/scudo -I /build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/compiler-rt/lib/scudo -I /build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/llvm/include -I /build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/compiler-rt/lib/scudo/.. -D NDEBUG -D GWP_ASAN_HOOKS -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/build-llvm/projects/compiler-rt/lib/scudo -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-16-061740-32950-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/compiler-rt/lib/scudo/scudo_allocator.cpp

/build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/compiler-rt/lib/scudo/scudo_allocator.cpp

1//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// Scudo Hardened Allocator implementation.
10/// It uses the sanitizer_common allocator as a base and aims at mitigating
11/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
12/// header, a delayed free list, and additional sanity checks.
13///
14//===----------------------------------------------------------------------===//
15
16#include "scudo_allocator.h"
17#include "scudo_crc32.h"
18#include "scudo_errors.h"
19#include "scudo_flags.h"
20#include "scudo_interface_internal.h"
21#include "scudo_tsd.h"
22#include "scudo_utils.h"
23
24#include "sanitizer_common/sanitizer_allocator_checks.h"
25#include "sanitizer_common/sanitizer_allocator_interface.h"
26#include "sanitizer_common/sanitizer_quarantine.h"
27
28#ifdef GWP_ASAN_HOOKS
29# include "gwp_asan/guarded_pool_allocator.h"
30# include "gwp_asan/optional/backtrace.h"
31# include "gwp_asan/optional/options_parser.h"
32#include "gwp_asan/optional/segv_handler.h"
33#endif // GWP_ASAN_HOOKS
34
35#include <errno.h>
36#include <string.h>
37
38namespace __scudo {
39
40// Global static cookie, initialized at start-up.
41static u32 Cookie;
42
43// We default to software CRC32 if the alternatives are not supported, either
44// at compilation or at runtime.
45static atomic_uint8_t HashAlgorithm = { CRC32Software };
46
47inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
48 // If the hardware CRC32 feature is defined here, it was enabled everywhere,
49 // as opposed to only for scudo_crc32.cpp. This means that other hardware
50 // specific instructions were likely emitted at other places, and as a
51 // result there is no reason to not use it here.
52#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
53 Crc = CRC32_INTRINSIC(Crc, Value);
54 for (uptr i = 0; i < ArraySize; i++)
55 Crc = CRC32_INTRINSIC(Crc, Array[i]);
56 return Crc;
57#else
58 if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
59 Crc = computeHardwareCRC32(Crc, Value);
60 for (uptr i = 0; i < ArraySize; i++)
61 Crc = computeHardwareCRC32(Crc, Array[i]);
62 return Crc;
63 }
64 Crc = computeSoftwareCRC32(Crc, Value);
65 for (uptr i = 0; i < ArraySize; i++)
66 Crc = computeSoftwareCRC32(Crc, Array[i]);
67 return Crc;
68#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
69}
70
71static BackendT &getBackend();
72
73namespace Chunk {
74 static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
75 return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
76 getHeaderSize());
77 }
78 static inline
79 const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
80 return reinterpret_cast<const AtomicPackedHeader *>(
81 reinterpret_cast<uptr>(Ptr) - getHeaderSize());
82 }
83
84 static inline bool isAligned(const void *Ptr) {
85 return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
86 }
87
88 // We can't use the offset member of the chunk itself, as we would double
89 // fetch it without any warranty that it wouldn't have been tampered. To
90 // prevent this, we work with a local copy of the header.
91 static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
92 return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
93 getHeaderSize() - (Header->Offset << MinAlignmentLog));
94 }
95
96 // Returns the usable size for a chunk, meaning the amount of bytes from the
97 // beginning of the user data to the end of the backend allocated chunk.
98 static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
99 const uptr ClassId = Header->ClassId;
100 if (ClassId)
101 return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
102 (Header->Offset << MinAlignmentLog);
103 return SecondaryT::GetActuallyAllocatedSize(
104 getBackendPtr(Ptr, Header)) - getHeaderSize();
105 }
106
107 // Returns the size the user requested when allocating the chunk.
108 static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) {
109 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
110 if (Header->ClassId)
111 return SizeOrUnusedBytes;
112 return SecondaryT::GetActuallyAllocatedSize(
113 getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
114 }
115
116 // Compute the checksum of the chunk pointer and its header.
117 static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
118 UnpackedHeader ZeroChecksumHeader = *Header;
119 ZeroChecksumHeader.Checksum = 0;
120 uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
121 memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
122 const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
123 HeaderHolder, ARRAY_SIZE(HeaderHolder));
124 return static_cast<u16>(Crc);
125 }
126
127 // Checks the validity of a chunk by verifying its checksum. It doesn't
128 // incur termination in the event of an invalid chunk.
129 static inline bool isValid(const void *Ptr) {
130 PackedHeader NewPackedHeader =
131 atomic_load_relaxed(getConstAtomicHeader(Ptr));
132 UnpackedHeader NewUnpackedHeader =
133 bit_cast<UnpackedHeader>(NewPackedHeader);
134 return (NewUnpackedHeader.Checksum ==
135 computeChecksum(Ptr, &NewUnpackedHeader));
136 }
137
138 // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
139 // for a fully nulled out header, its state will be available anyway.
140 COMPILER_CHECK(ChunkAvailable == 0);
141
142 // Loads and unpacks the header, verifying the checksum in the process.
143 static inline
144 void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
145 PackedHeader NewPackedHeader =
146 atomic_load_relaxed(getConstAtomicHeader(Ptr));
147 *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
148 if (UNLIKELY(NewUnpackedHeader->Checksum !=
149              computeChecksum(Ptr, NewUnpackedHeader)))
150 dieWithMessage("corrupted chunk header at address %p\n", Ptr);
151 }
152
153 // Packs and stores the header, computing the checksum in the process.
154 static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
155 NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
156 PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
157 atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
158 }
159
160 // Packs and stores the header, computing the checksum in the process. We
161 // compare the current header with the expected provided one to ensure that
162 // we are not being raced by a corruption occurring in another thread.
163 static inline void compareExchangeHeader(void *Ptr,
164 UnpackedHeader *NewUnpackedHeader,
165 UnpackedHeader *OldUnpackedHeader) {
166 NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
167 PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
168 PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
169 if (UNLIKELY(!atomic_compare_exchange_strong(
170         getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
171         memory_order_relaxed)))
172 dieWithMessage("race on chunk header at address %p\n", Ptr);
173 }
174} // namespace Chunk
175
176struct QuarantineCallback {
177 explicit QuarantineCallback(AllocatorCacheT *Cache)
178 : Cache_(Cache) {}
179
180 // Chunk recycling function, returns a quarantined chunk to the backend,
181 // first making sure it hasn't been tampered with.
182 void Recycle(void *Ptr) {
183 UnpackedHeader Header;
184 Chunk::loadHeader(Ptr, &Header);
185 if (UNLIKELY(Header.State != ChunkQuarantine))
186 dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
187 UnpackedHeader NewHeader = Header;
188 NewHeader.State = ChunkAvailable;
189 Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
190 void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
191 if (Header.ClassId)
192 getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
193 else
194 getBackend().deallocateSecondary(BackendPtr);
195 }
196
197 // Internal quarantine allocation and deallocation functions. We first check
198 // that the batches are indeed serviced by the Primary.
199 // TODO(kostyak): figure out the best way to protect the batches.
200 void *Allocate(uptr Size) {
201 const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
202 return getBackend().allocatePrimary(Cache_, BatchClassId);
203 }
204
205 void Deallocate(void *Ptr) {
206 const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
207 getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
208 }
209
210 AllocatorCacheT *Cache_;
211 COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
212};
213
214typedef Quarantine<QuarantineCallback, void> QuarantineT;
215typedef QuarantineT::Cache QuarantineCacheT;
216COMPILER_CHECK(sizeof(QuarantineCacheT) <=
217               sizeof(ScudoTSD::QuarantineCachePlaceHolder));
218
219QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
220 return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
221}
222
223#ifdef GWP_ASAN_HOOKS
224static gwp_asan::GuardedPoolAllocator GuardedAlloc;
225#endif // GWP_ASAN_HOOKS
226
227struct Allocator {
228 static const uptr MaxAllowedMallocSize =
229     FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
230
231 BackendT Backend;
232 QuarantineT Quarantine;
233
234 u32 QuarantineChunksUpToSize;
235
236 bool DeallocationTypeMismatch;
237 bool ZeroContents;
238 bool DeleteSizeMismatch;
239
240 bool CheckRssLimit;
241 uptr HardRssLimitMb;
242 uptr SoftRssLimitMb;
243 atomic_uint8_t RssLimitExceeded;
244 atomic_uint64_t RssLastCheckedAtNS;
245
246 explicit Allocator(LinkerInitialized)
247 : Quarantine(LINKER_INITIALIZED) {}
248
249 NOINLINE void performSanityChecks();
250
251 void init() {
252 SanitizerToolName = "Scudo";
253 PrimaryAllocatorName = "ScudoPrimary";
254 SecondaryAllocatorName = "ScudoSecondary";
255
256 initFlags();
257
258 performSanityChecks();
259
260 // Check if hardware CRC32 is supported in the binary and by the platform,
261 // if so, opt for the CRC32 hardware version of the checksum.
262 if (&computeHardwareCRC32 && hasHardwareCRC32())
263 atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
264
265 SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
266 Backend.init(common_flags()->allocator_release_to_os_interval_ms);
267 HardRssLimitMb = common_flags()->hard_rss_limit_mb;
268 SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
269 Quarantine.Init(
270 static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
271 static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
272 QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
273 getFlags()->QuarantineChunksUpToSize;
274 DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
275 DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
276 ZeroContents = getFlags()->ZeroContents;
277
278 if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
279                         /*blocking=*/false))) {
280 Cookie = static_cast<u32>((NanoTime() >> 12) ^
281 (reinterpret_cast<uptr>(this) >> 4));
282 }
283
284 CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
285 if (CheckRssLimit)
286 atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
287 }
288
289 // Helper function that checks for a valid Scudo chunk. nullptr isn't.
290 bool isValidPointer(const void *Ptr) {
291 initThreadMaybe();
292 if (UNLIKELY(!Ptr))
293 return false;
294 if (!Chunk::isAligned(Ptr))
295 return false;
296 return Chunk::isValid(Ptr);
297 }
298
299 NOINLINE bool isRssLimitExceeded();
300
301 // Allocates a chunk.
302 void *allocate(uptr Size, uptr Alignment, AllocType Type,
303 bool ForceZeroContents = false) {
304 initThreadMaybe();
305
306 if (UNLIKELY(Alignment > MaxAlignment)) {
6. Assuming 'Alignment' is <= 'MaxAlignment'
7. Taking false branch
307 if (AllocatorMayReturnNull())
308 return nullptr;
309 reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
310 }
311 if (UNLIKELY(Alignment < MinAlignment))
8. Assuming 'Alignment' is >= 'MinAlignment'
9. Taking false branch
312 Alignment = MinAlignment;
313
314#ifdef GWP_ASAN_HOOKS
315 if (UNLIKELY(GuardedAlloc.shouldSample())) {
10. Taking false branch
316 if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
317 if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
318 __sanitizer_malloc_hook(Ptr, Size);
319 return Ptr;
320 }
321 }
322#endif // GWP_ASAN_HOOKS
323
324 const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
10.1. 'Size' is not equal to 0
11. '?' condition is true
325 Chunk::getHeaderSize();
326 const uptr AlignedSize = (Alignment > MinAlignment) ?
12. Assuming 'Alignment' is <= 'MinAlignment'
13. '?' condition is false
327 NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
328 if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
329     UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
14. Assuming 'AlignedSize' is < 'MaxAllowedMallocSize'
15. Taking false branch
330 if (AllocatorMayReturnNull())
331 return nullptr;
332 reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
333 }
334
335 if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
16. Assuming field 'CheckRssLimit' is false
336 if (AllocatorMayReturnNull())
337 return nullptr;
338 reportRssLimitExceeded();
339 }
340
341 // Primary and Secondary backed allocations have a different treatment. We
342 // deal with alignment requirements of Primary serviced allocations here,
343 // but the Secondary will take care of its own alignment needs.
344 void *BackendPtr;
345 uptr BackendSize;
346 u8 ClassId;
347 if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
17. Taking true branch
348 BackendSize = AlignedSize;
349 ClassId = SizeClassMap::ClassID(BackendSize);
350 bool UnlockRequired;
351 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
352 BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
353 if (UnlockRequired)
17.1. 'UnlockRequired' is false
18. Taking false branch
354 TSD->unlock();
355 } else {
356 BackendSize = NeededSize;
357 ClassId = 0;
358 BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
359 }
360 if (UNLIKELY(!BackendPtr)) {
19. Assuming 'BackendPtr' is non-null
20. Taking false branch
361 SetAllocatorOutOfMemory();
362 if (AllocatorMayReturnNull())
363 return nullptr;
364 reportOutOfMemory(Size);
365 }
366
367 // If requested, we will zero out the entire contents of the returned chunk.
368 if ((ForceZeroContents || ZeroContents) && ClassId)
20.1. 'ForceZeroContents' is false
21. Assuming field 'ZeroContents' is true
22. Assuming 'ClassId' is not equal to 0
23. Taking true branch
369 memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
24. Calling 'SizeClassAllocator64::ClassIdToSize'
370
371 UnpackedHeader Header = {};
372 uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
373 if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
374 // Since the Secondary takes care of alignment, a non-aligned pointer
375 // means it is from the Primary. It is also the only case where the offset
376 // field of the header would be non-zero.
377 DCHECK(ClassId);
378 const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
379 Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
380 UserPtr = AlignedUserPtr;
381 }
382 DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
383 Header.State = ChunkAllocated;
384 Header.AllocType = Type;
385 if (ClassId) {
386 Header.ClassId = ClassId;
387 Header.SizeOrUnusedBytes = Size;
388 } else {
389 // The secondary fits the allocations to a page, so the amount of unused
390 // bytes is the difference between the end of the user allocation and the
391 // next page boundary.
392 const uptr PageSize = GetPageSizeCached();
393 const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
394 if (TrailingBytes)
395 Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
396 }
397 void *Ptr = reinterpret_cast<void *>(UserPtr);
398 Chunk::storeHeader(Ptr, &Header);
399 if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
400 __sanitizer_malloc_hook(Ptr, Size);
401 return Ptr;
402 }
403
404 // Place a chunk in the quarantine or directly deallocate it in the event of
405 // a zero-sized quarantine, or if the size of the chunk is greater than the
406 // quarantine chunk size threshold.
407 void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
408 uptr Size) {
409 const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
410 if (BypassQuarantine) {
411 UnpackedHeader NewHeader = *Header;
412 NewHeader.State = ChunkAvailable;
413 Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
414 void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
415 if (Header->ClassId) {
416 bool UnlockRequired;
417 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
418 getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
419 Header->ClassId);
420 if (UnlockRequired)
421 TSD->unlock();
422 } else {
423 getBackend().deallocateSecondary(BackendPtr);
424 }
425 } else {
426 // If a small memory amount was allocated with a larger alignment, we want
427 // to take that into account. Otherwise the Quarantine would be filled
428 // with tiny chunks, taking a lot of VA memory. This is an approximation
429 // of the usable size, that allows us to not call
430 // GetActuallyAllocatedSize.
431 const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
432 UnpackedHeader NewHeader = *Header;
433 NewHeader.State = ChunkQuarantine;
434 Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
435 bool UnlockRequired;
436 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
437 Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
438 Ptr, EstimatedSize);
439 if (UnlockRequired)
440 TSD->unlock();
441 }
442 }
443
444 // Deallocates a Chunk, which means either adding it to the quarantine or
445 // directly returning it to the backend if criteria are met.
446 void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
447 AllocType Type) {
448 // For a deallocation, we only ensure minimal initialization, meaning thread
449 // local data will be left uninitialized for now (when using ELF TLS). The
450 // fallback cache will be used instead. This is a workaround for a situation
451 // where the only heap operation performed in a thread would be a free past
452 // the TLS destructors, ending up in initialized thread specific data never
453 // being destroyed properly. Any other heap operation will do a full init.
454 initThreadMaybe(/*MinimalInit=*/true);
455 if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
456 __sanitizer_free_hook(Ptr);
457 if (UNLIKELY(!Ptr))
458 return;
459
460#ifdef GWP_ASAN_HOOKS
461 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
462 GuardedAlloc.deallocate(Ptr);
463 return;
464 }
465#endif // GWP_ASAN_HOOKS
466
467 if (UNLIKELY(!Chunk::isAligned(Ptr)))
468 dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
469 UnpackedHeader Header;
470 Chunk::loadHeader(Ptr, &Header);
471 if (UNLIKELY(Header.State != ChunkAllocated))
472 dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
473 if (DeallocationTypeMismatch) {
474 // The deallocation type has to match the allocation one.
475 if (Header.AllocType != Type) {
476 // With the exception of memalign'd Chunks, that can be still be free'd.
477 if (Header.AllocType != FromMemalign || Type != FromMalloc)
478 dieWithMessage("allocation type mismatch when deallocating address "
479 "%p\n", Ptr);
480 }
481 }
482 const uptr Size = Chunk::getSize(Ptr, &Header);
483 if (DeleteSizeMismatch) {
484 if (DeleteSize && DeleteSize != Size)
485 dieWithMessage("invalid sized delete when deallocating address %p\n",
486 Ptr);
487 }
488 (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
489 quarantineOrDeallocateChunk(Ptr, &Header, Size);
490 }
491
492 // Reallocates a chunk. We can save on a new allocation if the new requested
493 // size still fits in the chunk.
494 void *reallocate(void *OldPtr, uptr NewSize) {
495 initThreadMaybe();
496
497#ifdef GWP_ASAN_HOOKS
498 if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
499 size_t OldSize = GuardedAlloc.getSize(OldPtr);
500 void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
501 if (NewPtr)
502 memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
503 GuardedAlloc.deallocate(OldPtr);
504 return NewPtr;
505 }
506#endif // GWP_ASAN_HOOKS
507
508 if (UNLIKELY(!Chunk::isAligned(OldPtr)))
509 dieWithMessage("misaligned address when reallocating address %p\n",
510 OldPtr);
511 UnpackedHeader OldHeader;
512 Chunk::loadHeader(OldPtr, &OldHeader);
513 if (UNLIKELY(OldHeader.State != ChunkAllocated))
514 dieWithMessage("invalid chunk state when reallocating address %p\n",
515 OldPtr);
516 if (DeallocationTypeMismatch) {
517 if (UNLIKELY(OldHeader.AllocType != FromMalloc))
518 dieWithMessage("allocation type mismatch when reallocating address "
519 "%p\n", OldPtr);
520 }
521 const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
522 // The new size still fits in the current chunk, and the size difference
523 // is reasonable.
524 if (NewSize <= UsableSize &&
525 (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
526 UnpackedHeader NewHeader = OldHeader;
527 NewHeader.SizeOrUnusedBytes =
528 OldHeader.ClassId ? NewSize : UsableSize - NewSize;
529 Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
530 return OldPtr;
531 }
532 // Otherwise, we have to allocate a new chunk and copy the contents of the
533 // old one.
534 void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
535 if (NewPtr) {
536 const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
537 UsableSize - OldHeader.SizeOrUnusedBytes;
538 memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
539 quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
540 }
541 return NewPtr;
542 }
543
544 // Helper function that returns the actual usable size of a chunk.
545 uptr getUsableSize(const void *Ptr) {
546 initThreadMaybe();
547 if (UNLIKELY(!Ptr))
548 return 0;
549
550#ifdef GWP_ASAN_HOOKS
551 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
552 return GuardedAlloc.getSize(Ptr);
553#endif // GWP_ASAN_HOOKS
554
555 UnpackedHeader Header;
556 Chunk::loadHeader(Ptr, &Header);
557 // Getting the usable size of a chunk only makes sense if it's allocated.
558 if (UNLIKELY(Header.State != ChunkAllocated))
559 dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
560 return Chunk::getUsableSize(Ptr, &Header);
561 }
562
563 void *calloc(uptr NMemB, uptr Size) {
564 initThreadMaybe();
565 if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
566 if (AllocatorMayReturnNull())
567 return nullptr;
568 reportCallocOverflow(NMemB, Size);
569 }
570 return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
571 }
572
573 void commitBack(ScudoTSD *TSD) {
574 Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
575 Backend.destroyCache(&TSD->Cache);
576 }
577
578 uptr getStats(AllocatorStat StatType) {
579 initThreadMaybe();
580 uptr stats[AllocatorStatCount];
581 Backend.getStats(stats);
582 return stats[StatType];
583 }
584
585 bool canReturnNull() {
586 initThreadMaybe();
587 return AllocatorMayReturnNull();
588 }
589
590 void setRssLimit(uptr LimitMb, bool HardLimit) {
591 if (HardLimit)
592 HardRssLimitMb = LimitMb;
593 else
594 SoftRssLimitMb = LimitMb;
595 CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
596 }
597
598 void printStats() {
599 initThreadMaybe();
600 Backend.printStats();
601 }
602};
603
604NOINLINE void Allocator::performSanityChecks() {
605 // Verify that the header offset field can hold the maximum offset. In the
606 // case of the Secondary allocator, it takes care of alignment and the
607 // offset will always be 0. In the case of the Primary, the worst case
608 // scenario happens in the last size class, when the backend allocation
609 // would already be aligned on the requested alignment, which would happen
610 // to be the maximum alignment that would fit in that size class. As a
611 // result, the maximum offset will be at most the maximum alignment for the
612 // last size class minus the header size, in multiples of MinAlignment.
613 UnpackedHeader Header = {};
614 const uptr MaxPrimaryAlignment =
615 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
616 const uptr MaxOffset =
617 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
618 Header.Offset = MaxOffset;
619 if (Header.Offset != MaxOffset)
620 dieWithMessage("maximum possible offset doesn't fit in header\n");
621 // Verify that we can fit the maximum size or amount of unused bytes in the
622 // header. Given that the Secondary fits the allocation to a page, the worst
623 // case scenario happens in the Primary. It will depend on the second to
624 // last and last class sizes, as well as the dynamic base for the Primary.
625 // The following is an over-approximation that works for our needs.
626 const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
627 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
628 if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
629 dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
630
631 const uptr LargestClassId = SizeClassMap::kLargestClassID;
632 Header.ClassId = LargestClassId;
633 if (Header.ClassId != LargestClassId)
634 dieWithMessage("largest class ID doesn't fit in header\n");
635}
636
637// Opportunistic RSS limit check. This will update the RSS limit status, if
638// it can, every 250ms, otherwise it will just return the current one.
639NOINLINE bool Allocator::isRssLimitExceeded() {
640 u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
641 const u64 CurrentCheck = MonotonicNanoTime();
642 if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
643 return atomic_load_relaxed(&RssLimitExceeded);
644 if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
645 CurrentCheck, memory_order_relaxed))
646 return atomic_load_relaxed(&RssLimitExceeded);
647 // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
648 // RSS from /proc/self/statm by default. We might want to
649 // call getrusage directly, even if it's less accurate.
650 const uptr CurrentRssMb = GetRSS() >> 20;
651 if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
652 dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
653 HardRssLimitMb, CurrentRssMb);
654 if (SoftRssLimitMb) {
655 if (atomic_load_relaxed(&RssLimitExceeded)) {
656 if (CurrentRssMb <= SoftRssLimitMb)
657 atomic_store_relaxed(&RssLimitExceeded, false);
658 } else {
659 if (CurrentRssMb > SoftRssLimitMb) {
660 atomic_store_relaxed(&RssLimitExceeded, true);
661 Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
662 SoftRssLimitMb, CurrentRssMb);
663 }
664 }
665 }
666 return atomic_load_relaxed(&RssLimitExceeded);
667}
668
669static Allocator Instance(LINKER_INITIALIZED);
670
671static BackendT &getBackend() {
672 return Instance.Backend;
673}
674
675void initScudo() {
676 Instance.init();
677#ifdef GWP_ASAN_HOOKS
678 gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"),
679 Printf);
680 gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
681 Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
682 GuardedAlloc.init(Opts);
683
684 if (Opts.InstallSignalHandlers)
685 gwp_asan::segv_handler::installSignalHandlers(
686 &GuardedAlloc, __sanitizer::Printf,
687 gwp_asan::backtrace::getPrintBacktraceFunction(),
688 gwp_asan::backtrace::getSegvBacktraceFunction());
689#endif // GWP_ASAN_HOOKS
690}
691
692void ScudoTSD::init() {
693 getBackend().initCache(&Cache);
694 memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
695}
696
697void ScudoTSD::commitBack() {
698 Instance.commitBack(this);
699}
700
701void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
702 if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
703   errno = EINVAL;
704 if (Instance.canReturnNull())
705 return nullptr;
706 reportAllocationAlignmentNotPowerOfTwo(Alignment);
707 }
708 return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
709}
710
711void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
712 Instance.deallocate(Ptr, Size, Alignment, Type);
713}
714
715void *scudoRealloc(void *Ptr, uptr Size) {
716 if (!Ptr)
717 return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
718 if (Size == 0) {
719 Instance.deallocate(Ptr, 0, 0, FromMalloc);
720 return nullptr;
721 }
722 return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
723}
724
725void *scudoCalloc(uptr NMemB, uptr Size) {
726 return SetErrnoOnNull(Instance.calloc(NMemB, Size));
727}
728
729void *scudoValloc(uptr Size) {
730 return SetErrnoOnNull(
731 Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
732}
733
734void *scudoPvalloc(uptr Size) {
735 const uptr PageSize = GetPageSizeCached();
736 if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
1. Assuming the condition is false
2. Taking false branch
737   errno = ENOMEM;
738 if (Instance.canReturnNull())
739 return nullptr;
740 reportPvallocOverflow(Size);
741 }
742 // pvalloc(0) should allocate one page.
743 Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
3. Assuming 'Size' is 0
4. '?' condition is false
744 return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
5. Calling 'Allocator::allocate'
745}
746
747int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
748 if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
749 if (!Instance.canReturnNull())
750 reportInvalidPosixMemalignAlignment(Alignment);
751   return EINVAL;
752 }
753 void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
754 if (UNLIKELY(!Ptr))
755   return ENOMEM;
756 *MemPtr = Ptr;
757 return 0;
758}
759
760void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
761 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
762   errno = EINVAL;
763 if (Instance.canReturnNull())
764 return nullptr;
765 reportInvalidAlignedAllocAlignment(Size, Alignment);
766 }
767 return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
768}
769
770uptr scudoMallocUsableSize(void *Ptr) {
771 return Instance.getUsableSize(Ptr);
772}
773
774} // namespace __scudo
775
776using namespace __scudo;
777
778// MallocExtension helper functions
779
780uptr __sanitizer_get_current_allocated_bytes() {
781 return Instance.getStats(AllocatorStatAllocated);
782}
783
784uptr __sanitizer_get_heap_size() {
785 return Instance.getStats(AllocatorStatMapped);
786}
787
788uptr __sanitizer_get_free_bytes() {
789 return 1;
790}
791
792uptr __sanitizer_get_unmapped_bytes() {
793 return 1;
794}
795
796uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
797 return Size;
798}
799
800int __sanitizer_get_ownership(const void *Ptr) {
801 return Instance.isValidPointer(Ptr);
802}
803
804uptr __sanitizer_get_allocated_size(const void *Ptr) {
805 return Instance.getUsableSize(Ptr);
806}
807
808#if !SANITIZER_SUPPORTS_WEAK_HOOKS
809SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
810                             void *Ptr, uptr Size) {
811 (void)Ptr;
812 (void)Size;
813}
814
815SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
816 (void)Ptr;
817}
818#endif
819
820// Interface functions
821
822void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
823 if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
824 return;
825 Instance.setRssLimit(LimitMb, !!HardLimit);
826}
827
828void __scudo_print_stats() {
829 Instance.printStats();
830}

/build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_primary64.h

1//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
17
18// SizeClassAllocator64 -- allocator for 64-bit address space.
19// The template parameter Params is a class containing the actual parameters.
20//
21// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
22// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
23// Otherwise SpaceBeg=kSpaceBeg (fixed address).
24// kSpaceSize is a power of two.
25// At the beginning the entire space is mprotect-ed, then small parts of it
26// are mapped on demand.
27//
28// Region: a part of Space dedicated to a single size class.
29// There are kNumClasses Regions of equal size.
30//
31// UserChunk: a piece of memory returned to user.
32// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
33
34// FreeArray is an array free-d chunks (stored as 4-byte offsets)
35//
36// A Region looks like this:
37// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
38
39struct SizeClassAllocator64FlagMasks { // Bit masks.
40 enum {
41 kRandomShuffleChunks = 1,
42 };
43};
44
45template <class Params>
46class SizeClassAllocator64 {
47 public:
48 using AddressSpaceView = typename Params::AddressSpaceView;
49 static const uptr kSpaceBeg = Params::kSpaceBeg;
50 static const uptr kSpaceSize = Params::kSpaceSize;
51 static const uptr kMetadataSize = Params::kMetadataSize;
52 typedef typename Params::SizeClassMap SizeClassMap;
53 typedef typename Params::MapUnmapCallback MapUnmapCallback;
54
55 static const bool kRandomShuffleChunks =
56 Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
57
58 typedef SizeClassAllocator64<Params> ThisT;
59 typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
60
61 // When we know the size class (the region base) we can represent a pointer
62 // as a 4-byte integer (offset from the region start shifted right by 4).
63 typedef u32 CompactPtrT;
64 static const uptr kCompactPtrScale = 4;
65 CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
66 return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
67 }
68 uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
69 return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
70 }
71
72 // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
73 // at heap_start and places the heap there. This mode requires kSpaceBeg ==
74 // ~(uptr)0.
75 void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
76 uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
77 PremappedHeap = heap_start != 0;
78 if (PremappedHeap) {
79     CHECK(!kUsingConstantSpaceBeg);
80 NonConstSpaceBeg = heap_start;
81 uptr RegionInfoSize = AdditionalSize();
82 RegionInfoSpace =
83 address_range.Init(RegionInfoSize, PrimaryAllocatorName);
84     CHECK_NE(RegionInfoSpace, ~(uptr)0);
85     CHECK_EQ(RegionInfoSpace,
86              address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
87                                     "SizeClassAllocator: region info"));
88 MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
89 } else {
90 if (kUsingConstantSpaceBeg) {
91       CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
92       CHECK_EQ(kSpaceBeg,
93                address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
94                                   kSpaceBeg));
95 } else {
96 // Combined allocator expects that an 2^N allocation is always aligned
97 // to 2^N. For this to work, the start of the space needs to be aligned
98 // as high as the largest size class (which also needs to be a power of
99 // 2).
100 NonConstSpaceBeg = address_range.InitAligned(
101 TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
102       CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
103 }
104 RegionInfoSpace = SpaceEnd();
105 MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
106 "SizeClassAllocator: region info");
107 }
108 SetReleaseToOSIntervalMs(release_to_os_interval_ms);
109 // Check that the RegionInfo array is aligned on the CacheLine size.
110 DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
111 }
112
113 s32 ReleaseToOSIntervalMs() const {
114 return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
115 }
116
117 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
118 atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
119 memory_order_relaxed);
120 }
121
122 void ForceReleaseToOS() {
123 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
124 BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
125 MaybeReleaseToOS(class_id, true /*force*/);
126 }
127 }
128
129 static bool CanAllocate(uptr size, uptr alignment) {
130 return size <= SizeClassMap::kMaxSize &&
131 alignment <= SizeClassMap::kMaxSize;
132 }
133
134  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
135 const CompactPtrT *chunks, uptr n_chunks) {
136 RegionInfo *region = GetRegionInfo(class_id);
137 uptr region_beg = GetRegionBeginBySizeClass(class_id);
138 CompactPtrT *free_array = GetFreeArray(region_beg);
139
140 BlockingMutexLock l(&region->mutex);
141 uptr old_num_chunks = region->num_freed_chunks;
142 uptr new_num_freed_chunks = old_num_chunks + n_chunks;
143 // Failure to allocate free array space while releasing memory is non
144 // recoverable.
145    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
146                                       new_num_freed_chunks))) {
147 Report("FATAL: Internal error: %s's allocator exhausted the free list "
148 "space for size class %zd (%zd bytes).\n", SanitizerToolName,
149 class_id, ClassIdToSize(class_id));
150 Die();
151 }
152 for (uptr i = 0; i < n_chunks; i++)
153 free_array[old_num_chunks + i] = chunks[i];
154 region->num_freed_chunks = new_num_freed_chunks;
155 region->stats.n_freed += n_chunks;
156
157 MaybeReleaseToOS(class_id, false /*force*/);
158 }
159
160  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
161 CompactPtrT *chunks, uptr n_chunks) {
162 RegionInfo *region = GetRegionInfo(class_id);
163 uptr region_beg = GetRegionBeginBySizeClass(class_id);
164 CompactPtrT *free_array = GetFreeArray(region_beg);
165
166 BlockingMutexLock l(&region->mutex);
167#if SANITIZER_WINDOWS
168 /* On Windows unmapping of memory during __sanitizer_purge_allocator is
169 explicit and immediate, so unmapped regions must be explicitly mapped back
170 in when they are accessed again. */
171 if (region->rtoi.last_released_bytes > 0) {
172 MmapFixedOrDie(region_beg, region->mapped_user,
173 "SizeClassAllocator: region data");
174 region->rtoi.n_freed_at_last_release = 0;
175 region->rtoi.last_released_bytes = 0;
176 }
177#endif
178    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
179      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
180                                      n_chunks - region->num_freed_chunks)))
181 return false;
182      CHECK_GE(region->num_freed_chunks, n_chunks);
183 }
184 region->num_freed_chunks -= n_chunks;
185 uptr base_idx = region->num_freed_chunks;
186 for (uptr i = 0; i < n_chunks; i++)
187 chunks[i] = free_array[base_idx + i];
188 region->stats.n_allocated += n_chunks;
189 return true;
190 }
191
192 bool PointerIsMine(const void *p) const {
193 uptr P = reinterpret_cast<uptr>(p);
194 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
195 return P / kSpaceSize == kSpaceBeg / kSpaceSize;
196 return P >= SpaceBeg() && P < SpaceEnd();
197 }
198
199 uptr GetRegionBegin(const void *p) {
200 if (kUsingConstantSpaceBeg)
201 return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
202 uptr space_beg = SpaceBeg();
203 return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
204 space_beg;
205 }
206
207 uptr GetRegionBeginBySizeClass(uptr class_id) const {
208 return SpaceBeg() + kRegionSize * class_id;
209 }
210
211 uptr GetSizeClass(const void *p) {
212 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
213 return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
214 return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
215 kNumClassesRounded;
216 }
217
218 void *GetBlockBegin(const void *p) {
219 uptr class_id = GetSizeClass(p);
220 if (class_id >= kNumClasses) return nullptr;
221 uptr size = ClassIdToSize(class_id);
222 if (!size) return nullptr;
223 uptr chunk_idx = GetChunkIdx((uptr)p, size);
224 uptr reg_beg = GetRegionBegin(p);
225 uptr beg = chunk_idx * size;
226 uptr next_beg = beg + size;
227 const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
228 if (region->mapped_user >= next_beg)
229 return reinterpret_cast<void*>(reg_beg + beg);
230 return nullptr;
231 }
232
233 uptr GetActuallyAllocatedSize(void *p) {
234    CHECK(PointerIsMine(p));
235 return ClassIdToSize(GetSizeClass(p));
236 }
237
238 static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
239
240 void *GetMetaData(const void *p) {
241    CHECK(kMetadataSize);
242 uptr class_id = GetSizeClass(p);
243 uptr size = ClassIdToSize(class_id);
244 uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
245 uptr region_beg = GetRegionBeginBySizeClass(class_id);
246 return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
247 (1 + chunk_idx) * kMetadataSize);
248 }
249
250 uptr TotalMemoryUsed() {
251 uptr res = 0;
252 for (uptr i = 0; i < kNumClasses; i++)
253 res += GetRegionInfo(i)->allocated_user;
254 return res;
255 }
256
257 // Test-only.
258 void TestOnlyUnmap() {
259 UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
260 }
261
262 static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
263 uptr stats_size) {
264 for (uptr class_id = 0; class_id < stats_size; class_id++)
265 if (stats[class_id] == start)
266 stats[class_id] = rss;
267 }
268
269 void PrintStats(uptr class_id, uptr rss) {
270 RegionInfo *region = GetRegionInfo(class_id);
271 if (region->mapped_user == 0) return;
272 uptr in_use = region->stats.n_allocated - region->stats.n_freed;
273 uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
274 Printf(
275 "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
276 "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
277 "last released: %6zdK region: 0x%zx\n",
278 region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
279 region->mapped_user >> 10, region->stats.n_allocated,
280 region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
281 rss >> 10, region->rtoi.num_releases,
282 region->rtoi.last_released_bytes >> 10,
283 SpaceBeg() + kRegionSize * class_id);
284 }
285
286 void PrintStats() {
287 uptr rss_stats[kNumClasses];
288 for (uptr class_id = 0; class_id < kNumClasses; class_id++)
289 rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
290 GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
291
292 uptr total_mapped = 0;
293 uptr total_rss = 0;
294 uptr n_allocated = 0;
295 uptr n_freed = 0;
296 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
297 RegionInfo *region = GetRegionInfo(class_id);
298 if (region->mapped_user != 0) {
299 total_mapped += region->mapped_user;
300 total_rss += rss_stats[class_id];
301 }
302 n_allocated += region->stats.n_allocated;
303 n_freed += region->stats.n_freed;
304 }
305
306 Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
307 "%zd allocations; remains %zd\n", total_mapped >> 20,
308 total_rss >> 20, n_allocated, n_allocated - n_freed);
309 for (uptr class_id = 1; class_id < kNumClasses; class_id++)
310 PrintStats(class_id, rss_stats[class_id]);
311 }
312
313 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
314 // introspection API.
315 void ForceLock() {
316 for (uptr i = 0; i < kNumClasses; i++) {
317 GetRegionInfo(i)->mutex.Lock();
318 }
319 }
320
321 void ForceUnlock() {
322 for (int i = (int)kNumClasses - 1; i >= 0; i--) {
323 GetRegionInfo(i)->mutex.Unlock();
324 }
325 }
326
327 // Iterate over all existing chunks.
328 // The allocator must be locked when calling this function.
329 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
330 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
331 RegionInfo *region = GetRegionInfo(class_id);
332 uptr chunk_size = ClassIdToSize(class_id);
333 uptr region_beg = SpaceBeg() + class_id * kRegionSize;
334 uptr region_allocated_user_size =
335 AddressSpaceView::Load(region)->allocated_user;
336 for (uptr chunk = region_beg;
337 chunk < region_beg + region_allocated_user_size;
338 chunk += chunk_size) {
339 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
340 callback(chunk, arg);
341 }
342 }
343 }
344
345 static uptr ClassIdToSize(uptr class_id) {
346 return SizeClassMap::Size(class_id);
25. Calling 'SizeClassMap::Size'
347 }
348
349 static uptr AdditionalSize() {
350 return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
351 GetPageSizeCached());
352 }
353
354 typedef SizeClassMap SizeClassMapT;
355 static const uptr kNumClasses = SizeClassMap::kNumClasses;
356 static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
357
358 // A packed array of counters. Each counter occupies 2^n bits, enough to store
359 // counter's max_value. Ctor will try to allocate the required buffer via
360 // mapper->MapPackedCounterArrayBuffer and the caller is expected to check
361 // whether the initialization was successful by checking IsAllocated() result.
362 // For performance's sake, none of the accessors check the validity of the
363 // arguments; it is assumed that the index is always in the [0, n) range and
364 // the value is not incremented past max_value.
365 template<class MemoryMapperT>
366 class PackedCounterArray {
367 public:
368 PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
369 : n(num_counters), memory_mapper(mapper) {
370 CHECK_GT(num_counters, 0);
371 CHECK_GT(max_value, 0);
372 constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
373 // Rounding the counter storage size up to a power of two allows bit shifts
374 // to be used when calculating a particular counter's index and offset.
375 uptr counter_size_bits =
376 RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
377 CHECK_LE(counter_size_bits, kMaxCounterBits);
378 counter_size_bits_log = Log2(counter_size_bits);
379 counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
380
381 uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
382 CHECK_GT(packing_ratio, 0);
383 packing_ratio_log = Log2(packing_ratio);
384 bit_offset_mask = packing_ratio - 1;
385
386 buffer_size =
387 (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
388 sizeof(*buffer);
389 buffer = reinterpret_cast<u64*>(
390 memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
391 }
392 ~PackedCounterArray() {
393 if (buffer) {
394 memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
395 }
396 }
397
398 bool IsAllocated() const {
399 return !!buffer;
400 }
401
402 u64 GetCount() const {
403 return n;
404 }
405
406 uptr Get(uptr i) const {
407 DCHECK_LT(i, n);
408 uptr index = i >> packing_ratio_log;
409 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
410 return (buffer[index] >> bit_offset) & counter_mask;
411 }
412
413 void Inc(uptr i) const {
414 DCHECK_LT(Get(i), counter_mask);
415 uptr index = i >> packing_ratio_log;
416 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
417 buffer[index] += 1ULL << bit_offset;
418 }
419
420 void IncRange(uptr from, uptr to) const {
421 DCHECK_LE(from, to);
422 for (uptr i = from; i <= to; i++)
423 Inc(i);
424 }
425
426 private:
427 const u64 n;
428 u64 counter_size_bits_log;
429 u64 counter_mask;
430 u64 packing_ratio_log;
431 u64 bit_offset_mask;
432
433 MemoryMapperT* const memory_mapper;
434 u64 buffer_size;
435 u64* buffer;
436 };
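The bit arithmetic in the constructor and in Get()/Inc() above is easier to verify with concrete numbers. Below is a minimal standalone sketch of the same packing scheme, assuming max_value = 15 (so counter_size_bits = 4 and 16 counters fit in one u64); a stack buffer stands in for MapPackedCounterArrayBuffer, so this is an illustration only, not the allocator's code.

// Minimal sketch of the PackedCounterArray packing scheme, assuming
// max_value = 15: each counter takes 4 bits, 16 counters fit in one u64.
// A stack buffer replaces the memory mapper; names mirror the fields above.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t counter_size_bits = 4;                            // bits per counter
  const uint64_t counter_size_bits_log = 2;                        // Log2(4)
  const uint64_t counter_mask = ~0ULL >> (64 - counter_size_bits); // 0xF
  const uint64_t packing_ratio = 64 >> counter_size_bits_log;      // 16 counters/word
  const uint64_t packing_ratio_log = 4;                            // Log2(16)
  const uint64_t bit_offset_mask = packing_ratio - 1;

  uint64_t buffer[2] = {0, 0};                                     // room for 32 counters

  auto get = [&](uint64_t i) {
    uint64_t index = i >> packing_ratio_log;
    uint64_t bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
    return (buffer[index] >> bit_offset) & counter_mask;
  };
  auto inc = [&](uint64_t i) {
    uint64_t index = i >> packing_ratio_log;
    uint64_t bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
    buffer[index] += 1ULL << bit_offset;
  };

  inc(17); inc(17); inc(3);  // counter 17 lives in buffer[1], bits 4..7
  assert(get(17) == 2 && get(3) == 1 && get(0) == 0);
  return 0;
}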
437
438 template<class MemoryMapperT>
439 class FreePagesRangeTracker {
440 public:
441 explicit FreePagesRangeTracker(MemoryMapperT* mapper)
442 : memory_mapper(mapper),
443 page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
444 in_the_range(false), current_page(0), current_range_start_page(0) {}
445
446 void NextPage(bool freed) {
447 if (freed) {
448 if (!in_the_range) {
449 current_range_start_page = current_page;
450 in_the_range = true;
451 }
452 } else {
453 CloseOpenedRange();
454 }
455 current_page++;
456 }
457
458 void Done() {
459 CloseOpenedRange();
460 }
461
462 private:
463 void CloseOpenedRange() {
464 if (in_the_range) {
465 memory_mapper->ReleasePageRangeToOS(
466 current_range_start_page << page_size_scaled_log,
467 current_page << page_size_scaled_log);
468 in_the_range = false;
469 }
470 }
471
472 MemoryMapperT* const memory_mapper;
473 const uptr page_size_scaled_log;
474 bool in_the_range;
475 uptr current_page;
476 uptr current_range_start_page;
477 };
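To make the coalescing behaviour concrete, here is a stripped-down sketch of the same NextPage()/Done() pattern. The kCompactPtrScale scaling and the memory mapper are replaced by a printf, and the page sequence in main() is made up for illustration.

// Standalone sketch of the range coalescing done by FreePagesRangeTracker:
// consecutive freed pages become one [start, end) release call. The real
// class scales page indices by page_size >> kCompactPtrScale; omitted here.
#include <cstdint>
#include <cstdio>

struct RangeTrackerSketch {
  bool in_range = false;
  uint64_t current_page = 0, range_start = 0;

  void NextPage(bool freed) {
    if (freed) {
      if (!in_range) { range_start = current_page; in_range = true; }
    } else {
      Close();
    }
    current_page++;
  }
  void Done() { Close(); }

  void Close() {
    if (in_range) {
      printf("release pages [%llu, %llu)\n",
             (unsigned long long)range_start, (unsigned long long)current_page);
      in_range = false;
    }
  }
};

int main() {
  RangeTrackerSketch t;
  const bool freed[] = {false, true, true, false, true, true, true};
  for (bool f : freed) t.NextPage(f);
  t.Done();  // prints "release pages [1, 3)" and "release pages [4, 7)"
  return 0;
}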
478
479 // Iterates over the free_array to identify memory pages containing freed
480 // chunks only and returns these pages back to OS.
481 // allocated_pages_count is the total number of pages allocated for the
482 // current bucket.
483 template<class MemoryMapperT>
484 static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
485 uptr free_array_count, uptr chunk_size,
486 uptr allocated_pages_count,
487 MemoryMapperT *memory_mapper) {
488 const uptr page_size = GetPageSizeCached();
489
490 // Figure out the number of chunks per page and whether we can take a fast
491 // path (the number of chunks per page is the same for all pages).
492 uptr full_pages_chunk_count_max;
493 bool same_chunk_count_per_page;
494 if (chunk_size <= page_size && page_size % chunk_size == 0) {
495 // Same number of chunks per page, no cross overs.
496 full_pages_chunk_count_max = page_size / chunk_size;
497 same_chunk_count_per_page = true;
498 } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
499 chunk_size % (page_size % chunk_size) == 0) {
500 // Some chunks are crossing page boundaries, which means that the page
501 // contains one or two partial chunks, but all pages contain the same
502 // number of chunks.
503 full_pages_chunk_count_max = page_size / chunk_size + 1;
504 same_chunk_count_per_page = true;
505 } else if (chunk_size <= page_size) {
506 // Some chunks are crossing page boundaries, which means that the page
507 // contains one or two partial chunks.
508 full_pages_chunk_count_max = page_size / chunk_size + 2;
509 same_chunk_count_per_page = false;
510 } else if (chunk_size > page_size && chunk_size % page_size == 0) {
511 // One chunk covers multiple pages, no cross overs.
512 full_pages_chunk_count_max = 1;
513 same_chunk_count_per_page = true;
514 } else if (chunk_size > page_size) {
515 // One chunk covers multiple pages, and some chunks are crossing page
516 // boundaries. Some pages contain one chunk, some contain two.
517 full_pages_chunk_count_max = 2;
518 same_chunk_count_per_page = false;
519 } else {
520 UNREACHABLE("All chunk_size/page_size ratios must be handled.");
521 }
522
523 PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
524 full_pages_chunk_count_max,
525 memory_mapper);
526 if (!counters.IsAllocated())
527 return;
528
529 const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
530 const uptr page_size_scaled = page_size >> kCompactPtrScale;
531 const uptr page_size_scaled_log = Log2(page_size_scaled);
532
533 // Iterate over free chunks and count how many free chunks affect each
534 // allocated page.
535 if (chunk_size <= page_size && page_size % chunk_size == 0) {
536 // Each chunk affects one page only.
537 for (uptr i = 0; i < free_array_count; i++)
538 counters.Inc(free_array[i] >> page_size_scaled_log);
539 } else {
540 // In all other cases chunks might affect more than one page.
541 for (uptr i = 0; i < free_array_count; i++) {
542 counters.IncRange(
543 free_array[i] >> page_size_scaled_log,
544 (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
545 }
546 }
547
548 // Iterate over pages detecting ranges of pages with chunk counters equal
549 // to the expected number of chunks for the particular page.
550 FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
551 if (same_chunk_count_per_page) {
552 // Fast path, every page has the same number of chunks affecting it.
553 for (uptr i = 0; i < counters.GetCount(); i++)
554 range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
555 } else {
556 // Slow path: go through the pages, keeping count of how many chunks affect
557 // each page.
558 const uptr pn =
559 chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
560 const uptr pnc = pn * chunk_size_scaled;
561 // The idea is to increment the current page pointer by the first chunk
562 // size, middle portion size (the portion of the page covered by chunks
563 // except the first and the last one) and then the last chunk size, adding
564 // up the number of chunks on the current page and checking on every step
565 // whether the page boundary was crossed.
566 uptr prev_page_boundary = 0;
567 uptr current_boundary = 0;
568 for (uptr i = 0; i < counters.GetCount(); i++) {
569 uptr page_boundary = prev_page_boundary + page_size_scaled;
570 uptr chunks_per_page = pn;
571 if (current_boundary < page_boundary) {
572 if (current_boundary > prev_page_boundary)
573 chunks_per_page++;
574 current_boundary += pnc;
575 if (current_boundary < page_boundary) {
576 chunks_per_page++;
577 current_boundary += chunk_size_scaled;
578 }
579 }
580 prev_page_boundary = page_boundary;
581
582 range_tracker.NextPage(counters.Get(i) == chunks_per_page);
583 }
584 }
585 range_tracker.Done();
586 }
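The chunk_size/page_size case analysis at the top of ReleaseFreeMemoryToOS determines both the fast/slow path choice and the expected per-page chunk count. The sketch below re-derives full_pages_chunk_count_max for a few hypothetical chunk sizes and a 4096-byte page; it mirrors the branch structure above but is not part of the allocator.

// Sketch of the chunk_size/page_size case analysis (4096-byte page assumed).
#include <cstdint>
#include <cstdio>

static void Classify(uint64_t chunk_size, uint64_t page_size) {
  uint64_t max_chunks;
  bool same_per_page;
  if (chunk_size <= page_size && page_size % chunk_size == 0) {
    max_chunks = page_size / chunk_size;      same_per_page = true;
  } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
             chunk_size % (page_size % chunk_size) == 0) {
    max_chunks = page_size / chunk_size + 1;  same_per_page = true;
  } else if (chunk_size <= page_size) {
    max_chunks = page_size / chunk_size + 2;  same_per_page = false;
  } else if (chunk_size % page_size == 0) {
    max_chunks = 1;                           same_per_page = true;
  } else {
    max_chunks = 2;                           same_per_page = false;
  }
  printf("chunk %6llu: max chunks/page %llu, same on every page: %d\n",
         (unsigned long long)chunk_size, (unsigned long long)max_chunks,
         same_per_page);
}

int main() {
  const uint64_t kPage = 4096;
  Classify(64, kPage);     // divides the page evenly: 64 chunks, fast path
  Classify(48, kPage);     // 4096 % 48 = 16, 48 % 16 == 0: 86 chunks, fast path
  Classify(1536, kPage);   // neither divides: up to 4 chunks, slow path
  Classify(8192, kPage);   // chunk spans exactly 2 pages: 1 chunk per page
  Classify(6144, kPage);   // chunk spans 1.5 pages: 1 or 2 chunks per page
  return 0;
}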
587
588 private:
589 friend class MemoryMapper;
590
591 ReservedAddressRange address_range;
592
593 static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
594 // FreeArray is the array of freed chunks (stored as 4-byte offsets).
595 // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
596 // elements, but in reality this will not happen. For simplicity we
597 // dedicate 1/8 of the region's virtual space to FreeArray.
598 static const uptr kFreeArraySize = kRegionSize / 8;
599
600 static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
601 uptr NonConstSpaceBeg;
602 uptr SpaceBeg() const {
603 return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
604 }
605 uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
606 // kRegionSize must be >= 2^32.
607 COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
608 // kRegionSize must be <= 2^36, see CompactPtrT.
609 COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
610 // Call mmap for user memory with at least this size.
611 static const uptr kUserMapSize = 1 << 16;
612 // Call mmap for metadata memory with at least this size.
613 static const uptr kMetaMapSize = 1 << 16;
614 // Call mmap for free array memory with at least this size.
615 static const uptr kFreeArrayMapSize = 1 << 16;
616
617 atomic_sint32_t release_to_os_interval_ms_;
618
619 uptr RegionInfoSpace;
620
621 // True if the user has already mapped the entire heap R/W.
622 bool PremappedHeap;
623
624 struct Stats {
625 uptr n_allocated;
626 uptr n_freed;
627 };
628
629 struct ReleaseToOsInfo {
630 uptr n_freed_at_last_release;
631 uptr num_releases;
632 u64 last_release_at_ns;
633 u64 last_released_bytes;
634 };
635
636 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
637 BlockingMutex mutex;
638 uptr num_freed_chunks; // Number of elements in the freearray.
639 uptr mapped_free_array; // Bytes mapped for freearray.
640 uptr allocated_user; // Bytes allocated for user memory.
641 uptr allocated_meta; // Bytes allocated for metadata.
642 uptr mapped_user; // Bytes mapped for user memory.
643 uptr mapped_meta; // Bytes mapped for metadata.
644 u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
645 bool exhausted; // Whether region is out of space for new chunks.
646 Stats stats;
647 ReleaseToOsInfo rtoi;
648 };
649 COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
650
651 RegionInfo *GetRegionInfo(uptr class_id) const {
652 DCHECK_LT(class_id, kNumClasses);
653 RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
654 return &regions[class_id];
655 }
656
657 uptr GetMetadataEnd(uptr region_beg) const {
658 return region_beg + kRegionSize - kFreeArraySize;
659 }
660
661 uptr GetChunkIdx(uptr chunk, uptr size) const {
662 if (!kUsingConstantSpaceBeg)
663 chunk -= SpaceBeg();
664
665 uptr offset = chunk % kRegionSize;
666 // Here we divide by a non-constant. This is costly.
667 // size always fits into 32-bits. If the offset fits too, use 32-bit div.
668 if (offset >> (SANITIZER_WORDSIZE / 2))
669 return offset / size;
670 return (u32)offset / (u32)size;
671 }
672
673 CompactPtrT *GetFreeArray(uptr region_beg) const {
674 return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
675 }
676
677 bool MapWithCallback(uptr beg, uptr size, const char *name) {
678 if (PremappedHeap)
679 return beg >= NonConstSpaceBeg &&
680 beg + size <= NonConstSpaceBeg + kSpaceSize;
681 uptr mapped = address_range.Map(beg, size, name);
682 if (UNLIKELY(!mapped))
683 return false;
684 CHECK_EQ(beg, mapped);
685 MapUnmapCallback().OnMap(beg, size);
686 return true;
687 }
688
689 void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
690 if (PremappedHeap) {
691 CHECK_GE(beg, NonConstSpaceBeg);
692 CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
693 return;
694 }
695 CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
696 MapUnmapCallback().OnMap(beg, size);
697 }
698
699 void UnmapWithCallbackOrDie(uptr beg, uptr size) {
700 if (PremappedHeap)
701 return;
702 MapUnmapCallback().OnUnmap(beg, size);
703 address_range.Unmap(beg, size);
704 }
705
706 bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
707 uptr num_freed_chunks) {
708 uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
709 if (region->mapped_free_array < needed_space) {
710 uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
711 CHECK_LE(new_mapped_free_array, kFreeArraySize);
712 uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
713 region->mapped_free_array;
714 uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
715 if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
716 "SizeClassAllocator: freearray")))
717 return false;
718 region->mapped_free_array = new_mapped_free_array;
719 }
720 return true;
721 }
722
723 // Check whether this size class is exhausted.
724 bool IsRegionExhausted(RegionInfo *region, uptr class_id,
725 uptr additional_map_size) {
726 if (LIKELY(region->mapped_user + region->mapped_meta +
727 additional_map_size <= kRegionSize - kFreeArraySize))
728 return false;
729 if (!region->exhausted) {
730 region->exhausted = true;
731 Printf("%s: Out of memory. ", SanitizerToolName);
732 Printf("The process has exhausted %zuMB for size class %zu.\n",
733 kRegionSize >> 20, ClassIdToSize(class_id));
734 }
735 return true;
736 }
737
738 NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
739 RegionInfo *region, uptr requested_count) {
740 // region->mutex is held.
741 const uptr region_beg = GetRegionBeginBySizeClass(class_id);
742 const uptr size = ClassIdToSize(class_id);
743
744 const uptr total_user_bytes =
745 region->allocated_user + requested_count * size;
746 // Map more space for chunks, if necessary.
747 if (LIKELY(total_user_bytes > region->mapped_user)) {
748 if (UNLIKELY(region->mapped_user == 0)) {
749 if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
750 // The random state is initialized from ASLR.
751 region->rand_state = static_cast<u32>(region_beg >> 12);
752 // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
753 // preventing just allocated memory from being released sooner than
754 // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
755 // for short lived processes.
756 // Do it only when the feature is turned on, to avoid a potentially
757 // extraneous syscall.
758 if (ReleaseToOSIntervalMs() >= 0)
759 region->rtoi.last_release_at_ns = MonotonicNanoTime();
760 }
761 // Do the mmap for the user memory.
762 const uptr user_map_size =
763 RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
764 if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
765 return false;
766 if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
767 user_map_size,
768 "SizeClassAllocator: region data")))
769 return false;
770 stat->Add(AllocatorStatMapped, user_map_size);
771 region->mapped_user += user_map_size;
772 }
773 const uptr new_chunks_count =
774 (region->mapped_user - region->allocated_user) / size;
775
776 if (kMetadataSize) {
777 // Calculate the required space for metadata.
778 const uptr total_meta_bytes =
779 region->allocated_meta + new_chunks_count * kMetadataSize;
780 const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
781 RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
782 // Map more space for metadata, if necessary.
783 if (meta_map_size) {
784 if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
785 return false;
786 if (UNLIKELY(!MapWithCallback(
787 GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
788 meta_map_size, "SizeClassAllocator: region metadata")))
789 return false;
790 region->mapped_meta += meta_map_size;
791 }
792 }
793
794 // If necessary, allocate more space for the free array and populate it with
795 // newly allocated chunks.
796 const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
797 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
798 return false;
799 CompactPtrT *free_array = GetFreeArray(region_beg);
800 for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
801 i++, chunk += size)
802 free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
803 if (kRandomShuffleChunks)
804 RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
805 &region->rand_state);
806
807 // All necessary memory is mapped and now it is safe to advance all
808 // 'allocated_*' counters.
809 region->num_freed_chunks += new_chunks_count;
810 region->allocated_user += new_chunks_count * size;
811 CHECK_LE(region->allocated_user, region->mapped_user);
812 region->allocated_meta += new_chunks_count * kMetadataSize;
813 CHECK_LE(region->allocated_meta, region->mapped_meta);
814 region->exhausted = false;
815
816 // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
817 // MaybeReleaseToOS from releasing just allocated pages or protect these
818 // not yet used chunks some other way.
819
820 return true;
821 }
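The bookkeeping in PopulateFreeArray can be followed with small numbers. The sketch below replays the new_chunks_count computation and the reverse-order fill of the free array for hypothetical values (chunk size 64, 256 bytes already carved into chunks, 512 bytes mapped); compact pointers are reduced to plain byte offsets and the random shuffle is omitted.

// Sketch of how PopulateFreeArray turns newly mapped user memory into
// free-array entries. Values are hypothetical; compact pointers are plain
// byte offsets here and kRandomShuffleChunks handling is omitted.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t size = 64;            // chunk size for this class
  uint64_t allocated_user = 256;       // bytes already carved into chunks
  uint64_t mapped_user = 512;          // bytes mapped for user memory so far
  uint64_t num_freed_chunks = 2;       // entries already in the free array
  uint32_t free_array[16] = {0};

  const uint64_t new_chunks_count = (mapped_user - allocated_user) / size;  // 4
  const uint64_t total_freed_chunks = num_freed_chunks + new_chunks_count;  // 6
  for (uint64_t i = 0, chunk = allocated_user; i < new_chunks_count;
       i++, chunk += size)
    free_array[total_freed_chunks - 1 - i] = (uint32_t)chunk;  // reverse order

  num_freed_chunks = total_freed_chunks;
  allocated_user += new_chunks_count * size;

  for (uint64_t i = 0; i < num_freed_chunks; i++)
    printf("free_array[%llu] = %u\n", (unsigned long long)i, free_array[i]);
  // Entries 2..5 now hold offsets 448, 384, 320, 256, i.e. the new chunks
  // were pushed in reverse address order.
  return 0;
}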
822
823 class MemoryMapper {
824 public:
825 MemoryMapper(const ThisT& base_allocator, uptr class_id)
826 : allocator(base_allocator),
827 region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
828 released_ranges_count(0),
829 released_bytes(0) {
830 }
831
832 uptr GetReleasedRangesCount() const {
833 return released_ranges_count;
834 }
835
836 uptr GetReleasedBytes() const {
837 return released_bytes;
838 }
839
840 void *MapPackedCounterArrayBuffer(uptr buffer_size) {
841 // TODO(alekseyshl): The idea to explore is to check if we have enough
842 // space between num_freed_chunks*sizeof(CompactPtrT) and
843 // mapped_free_array to fit buffer_size bytes and use that space instead
844 // of mapping a temporary one.
845 return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
846 }
847
848 void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
849 UnmapOrDie(buffer, buffer_size);
850 }
851
852 // Releases [from, to) range of pages back to OS.
853 void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
854 const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
855 const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
856 ReleaseMemoryPagesToOS(from_page, to_page);
857 released_ranges_count++;
858 released_bytes += to_page - from_page;
859 }
860
861 private:
862 const ThisT& allocator;
863 const uptr region_base;
864 uptr released_ranges_count;
865 uptr released_bytes;
866 };
867
868 // Attempts to release RAM occupied by freed chunks back to OS. The region is
869 // expected to be locked.
870 //
871 // TODO(morehouse): Support a callback on memory release so HWASan can release
872 // aliases as well.
873 void MaybeReleaseToOS(uptr class_id, bool force) {
874 RegionInfo *region = GetRegionInfo(class_id);
875 const uptr chunk_size = ClassIdToSize(class_id);
876 const uptr page_size = GetPageSizeCached();
877
878 uptr n = region->num_freed_chunks;
879 if (n * chunk_size < page_size)
880 return; // No chance to release anything.
881 if ((region->stats.n_freed -
882 region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
883 return; // Nothing new to release.
884 }
885
886 if (!force) {
887 s32 interval_ms = ReleaseToOSIntervalMs();
888 if (interval_ms < 0)
889 return;
890
891 if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
892 MonotonicNanoTime()) {
893 return; // Memory was returned recently.
894 }
895 }
896
897 MemoryMapper memory_mapper(*this, class_id);
898
899 ReleaseFreeMemoryToOS<MemoryMapper>(
900 GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
901 RoundUpTo(region->allocated_user, page_size) / page_size,
902 &memory_mapper);
903
904 if (memory_mapper.GetReleasedRangesCount() > 0) {
905 region->rtoi.n_freed_at_last_release = region->stats.n_freed;
906 region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
907 region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
908 }
909 region->rtoi.last_release_at_ns = MonotonicNanoTime();
910 }
911};
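The early returns at the top of MaybeReleaseToOS amount to three cheap gates before any page counting happens: enough pending free chunks to cover a page, enough new frees since the last release, and (unless forced) an elapsed release interval. A small sketch of the same arithmetic with hypothetical numbers:

// Sketch of the MaybeReleaseToOS gating conditions (all values hypothetical;
// interval_ms < 0 disables interval-based release, and the 'force' flag in
// the real code bypasses the interval check).
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t page_size = 4096, chunk_size = 64;
  const uint64_t num_freed_chunks = 100;      // pending free-array entries
  const uint64_t n_freed = 5000, n_freed_at_last_release = 4900;
  const int32_t interval_ms = 5000;
  const uint64_t last_release_ns = 1000000000ULL, now_ns = 7000000000ULL;

  const bool covers_a_page = num_freed_chunks * chunk_size >= page_size;
  const bool enough_new_frees =
      (n_freed - n_freed_at_last_release) * chunk_size >= page_size;
  const bool interval_elapsed =
      interval_ms >= 0 &&
      last_release_ns + (uint64_t)interval_ms * 1000000ULL <= now_ns;

  printf("release attempted: %d\n",
         covers_a_page && enough_new_frees && interval_elapsed);  // prints 1
  return 0;
}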

/build/llvm-toolchain-snapshot-13~++20210615111110+88da6c1ead3f/compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h

1//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16// SizeClassMap maps allocation sizes into size classes and back.
17// Class 0 always corresponds to size 0.
18// The other sizes are controlled by the template parameters:
19// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
20// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
21// kMidSizeLog: the classes starting from 1 increase with step
22// 2^kMinSizeLog until 2^kMidSizeLog.
23// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
24// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
25// look like 0b1xx0..0, where x is either 0 or 1.
26//
27// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
28//
29// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
30// Next 4 classes: 256 + i * 64 (i = 1 to 4).
31// Next 4 classes: 512 + i * 128 (i = 1 to 4).
32// ...
33// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
34// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
35//
36// This structure of the size class map gives us:
37// - Efficient table-free class-to-size and size-to-class functions.
38// - Difference between two consecutive size classes is between 14% and 25%
39//
40// This class also gives a hint to a thread-caching allocator about the number
41// of chunks that need to be cached per-thread:
42// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
43// The actual number is computed in TransferBatch.
44// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
45//
46// Part of output of SizeClassMap::Print():
47// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
48// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
49// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
50// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
51// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
52// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
53// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
54// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
55//
56// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
57// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
58// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
59// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
60// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
61// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
62// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
63// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
64//
65// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
66// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
67// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
68// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
69//
70// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
71// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
72// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
73// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
74//
75// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
76// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
77// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
78// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
79//
80// ...
81//
82// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
83// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
84// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
85// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
86//
87// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
88//
89//
90// Another example (kNumBits=2):
91// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
92// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
93// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
94// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
95// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
96// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
97// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
98// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
99// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
100// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
101// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
102// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
103// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
104// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
105// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
106// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
107// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
108// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
109// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
110// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
111// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
112// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
113// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
114// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
115// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
116// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
117// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
118
119template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
120 uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
121class SizeClassMap {
122 static const uptr kMinSize = 1 << kMinSizeLog;
123 static const uptr kMidSize = 1 << kMidSizeLog;
124 static const uptr kMidClass = kMidSize / kMinSize;
125 static const uptr S = kNumBits - 1;
126 static const uptr M = (1 << S) - 1;
127
128 public:
129 // kMaxNumCachedHintT is a power of two. It serves as a hint
130 // for the size of TransferBatch; the actual size could be a bit smaller.
131 static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
132 COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
133
134 static const uptr kMaxSize = 1UL << kMaxSizeLog;
135 static const uptr kNumClasses =
136 kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
137 static const uptr kLargestClassID = kNumClasses - 2;
138 static const uptr kBatchClassID = kNumClasses - 1;
139 COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
140 static const uptr kNumClassesRounded =
141 kNumClasses <= 32 ? 32 :
142 kNumClasses <= 64 ? 64 :
143 kNumClasses <= 128 ? 128 : 256;
144
145 static uptr Size(uptr class_id) {
146 // Estimate the result for kBatchClassID because this class does not know
147 // the exact size of TransferBatch. It's OK since we are using the actual
148 // sizeof(TransferBatch) where it matters.
149 if (UNLIKELY(class_id == kBatchClassID))
26. Assuming 'class_id' is not equal to 'kBatchClassID'
27. Taking false branch
150 return kMaxNumCachedHint * sizeof(uptr);
151 if (class_id <= kMidClass)
28. Assuming 'class_id' is > 'kMidClass'
29. Taking false branch
152 return kMinSize * class_id;
153 class_id -= kMidClass;
154 uptr t = kMidSize << (class_id >> S);
30. The result of the left shift is undefined due to shifting '256' by '4611686018427387900', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr'
155 return t + (t >> S) * (class_id & M);
156 }
157
158 static uptr ClassID(uptr size) {
159 if (UNLIKELY(size > kMaxSize))
160 return 0;
161 if (size <= kMidSize)
162 return (size + kMinSize - 1) >> kMinSizeLog;
163 const uptr l = MostSignificantSetBitIndex(size);
164 const uptr hbits = (size >> (l - S)) & M;
165 const uptr lbits = size & ((1U << (l - S)) - 1);
166 const uptr l1 = l - kMidSizeLog;
167 return kMidClass + (l1 << S) + hbits + (lbits > 0);
168 }
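Size() and ClassID() are round-trip consistent (ClassID(Size(c)) == c for every regular class), which Validate() further down checks inside the allocator. The following standalone sketch reproduces both functions with the parameters from the header comment (kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17, i.e. the DefaultSizeClassMap typedef) and round-trips every class except kBatchClassID; MostSignificantSetBitIndex is approximated with a GCC/Clang builtin, and this is an illustration rather than the library code.

// Standalone round-trip check of the class<->size mapping for the
// DefaultSizeClassMap parameters. The kBatchClassID special case of Size()
// is dropped because the loop below skips that class, as Validate() does.
#include <cassert>
#include <cstdint>

using uptr = uint64_t;

static const uptr kMinSizeLog = 4, kMidSizeLog = 8, kMaxSizeLog = 17;
static const uptr kMinSize = 1 << kMinSizeLog;      // 16
static const uptr kMidSize = 1 << kMidSizeLog;      // 256
static const uptr kMidClass = kMidSize / kMinSize;  // 16
static const uptr S = 2;                            // kNumBits - 1
static const uptr M = (1 << S) - 1;
static const uptr kMaxSize = 1UL << kMaxSizeLog;
static const uptr kNumClasses =
    kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;  // 54

static uptr MostSignificantSetBitIndex(uptr x) { return 63 - __builtin_clzll(x); }

static uptr Size(uptr class_id) {
  if (class_id <= kMidClass) return kMinSize * class_id;
  class_id -= kMidClass;
  uptr t = kMidSize << (class_id >> S);
  return t + (t >> S) * (class_id & M);
}

static uptr ClassID(uptr size) {
  if (size > kMaxSize) return 0;
  if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
  const uptr l = MostSignificantSetBitIndex(size);
  const uptr hbits = (size >> (l - S)) & M;
  const uptr lbits = size & ((1U << (l - S)) - 1);
  const uptr l1 = l - kMidSizeLog;
  return kMidClass + (l1 << S) + hbits + (lbits > 0);
}

int main() {
  assert(Size(1) == 16 && Size(16) == 256);
  assert(Size(17) == 320 && Size(20) == 512);   // matches the c17/c20 rows above
  for (uptr c = 1; c < kNumClasses - 1; c++)    // "- 1" skips kBatchClassID
    assert(ClassID(Size(c)) == c);
  return 0;
}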
169
170 static uptr MaxCachedHint(uptr size) {
171 DCHECK_LE(size, kMaxSize);
172 if (UNLIKELY(size == 0))
173 return 0;
174 uptr n;
175 // Force a 32-bit division if the template parameters allow for it.
176 if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
177 n = (1UL << kMaxBytesCachedLog) / size;
178 else
179 n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
180 return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
181 }
182
183 static void Print() {
184 uptr prev_s = 0;
185 uptr total_cached = 0;
186 for (uptr i = 0; i < kNumClasses; i++) {
187 uptr s = Size(i);
188 if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
189 Printf("\n");
190 uptr d = s - prev_s;
191 uptr p = prev_s ? (d * 100 / prev_s) : 0;
192 uptr l = s ? MostSignificantSetBitIndex(s) : 0;
193 uptr cached = MaxCachedHint(s) * s;
194 if (i == kBatchClassID)
195 d = p = l = 0;
196 Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
197 "cached: %zd %zd; id %zd\n",
198 i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
199 total_cached += cached;
200 prev_s = s;
201 }
202 Printf("Total cached: %zd\n", total_cached);
203 }
204
205 static void Validate() {
206 for (uptr c = 1; c < kNumClasses; c++) {
207 // Printf("Validate: c%zd\n", c);
208 uptr s = Size(c);
209 CHECK_NE(s, 0U);
210 if (c == kBatchClassID)
211 continue;
212 CHECK_EQ(ClassID(s), c);
213 if (c < kLargestClassID)
214 CHECK_EQ(ClassID(s + 1), c + 1);
215 CHECK_EQ(ClassID(s - 1), c);
216 CHECK_GT(Size(c), Size(c - 1));
217 }
218 CHECK_EQ(ClassID(kMaxSize + 1), 0);
219
220 for (uptr s = 1; s <= kMaxSize; s++) {
221 uptr c = ClassID(s);
222 // Printf("s%zd => c%zd\n", s, c);
223 CHECK_LT(c, kNumClasses);
224 CHECK_GE(Size(c), s);
225 if (c > 0)
226 CHECK_LT(Size(c - 1), s);
227 }
228 }
229};
230
231typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
232typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
233typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
234
235// The following SizeClassMap only holds a very small number of cached entries,
236// allowing for denser per-class arrays, a smaller memory footprint and usually
237// better performance in threaded environments.
238typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
239// Similar to VeryCompact map above, this one has a small number of different
240// size classes, and also reduced thread-local caches.
241typedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap;