Bug Summary

File: compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h
Warning: line 154, column 23
The result of the left shift is undefined due to shifting '256' by '1073741820', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr'
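
Note on the diagnostic: a left shift in C++ is undefined when the shift count is negative or is greater than or equal to the bit width of the promoted left operand. On the i386 target analyzed here, __sanitizer::uptr is 32 bits wide, so a shift count of 1073741820 is far out of range. The sketch below only illustrates this class of defect under those assumptions; the identifiers are hypothetical and are not taken from the allocator sources.

#include <cstdint>

using uptr = std::uintptr_t; // 32 bits wide on the i386 target of this report

// Undefined behavior whenever Count >= the bit width of uptr,
// e.g. Value = 256 and Count = 1073741820 as in the warning above.
uptr shiftUnchecked(uptr Value, uptr Count) {
  return Value << Count;
}

// A guarded variant that stays well-defined for any Count.
uptr shiftChecked(uptr Value, uptr Count) {
  return Count < sizeof(uptr) * 8 ? Value << Count : 0;
}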

Annotated Source Code

clang -cc1 -cc1 -triple i386-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name scudo_allocator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=all -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu i686 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/projects/compiler-rt/lib/scudo -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/compiler-rt/lib/scudo -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/compiler-rt/lib/scudo/.. -D GWP_ASAN_HOOKS -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0/32 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/i386-pc-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/projects/compiler-rt/lib/scudo -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-17-195756-12974-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/compiler-rt/lib/scudo/scudo_allocator.cpp

/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/compiler-rt/lib/scudo/scudo_allocator.cpp

1//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// Scudo Hardened Allocator implementation.
10/// It uses the sanitizer_common allocator as a base and aims at mitigating
11/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
12/// header, a delayed free list, and additional sanity checks.
13///
14//===----------------------------------------------------------------------===//
15
16#include "scudo_allocator.h"
17#include "scudo_crc32.h"
18#include "scudo_errors.h"
19#include "scudo_flags.h"
20#include "scudo_interface_internal.h"
21#include "scudo_tsd.h"
22#include "scudo_utils.h"
23
24#include "sanitizer_common/sanitizer_allocator_checks.h"
25#include "sanitizer_common/sanitizer_allocator_interface.h"
26#include "sanitizer_common/sanitizer_quarantine.h"
27
28#ifdef GWP_ASAN_HOOKS
29# include "gwp_asan/guarded_pool_allocator.h"
30# include "gwp_asan/optional/backtrace.h"
31# include "gwp_asan/optional/options_parser.h"
32#include "gwp_asan/optional/segv_handler.h"
33#endif // GWP_ASAN_HOOKS
34
35#include <errno.h>
36#include <string.h>
37
38namespace __scudo {
39
40// Global static cookie, initialized at start-up.
41static u32 Cookie;
42
43// We default to software CRC32 if the alternatives are not supported, either
44// at compilation or at runtime.
45static atomic_uint8_t HashAlgorithm = { CRC32Software };
46
47INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
48 // If the hardware CRC32 feature is defined here, it was enabled everywhere,
49 // as opposed to only for scudo_crc32.cpp. This means that other hardware
50 // specific instructions were likely emitted at other places, and as a
51 // result there is no reason to not use it here.
52#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
53 Crc = CRC32_INTRINSIC(Crc, Value);
54 for (uptr i = 0; i < ArraySize; i++)
55 Crc = CRC32_INTRINSIC(Crc, Array[i]);
56 return Crc;
57#else
58 if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
59 Crc = computeHardwareCRC32(Crc, Value);
60 for (uptr i = 0; i < ArraySize; i++)
61 Crc = computeHardwareCRC32(Crc, Array[i]);
62 return Crc;
63 }
64 Crc = computeSoftwareCRC32(Crc, Value);
65 for (uptr i = 0; i < ArraySize; i++)
66 Crc = computeSoftwareCRC32(Crc, Array[i]);
67 return Crc;
68#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
69}
70
71static BackendT &getBackend();
72
73namespace Chunk {
74 static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
75 return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
76 getHeaderSize());
77 }
78 static INLINE
79 const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
80 return reinterpret_cast<const AtomicPackedHeader *>(
81 reinterpret_cast<uptr>(Ptr) - getHeaderSize());
82 }
83
84 static INLINE bool isAligned(const void *Ptr) {
85 return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
86 }
87
88 // We can't use the offset member of the chunk itself, as we would double
89 // fetch it without any warranty that it wouldn't have been tampered. To
90 // prevent this, we work with a local copy of the header.
91 static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
92 return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
93 getHeaderSize() - (Header->Offset << MinAlignmentLog));
94 }
95
96 // Returns the usable size for a chunk, meaning the amount of bytes from the
97 // beginning of the user data to the end of the backend allocated chunk.
98 static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
99 const uptr ClassId = Header->ClassId;
100 if (ClassId)
101 return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
102 (Header->Offset << MinAlignmentLog);
103 return SecondaryT::GetActuallyAllocatedSize(
104 getBackendPtr(Ptr, Header)) - getHeaderSize();
105 }
106
107 // Returns the size the user requested when allocating the chunk.
108 static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
109 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
110 if (Header->ClassId)
111 return SizeOrUnusedBytes;
112 return SecondaryT::GetActuallyAllocatedSize(
113 getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
114 }
115
116 // Compute the checksum of the chunk pointer and its header.
117 static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
118 UnpackedHeader ZeroChecksumHeader = *Header;
119 ZeroChecksumHeader.Checksum = 0;
120 uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
121 memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
122 const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
123 HeaderHolder, ARRAY_SIZE(HeaderHolder));
124 return static_cast<u16>(Crc);
125 }
126
127 // Checks the validity of a chunk by verifying its checksum. It doesn't
128 // incur termination in the event of an invalid chunk.
129 static INLINE bool isValid(const void *Ptr) {
130 PackedHeader NewPackedHeader =
131 atomic_load_relaxed(getConstAtomicHeader(Ptr));
132 UnpackedHeader NewUnpackedHeader =
133 bit_cast<UnpackedHeader>(NewPackedHeader);
134 return (NewUnpackedHeader.Checksum ==
135 computeChecksum(Ptr, &NewUnpackedHeader));
136 }
137
138 // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
139 // for a fully nulled out header, its state will be available anyway.
140 COMPILER_CHECK(ChunkAvailable == 0);
141
142 // Loads and unpacks the header, verifying the checksum in the process.
143 static INLINE
144 void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
145 PackedHeader NewPackedHeader =
146 atomic_load_relaxed(getConstAtomicHeader(Ptr));
147 *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
148 if (UNLIKELY(NewUnpackedHeader->Checksum !=
149 computeChecksum(Ptr, NewUnpackedHeader)))
150 dieWithMessage("corrupted chunk header at address %p\n", Ptr);
151 }
152
153 // Packs and stores the header, computing the checksum in the process.
154 static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
155 NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
156 PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
157 atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
158 }
159
160 // Packs and stores the header, computing the checksum in the process. We
161 // compare the current header with the expected provided one to ensure that
162 // we are not being raced by a corruption occurring in another thread.
163 static INLINE void compareExchangeHeader(void *Ptr,
164 UnpackedHeader *NewUnpackedHeader,
165 UnpackedHeader *OldUnpackedHeader) {
166 NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
167 PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
168 PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
169 if (UNLIKELY(!atomic_compare_exchange_strong(
170 getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
171 memory_order_relaxed)))
172 dieWithMessage("race on chunk header at address %p\n", Ptr);
173 }
174} // namespace Chunk
175
176struct QuarantineCallback {
177 explicit QuarantineCallback(AllocatorCacheT *Cache)
178 : Cache_(Cache) {}
179
180 // Chunk recycling function, returns a quarantined chunk to the backend,
181 // first making sure it hasn't been tampered with.
182 void Recycle(void *Ptr) {
183 UnpackedHeader Header;
184 Chunk::loadHeader(Ptr, &Header);
185 if (UNLIKELY(Header.State != ChunkQuarantine))
186 dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
187 UnpackedHeader NewHeader = Header;
188 NewHeader.State = ChunkAvailable;
189 Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
190 void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
191 if (Header.ClassId)
192 getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
193 else
194 getBackend().deallocateSecondary(BackendPtr);
195 }
196
197 // Internal quarantine allocation and deallocation functions. We first check
198 // that the batches are indeed serviced by the Primary.
199 // TODO(kostyak): figure out the best way to protect the batches.
200 void *Allocate(uptr Size) {
201 const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
202 return getBackend().allocatePrimary(Cache_, BatchClassId);
203 }
204
205 void Deallocate(void *Ptr) {
206 const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
207 getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
208 }
209
210 AllocatorCacheT *Cache_;
211 COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
212};
213
214typedef Quarantine<QuarantineCallback, void> QuarantineT;
215typedef QuarantineT::Cache QuarantineCacheT;
216COMPILER_CHECK(sizeof(QuarantineCacheT) <=
217 sizeof(ScudoTSD::QuarantineCachePlaceHolder));
218
219QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
220 return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
221}
222
223#ifdef GWP_ASAN_HOOKS
224static gwp_asan::GuardedPoolAllocator GuardedAlloc;
225#endif // GWP_ASAN_HOOKS
226
227struct Allocator {
228 static const uptr MaxAllowedMallocSize =
229 FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
230
231 BackendT Backend;
232 QuarantineT Quarantine;
233
234 u32 QuarantineChunksUpToSize;
235
236 bool DeallocationTypeMismatch;
237 bool ZeroContents;
238 bool DeleteSizeMismatch;
239
240 bool CheckRssLimit;
241 uptr HardRssLimitMb;
242 uptr SoftRssLimitMb;
243 atomic_uint8_t RssLimitExceeded;
244 atomic_uint64_t RssLastCheckedAtNS;
245
246 explicit Allocator(LinkerInitialized)
247 : Quarantine(LINKER_INITIALIZED) {}
248
249 NOINLINE void performSanityChecks();
250
251 void init() {
252 SanitizerToolName = "Scudo";
253 PrimaryAllocatorName = "ScudoPrimary";
254 SecondaryAllocatorName = "ScudoSecondary";
255
256 initFlags();
257
258 performSanityChecks();
259
260 // Check if hardware CRC32 is supported in the binary and by the platform,
261 // if so, opt for the CRC32 hardware version of the checksum.
262 if (&computeHardwareCRC32 && hasHardwareCRC32())
263 atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
264
265 SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
266 Backend.init(common_flags()->allocator_release_to_os_interval_ms);
267 HardRssLimitMb = common_flags()->hard_rss_limit_mb;
268 SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
269 Quarantine.Init(
270 static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
271 static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
272 QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
273 getFlags()->QuarantineChunksUpToSize;
274 DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
275 DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
276 ZeroContents = getFlags()->ZeroContents;
277
278 if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
279 /*blocking=*/false))) {
280 Cookie = static_cast<u32>((NanoTime() >> 12) ^
281 (reinterpret_cast<uptr>(this) >> 4));
282 }
283
284 CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
285 if (CheckRssLimit)
286 atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
287 }
288
289 // Helper function that checks for a valid Scudo chunk. nullptr isn't.
290 bool isValidPointer(const void *Ptr) {
291 initThreadMaybe();
292 if (UNLIKELY(!Ptr))
293 return false;
294 if (!Chunk::isAligned(Ptr))
295 return false;
296 return Chunk::isValid(Ptr);
297 }
298
299 NOINLINE bool isRssLimitExceeded();
300
301 // Allocates a chunk.
302 void *allocate(uptr Size, uptr Alignment, AllocType Type,
303 bool ForceZeroContents = false) {
304 initThreadMaybe();
305
306#ifdef GWP_ASAN_HOOKS
307 if (UNLIKELY(GuardedAlloc.shouldSample())) {
6
Taking false branch
308 if (void *Ptr = GuardedAlloc.allocate(Size))
309 return Ptr;
310 }
311#endif // GWP_ASAN_HOOKS
312
313 if (UNLIKELY(Alignment > MaxAlignment)) {
7
Assuming 'Alignment' is <= 'MaxAlignment'
8
Taking false branch
314 if (AllocatorMayReturnNull())
315 return nullptr;
316 reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
317 }
318 if (UNLIKELY(Alignment < MinAlignment))
9
Assuming 'Alignment' is >= 'MinAlignment'
10
Taking false branch
319 Alignment = MinAlignment;
320
321 const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
10.1
'Size' is not equal to 0
11
'?' condition is true
322 Chunk::getHeaderSize();
323 const uptr AlignedSize = (Alignment > MinAlignment) ?
12
Assuming 'Alignment' is <= 'MinAlignment'
13
'?' condition is false
324 NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
325 if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
15
Taking false branch
326 UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
14
Assuming 'AlignedSize' is < 'MaxAllowedMallocSize'
327 if (AllocatorMayReturnNull())
328 return nullptr;
329 reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
330 }
331
332 if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
16
Assuming field 'CheckRssLimit' is false
333 if (AllocatorMayReturnNull())
334 return nullptr;
335 reportRssLimitExceeded();
336 }
337
338 // Primary and Secondary backed allocations have a different treatment. We
339 // deal with alignment requirements of Primary serviced allocations here,
340 // but the Secondary will take care of its own alignment needs.
341 void *BackendPtr;
342 uptr BackendSize;
343 u8 ClassId;
344 if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
17
Taking true branch
345 BackendSize = AlignedSize;
346 ClassId = SizeClassMap::ClassID(BackendSize);
347 bool UnlockRequired;
348 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
349 BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
350 if (UnlockRequired)
17.1
'UnlockRequired' is false
18
Taking false branch
351 TSD->unlock();
352 } else {
353 BackendSize = NeededSize;
354 ClassId = 0;
355 BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
356 }
357 if (UNLIKELY(!BackendPtr)) {
19
Assuming 'BackendPtr' is non-null
20
Taking false branch
358 SetAllocatorOutOfMemory();
359 if (AllocatorMayReturnNull())
360 return nullptr;
361 reportOutOfMemory(Size);
362 }
363
364 // If requested, we will zero out the entire contents of the returned chunk.
365 if ((ForceZeroContents || ZeroContents) && ClassId)
20.1
'ForceZeroContents' is false
21
Assuming field 'ZeroContents' is true
22
Assuming 'ClassId' is not equal to 0
23
Taking true branch
366 memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
24
Calling 'SizeClassAllocator32::ClassIdToSize'
367
368 UnpackedHeader Header = {};
369 uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
370 if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
371 // Since the Secondary takes care of alignment, a non-aligned pointer
372 // means it is from the Primary. It is also the only case where the offset
373 // field of the header would be non-zero.
374 DCHECK(ClassId);
375 const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
376 Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
377 UserPtr = AlignedUserPtr;
378 }
379 DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
380 Header.State = ChunkAllocated;
381 Header.AllocType = Type;
382 if (ClassId) {
383 Header.ClassId = ClassId;
384 Header.SizeOrUnusedBytes = Size;
385 } else {
386 // The secondary fits the allocations to a page, so the amount of unused
387 // bytes is the difference between the end of the user allocation and the
388 // next page boundary.
389 const uptr PageSize = GetPageSizeCached();
390 const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
391 if (TrailingBytes)
392 Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
393 }
394 void *Ptr = reinterpret_cast<void *>(UserPtr);
395 Chunk::storeHeader(Ptr, &Header);
396 if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
397 __sanitizer_malloc_hook(Ptr, Size);
398 return Ptr;
399 }
400
401 // Place a chunk in the quarantine or directly deallocate it in the event of
402 // a zero-sized quarantine, or if the size of the chunk is greater than the
403 // quarantine chunk size threshold.
404 void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
405 uptr Size) {
406 const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
407 if (BypassQuarantine) {
408 UnpackedHeader NewHeader = *Header;
409 NewHeader.State = ChunkAvailable;
410 Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
411 void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
412 if (Header->ClassId) {
413 bool UnlockRequired;
414 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
415 getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
416 Header->ClassId);
417 if (UnlockRequired)
418 TSD->unlock();
419 } else {
420 getBackend().deallocateSecondary(BackendPtr);
421 }
422 } else {
423 // If a small memory amount was allocated with a larger alignment, we want
424 // to take that into account. Otherwise the Quarantine would be filled
425 // with tiny chunks, taking a lot of VA memory. This is an approximation
426 // of the usable size, that allows us to not call
427 // GetActuallyAllocatedSize.
428 const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
429 UnpackedHeader NewHeader = *Header;
430 NewHeader.State = ChunkQuarantine;
431 Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
432 bool UnlockRequired;
433 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
434 Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
435 Ptr, EstimatedSize);
436 if (UnlockRequired)
437 TSD->unlock();
438 }
439 }
440
441 // Deallocates a Chunk, which means either adding it to the quarantine or
442 // directly returning it to the backend if criteria are met.
443 void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
444 AllocType Type) {
445 // For a deallocation, we only ensure minimal initialization, meaning thread
446 // local data will be left uninitialized for now (when using ELF TLS). The
447 // fallback cache will be used instead. This is a workaround for a situation
448 // where the only heap operation performed in a thread would be a free past
449 // the TLS destructors, ending up in initialized thread specific data never
450 // being destroyed properly. Any other heap operation will do a full init.
451 initThreadMaybe(/*MinimalInit=*/true);
452 if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
453 __sanitizer_free_hook(Ptr);
454 if (UNLIKELY(!Ptr))
455 return;
456
457#ifdef GWP_ASAN_HOOKS
458 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
459 GuardedAlloc.deallocate(Ptr);
460 return;
461 }
462#endif // GWP_ASAN_HOOKS
463
464 if (UNLIKELY(!Chunk::isAligned(Ptr)))
465 dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
466 UnpackedHeader Header;
467 Chunk::loadHeader(Ptr, &Header);
468 if (UNLIKELY(Header.State != ChunkAllocated))
469 dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
470 if (DeallocationTypeMismatch) {
471 // The deallocation type has to match the allocation one.
472 if (Header.AllocType != Type) {
473 // With the exception of memalign'd Chunks, which can still be free'd.
474 if (Header.AllocType != FromMemalign || Type != FromMalloc)
475 dieWithMessage("allocation type mismatch when deallocating address "
476 "%p\n", Ptr);
477 }
478 }
479 const uptr Size = Chunk::getSize(Ptr, &Header);
480 if (DeleteSizeMismatch) {
481 if (DeleteSize && DeleteSize != Size)
482 dieWithMessage("invalid sized delete when deallocating address %p\n",
483 Ptr);
484 }
485 (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
486 quarantineOrDeallocateChunk(Ptr, &Header, Size);
487 }
488
489 // Reallocates a chunk. We can save on a new allocation if the new requested
490 // size still fits in the chunk.
491 void *reallocate(void *OldPtr, uptr NewSize) {
492 initThreadMaybe();
493
494#ifdef GWP_ASAN_HOOKS
495 if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
496 size_t OldSize = GuardedAlloc.getSize(OldPtr);
497 void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
498 if (NewPtr)
499 memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
500 GuardedAlloc.deallocate(OldPtr);
501 return NewPtr;
502 }
503#endif // GWP_ASAN_HOOKS
504
505 if (UNLIKELY(!Chunk::isAligned(OldPtr)))
506 dieWithMessage("misaligned address when reallocating address %p\n",
507 OldPtr);
508 UnpackedHeader OldHeader;
509 Chunk::loadHeader(OldPtr, &OldHeader);
510 if (UNLIKELY(OldHeader.State != ChunkAllocated))
511 dieWithMessage("invalid chunk state when reallocating address %p\n",
512 OldPtr);
513 if (DeallocationTypeMismatch) {
514 if (UNLIKELY(OldHeader.AllocType != FromMalloc))
515 dieWithMessage("allocation type mismatch when reallocating address "
516 "%p\n", OldPtr);
517 }
518 const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
519 // The new size still fits in the current chunk, and the size difference
520 // is reasonable.
521 if (NewSize <= UsableSize &&
522 (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
523 UnpackedHeader NewHeader = OldHeader;
524 NewHeader.SizeOrUnusedBytes =
525 OldHeader.ClassId ? NewSize : UsableSize - NewSize;
526 Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
527 return OldPtr;
528 }
529 // Otherwise, we have to allocate a new chunk and copy the contents of the
530 // old one.
531 void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
532 if (NewPtr) {
533 const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
534 UsableSize - OldHeader.SizeOrUnusedBytes;
535 memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
536 quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
537 }
538 return NewPtr;
539 }
540
541 // Helper function that returns the actual usable size of a chunk.
542 uptr getUsableSize(const void *Ptr) {
543 initThreadMaybe();
544 if (UNLIKELY(!Ptr))
545 return 0;
546
547#ifdef GWP_ASAN_HOOKS
548 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
549 return GuardedAlloc.getSize(Ptr);
550#endif // GWP_ASAN_HOOKS
551
552 UnpackedHeader Header;
553 Chunk::loadHeader(Ptr, &Header);
554 // Getting the usable size of a chunk only makes sense if it's allocated.
555 if (UNLIKELY(Header.State != ChunkAllocated))
556 dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
557 return Chunk::getUsableSize(Ptr, &Header);
558 }
559
560 void *calloc(uptr NMemB, uptr Size) {
561 initThreadMaybe();
562 if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
563 if (AllocatorMayReturnNull())
564 return nullptr;
565 reportCallocOverflow(NMemB, Size);
566 }
567 return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
568 }
569
570 void commitBack(ScudoTSD *TSD) {
571 Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
572 Backend.destroyCache(&TSD->Cache);
573 }
574
575 uptr getStats(AllocatorStat StatType) {
576 initThreadMaybe();
577 uptr stats[AllocatorStatCount];
578 Backend.getStats(stats);
579 return stats[StatType];
580 }
581
582 bool canReturnNull() {
583 initThreadMaybe();
584 return AllocatorMayReturnNull();
585 }
586
587 void setRssLimit(uptr LimitMb, bool HardLimit) {
588 if (HardLimit)
589 HardRssLimitMb = LimitMb;
590 else
591 SoftRssLimitMb = LimitMb;
592 CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
593 }
594
595 void printStats() {
596 initThreadMaybe();
597 Backend.printStats();
598 }
599};
600
601NOINLINE void Allocator::performSanityChecks() {
602 // Verify that the header offset field can hold the maximum offset. In the
603 // case of the Secondary allocator, it takes care of alignment and the
604 // offset will always be 0. In the case of the Primary, the worst case
605 // scenario happens in the last size class, when the backend allocation
606 // would already be aligned on the requested alignment, which would happen
607 // to be the maximum alignment that would fit in that size class. As a
608 // result, the maximum offset will be at most the maximum alignment for the
609 // last size class minus the header size, in multiples of MinAlignment.
610 UnpackedHeader Header = {};
611 const uptr MaxPrimaryAlignment =
612 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
613 const uptr MaxOffset =
614 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
615 Header.Offset = MaxOffset;
616 if (Header.Offset != MaxOffset)
617 dieWithMessage("maximum possible offset doesn't fit in header\n");
618 // Verify that we can fit the maximum size or amount of unused bytes in the
619 // header. Given that the Secondary fits the allocation to a page, the worst
620 // case scenario happens in the Primary. It will depend on the second to
621 // last and last class sizes, as well as the dynamic base for the Primary.
622 // The following is an over-approximation that works for our needs.
623 const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
624 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
625 if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
626 dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
627
628 const uptr LargestClassId = SizeClassMap::kLargestClassID;
629 Header.ClassId = LargestClassId;
630 if (Header.ClassId != LargestClassId)
631 dieWithMessage("largest class ID doesn't fit in header\n");
632}
633
634// Opportunistic RSS limit check. This will update the RSS limit status, if
635// it can, every 250ms, otherwise it will just return the current one.
636NOINLINE bool Allocator::isRssLimitExceeded() {
637 u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
638 const u64 CurrentCheck = MonotonicNanoTime();
639 if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
640 return atomic_load_relaxed(&RssLimitExceeded);
641 if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
642 CurrentCheck, memory_order_relaxed))
643 return atomic_load_relaxed(&RssLimitExceeded);
644 // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
645 // RSS from /proc/self/statm by default. We might want to
646 // call getrusage directly, even if it's less accurate.
647 const uptr CurrentRssMb = GetRSS() >> 20;
648 if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
649 dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
650 HardRssLimitMb, CurrentRssMb);
651 if (SoftRssLimitMb) {
652 if (atomic_load_relaxed(&RssLimitExceeded)) {
653 if (CurrentRssMb <= SoftRssLimitMb)
654 atomic_store_relaxed(&RssLimitExceeded, false);
655 } else {
656 if (CurrentRssMb > SoftRssLimitMb) {
657 atomic_store_relaxed(&RssLimitExceeded, true);
658 Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
659 SoftRssLimitMb, CurrentRssMb);
660 }
661 }
662 }
663 return atomic_load_relaxed(&RssLimitExceeded);
664}
665
666static Allocator Instance(LINKER_INITIALIZED);
667
668static BackendT &getBackend() {
669 return Instance.Backend;
670}
671
672void initScudo() {
673 Instance.init();
674#ifdef GWP_ASAN_HOOKS
675 gwp_asan::options::initOptions();
676 gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
677 Opts.Backtrace = gwp_asan::options::getBacktraceFunction();
678 GuardedAlloc.init(Opts);
679
680 if (Opts.InstallSignalHandlers)
681 gwp_asan::crash_handler::installSignalHandlers(
682 &GuardedAlloc, __sanitizer::Printf,
683 gwp_asan::options::getPrintBacktraceFunction(),
684 gwp_asan::crash_handler::getSegvBacktraceFunction());
685#endif // GWP_ASAN_HOOKS
686}
687
688void ScudoTSD::init() {
689 getBackend().initCache(&Cache);
690 memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
691}
692
693void ScudoTSD::commitBack() {
694 Instance.commitBack(this);
695}
696
697void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
698 if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
699 errno = EINVAL;
700 if (Instance.canReturnNull())
701 return nullptr;
702 reportAllocationAlignmentNotPowerOfTwo(Alignment);
703 }
704 return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
705}
706
707void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
708 Instance.deallocate(Ptr, Size, Alignment, Type);
709}
710
711void *scudoRealloc(void *Ptr, uptr Size) {
712 if (!Ptr)
713 return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
714 if (Size == 0) {
715 Instance.deallocate(Ptr, 0, 0, FromMalloc);
716 return nullptr;
717 }
718 return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
719}
720
721void *scudoCalloc(uptr NMemB, uptr Size) {
722 return SetErrnoOnNull(Instance.calloc(NMemB, Size));
723}
724
725void *scudoValloc(uptr Size) {
726 return SetErrnoOnNull(
727 Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
728}
729
730void *scudoPvalloc(uptr Size) {
731 const uptr PageSize = GetPageSizeCached();
732 if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
1
Assuming the condition is false
2
Taking false branch
733 errno = ENOMEM;
734 if (Instance.canReturnNull())
735 return nullptr;
736 reportPvallocOverflow(Size);
737 }
738 // pvalloc(0) should allocate one page.
739 Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
3
Assuming 'Size' is 0
4
'?' condition is false
740 return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
5
Calling 'Allocator::allocate'
741}
742
743int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
744 if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
745 if (!Instance.canReturnNull())
746 reportInvalidPosixMemalignAlignment(Alignment);
747 return EINVAL;
748 }
749 void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
750 if (UNLIKELY(!Ptr))
751 return ENOMEM;
752 *MemPtr = Ptr;
753 return 0;
754}
755
756void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
757 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
758 errno = EINVAL;
759 if (Instance.canReturnNull())
760 return nullptr;
761 reportInvalidAlignedAllocAlignment(Size, Alignment);
762 }
763 return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
764}
765
766uptr scudoMallocUsableSize(void *Ptr) {
767 return Instance.getUsableSize(Ptr);
768}
769
770} // namespace __scudo
771
772using namespace __scudo;
773
774// MallocExtension helper functions
775
776uptr __sanitizer_get_current_allocated_bytes() {
777 return Instance.getStats(AllocatorStatAllocated);
778}
779
780uptr __sanitizer_get_heap_size() {
781 return Instance.getStats(AllocatorStatMapped);
782}
783
784uptr __sanitizer_get_free_bytes() {
785 return 1;
786}
787
788uptr __sanitizer_get_unmapped_bytes() {
789 return 1;
790}
791
792uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
793 return Size;
794}
795
796int __sanitizer_get_ownership(const void *Ptr) {
797 return Instance.isValidPointer(Ptr);
798}
799
800uptr __sanitizer_get_allocated_size(const void *Ptr) {
801 return Instance.getUsableSize(Ptr);
802}
803
804#if !SANITIZER_SUPPORTS_WEAK_HOOKS
805SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
806 void *Ptr, uptr Size) {
807 (void)Ptr;
808 (void)Size;
809}
810
811SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
812 (void)Ptr;
813}
814#endif
815
816// Interface functions
817
818void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
819 if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
820 return;
821 Instance.setRssLimit(LimitMb, !!HardLimit);
822}
823
824void __scudo_print_stats() {
825 Instance.printStats();
826}

/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_primary32.h

1//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
17
18// SizeClassAllocator32 -- allocator for 32-bit address space.
19// This allocator can theoretically be used on 64-bit arch, but there it is less
20// efficient than SizeClassAllocator64.
21//
22// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
23// be returned by MmapOrDie().
24//
25// Region:
26// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
27// kRegionSize).
28// Since the regions are aligned by kRegionSize, there are exactly
29// kNumPossibleRegions possible regions in the address space and so we keep
30// a ByteMap possible_regions to store the size classes of each Region.
31// 0 size class means the region is not used by the allocator.
32//
33// One Region is used to allocate chunks of a single size class.
34// A Region looks like this:
35// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
36//
37// In order to avoid false sharing the objects of this class should be
38// chache-line aligned.
39
40struct SizeClassAllocator32FlagMasks { // Bit masks.
41 enum {
42 kRandomShuffleChunks = 1,
43 kUseSeparateSizeClassForBatch = 2,
44 };
45};
46
47template <class Params>
48class SizeClassAllocator32 {
49 private:
50 static const u64 kTwoLevelByteMapSize1 =
51 (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
52 static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;
53
54 public:
55 using AddressSpaceView = typename Params::AddressSpaceView;
56 static const uptr kSpaceBeg = Params::kSpaceBeg;
57 static const u64 kSpaceSize = Params::kSpaceSize;
58 static const uptr kMetadataSize = Params::kMetadataSize;
59 typedef typename Params::SizeClassMap SizeClassMap;
60 static const uptr kRegionSizeLog = Params::kRegionSizeLog;
61 typedef typename Params::MapUnmapCallback MapUnmapCallback;
62 using ByteMap = typename conditional<
63 (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
64 FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
65 AddressSpaceView>,
66 TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
67
68 COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
69 (kSpaceSize & (kSpaceSize - 1)) == 0);
70
71 static const bool kRandomShuffleChunks = Params::kFlags &
72 SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
73 static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
74 SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
75
76 struct TransferBatch {
77 static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
78 void SetFromArray(void *batch[], uptr count) {
79 DCHECK_LE(count, kMaxNumCached);
80 count_ = count;
81 for (uptr i = 0; i < count; i++)
82 batch_[i] = batch[i];
83 }
84 uptr Count() const { return count_; }
85 void Clear() { count_ = 0; }
86 void Add(void *ptr) {
87 batch_[count_++] = ptr;
88 DCHECK_LE(count_, kMaxNumCached);
89 }
90 void CopyToArray(void *to_batch[]) const {
91 for (uptr i = 0, n = Count(); i < n; i++)
92 to_batch[i] = batch_[i];
93 }
94
95 // How much memory do we need for a batch containing n elements.
96 static uptr AllocationSizeRequiredForNElements(uptr n) {
97 return sizeof(uptr) * 2 + sizeof(void *) * n;
98 }
99 static uptr MaxCached(uptr size) {
100 return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
101 }
102
103 TransferBatch *next;
104
105 private:
106 uptr count_;
107 void *batch_[kMaxNumCached];
108 };
109
110 static const uptr kBatchSize = sizeof(TransferBatch);
111 COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
112 COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
113
114 static uptr ClassIdToSize(uptr class_id) {
115 return (class_id == SizeClassMap::kBatchClassID) ?
25
Assuming 'class_id' is not equal to 'kBatchClassID'
26
'?' condition is false
116 kBatchSize : SizeClassMap::Size(class_id);
27
Calling 'SizeClassMap::Size'
117 }
118
119 typedef SizeClassAllocator32<Params> ThisT;
120 typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
121
122 void Init(s32 release_to_os_interval_ms) {
123 possible_regions.Init();
124 internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
125 }
126
127 s32 ReleaseToOSIntervalMs() const {
128 return kReleaseToOSIntervalNever;
129 }
130
131 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
132 // This is empty here. Currently only implemented in 64-bit allocator.
133 }
134
135 void ForceReleaseToOS() {
136 // Currently implemented in 64-bit allocator only.
137 }
138
139 void *MapWithCallback(uptr size) {
140 void *res = MmapOrDie(size, PrimaryAllocatorName);
141 MapUnmapCallback().OnMap((uptr)res, size);
142 return res;
143 }
144
145 void UnmapWithCallback(uptr beg, uptr size) {
146 MapUnmapCallback().OnUnmap(beg, size);
147 UnmapOrDie(reinterpret_cast<void *>(beg), size);
148 }
149
150 static bool CanAllocate(uptr size, uptr alignment) {
151 return size <= SizeClassMap::kMaxSize &&
152 alignment <= SizeClassMap::kMaxSize;
153 }
154
155 void *GetMetaData(const void *p) {
156 CHECK(kMetadataSize);
157 CHECK(PointerIsMine(p));
158 uptr mem = reinterpret_cast<uptr>(p);
159 uptr beg = ComputeRegionBeg(mem);
160 uptr size = ClassIdToSize(GetSizeClass(p));
161 u32 offset = mem - beg;
162 uptr n = offset / (u32)size; // 32-bit division
163 uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
164 return reinterpret_cast<void*>(meta);
165 }
166
167 NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
168 uptr class_id) {
169 DCHECK_LT(class_id, kNumClasses);
170 SizeClassInfo *sci = GetSizeClassInfo(class_id);
171 SpinMutexLock l(&sci->mutex);
172 if (sci->free_list.empty()) {
173 if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
174 return nullptr;
175 DCHECK(!sci->free_list.empty());
176 }
177 TransferBatch *b = sci->free_list.front();
178 sci->free_list.pop_front();
179 return b;
180 }
181
182 NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
183 TransferBatch *b) {
184 DCHECK_LT(class_id, kNumClasses);
185 CHECK_GT(b->Count(), 0);
186 SizeClassInfo *sci = GetSizeClassInfo(class_id);
187 SpinMutexLock l(&sci->mutex);
188 sci->free_list.push_front(b);
189 }
190
191 bool PointerIsMine(const void *p) {
192 uptr mem = reinterpret_cast<uptr>(p);
193 if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
194 mem &= (kSpaceSize - 1);
195 if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
196 return false;
197 return GetSizeClass(p) != 0;
198 }
199
200 uptr GetSizeClass(const void *p) {
201 return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
202 }
203
204 void *GetBlockBegin(const void *p) {
205 CHECK(PointerIsMine(p));
206 uptr mem = reinterpret_cast<uptr>(p);
207 uptr beg = ComputeRegionBeg(mem);
208 uptr size = ClassIdToSize(GetSizeClass(p));
209 u32 offset = mem - beg;
210 u32 n = offset / (u32)size; // 32-bit division
211 uptr res = beg + (n * (u32)size);
212 return reinterpret_cast<void*>(res);
213 }
214
215 uptr GetActuallyAllocatedSize(void *p) {
216 CHECK(PointerIsMine(p));
217 return ClassIdToSize(GetSizeClass(p));
218 }
219
220 static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
221
222 uptr TotalMemoryUsed() {
223 // No need to lock here.
224 uptr res = 0;
225 for (uptr i = 0; i < kNumPossibleRegions; i++)
226 if (possible_regions[i])
227 res += kRegionSize;
228 return res;
229 }
230
231 void TestOnlyUnmap() {
232 for (uptr i = 0; i < kNumPossibleRegions; i++)
233 if (possible_regions[i])
234 UnmapWithCallback((i * kRegionSize), kRegionSize);
235 }
236
237 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
238 // introspection API.
239 void ForceLock() {
240 for (uptr i = 0; i < kNumClasses; i++) {
241 GetSizeClassInfo(i)->mutex.Lock();
242 }
243 }
244
245 void ForceUnlock() {
246 for (int i = kNumClasses - 1; i >= 0; i--) {
247 GetSizeClassInfo(i)->mutex.Unlock();
248 }
249 }
250
251 // Iterate over all existing chunks.
252 // The allocator must be locked when calling this function.
253 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
254 for (uptr region = 0; region < kNumPossibleRegions; region++)
255 if (possible_regions[region]) {
256 uptr chunk_size = ClassIdToSize(possible_regions[region]);
257 uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
258 uptr region_beg = region * kRegionSize;
259 for (uptr chunk = region_beg;
260 chunk < region_beg + max_chunks_in_region * chunk_size;
261 chunk += chunk_size) {
262 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
263 callback(chunk, arg);
264 }
265 }
266 }
267
268 void PrintStats() {}
269
270 static uptr AdditionalSize() { return 0; }
271
272 typedef SizeClassMap SizeClassMapT;
273 static const uptr kNumClasses = SizeClassMap::kNumClasses;
274
275 private:
276 static const uptr kRegionSize = 1 << kRegionSizeLog;
277 static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
278
279 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
280 StaticSpinMutex mutex;
281 IntrusiveList<TransferBatch> free_list;
282 u32 rand_state;
283 };
284 COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
285
286 uptr ComputeRegionId(uptr mem) const {
287 if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
288 mem &= (kSpaceSize - 1);
289 const uptr res = mem >> kRegionSizeLog;
290 CHECK_LT(res, kNumPossibleRegions);
291 return res;
292 }
293
294 uptr ComputeRegionBeg(uptr mem) {
295 return mem & ~(kRegionSize - 1);
296 }
297
298 uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
299 DCHECK_LT(class_id, kNumClasses);
300 const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
301 kRegionSize, kRegionSize, PrimaryAllocatorName));
302 if (UNLIKELY(!res))
303 return 0;
304 MapUnmapCallback().OnMap(res, kRegionSize);
305 stat->Add(AllocatorStatMapped, kRegionSize);
306 CHECK(IsAligned(res, kRegionSize));
307 possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
308 return res;
309 }
310
311 SizeClassInfo *GetSizeClassInfo(uptr class_id) {
312 DCHECK_LT(class_id, kNumClasses);
313 return &size_class_info_array[class_id];
314 }
315
316 bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
317 TransferBatch **current_batch, uptr max_count,
318 uptr *pointers_array, uptr count) {
319 // If using a separate class for batches, we do not need to shuffle it.
320 if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
321 class_id != SizeClassMap::kBatchClassID))
322 RandomShuffle(pointers_array, count, &sci->rand_state);
323 TransferBatch *b = *current_batch;
324 for (uptr i = 0; i < count; i++) {
325 if (!b) {
326 b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
327 if (UNLIKELY(!b))
328 return false;
329 b->Clear();
330 }
331 b->Add((void*)pointers_array[i]);
332 if (b->Count() == max_count) {
333 sci->free_list.push_back(b);
334 b = nullptr;
335 }
336 }
337 *current_batch = b;
338 return true;
339 }
340
341 bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
342 SizeClassInfo *sci, uptr class_id) {
343 const uptr region = AllocateRegion(stat, class_id);
344 if (UNLIKELY(!region))
345 return false;
346 if (kRandomShuffleChunks)
347 if (UNLIKELY(sci->rand_state == 0))
348 // The random state is initialized from ASLR (PIE) and time.
349 sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
350 const uptr size = ClassIdToSize(class_id);
351 const uptr n_chunks = kRegionSize / (size + kMetadataSize);
352 const uptr max_count = TransferBatch::MaxCached(size);
353 DCHECK_GT(max_count, 0);
354 TransferBatch *b = nullptr;
355 constexpr uptr kShuffleArraySize = 48;
356 uptr shuffle_array[kShuffleArraySize];
357 uptr count = 0;
358 for (uptr i = region; i < region + n_chunks * size; i += size) {
359 shuffle_array[count++] = i;
360 if (count == kShuffleArraySize) {
361 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
362 shuffle_array, count)))
363 return false;
364 count = 0;
365 }
366 }
367 if (count) {
368 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
369 shuffle_array, count)))
370 return false;
371 }
372 if (b) {
373 CHECK_GT(b->Count(), 0);
374 sci->free_list.push_back(b);
375 }
376 return true;
377 }
378
379 ByteMap possible_regions;
380 SizeClassInfo size_class_info_array[kNumClasses];
381};

/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h

1//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Part of the Sanitizer Allocator.
10//
11//===----------------------------------------------------------------------===//
12#ifndef SANITIZER_ALLOCATOR_H
13#error This file must be included inside sanitizer_allocator.h
14#endif
15
16// SizeClassMap maps allocation sizes into size classes and back.
17// Class 0 always corresponds to size 0.
18// The other sizes are controlled by the template parameters:
19// kMinSizeLog: defines class 1 as 2^kMinSizeLog.
20// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
21// kMidSizeLog: classes starting from 1 increase in steps of 2^kMinSizeLog
22// up to 2^kMidSizeLog.
23// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
24// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
25// look like 0b1xx0..0, where x is either 0 or 1.
26//
27// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
28//
29// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
30// Next 4 classes: 256 + i * 64 (i = 1 to 4).
31// Next 4 classes: 512 + i * 128 (i = 1 to 4).
32// ...
33// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
34// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
35//
36// This structure of the size class map gives us:
37// - Efficient table-free class-to-size and size-to-class functions.
38// - The difference between two consecutive size classes is between 14% and 25%.
39//
40// This class also gives a hint to a thread-caching allocator about the number
41// of chunks that need to be cached per thread:
42// - kMaxNumCachedHint is a hint for the maximal number of chunks per size class.
43// The actual number is computed in TransferBatch.
44// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
45//
46// Part of output of SizeClassMap::Print():
47// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
48// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
49// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
50// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
51// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
52// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
53// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
54// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
55//
56// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
57// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
58// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
59// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
60// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
61// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
62// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
63// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
64//
65// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
66// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
67// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
68// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
69//
70// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
71// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
72// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
73// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
74//
75// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
76// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
77// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
78// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
79//
80// ...
81//
82// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
83// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
84// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
85// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
86//
87// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
88//
89//
90// Another example (kNumBits=2):
91// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
92// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
93// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
94// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
95// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
96// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
97// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
98// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
99// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
100// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
101// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
102// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
103// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
104// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
105// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
106// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
107// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
108// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
109// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
110// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
111// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
112// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
113// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
114// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
115// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
116// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
117// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
118
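
To make the mapping described above concrete, here is a minimal standalone sketch, using hypothetical helper names rather than the SizeClassMap API, that reproduces the class-to-size arithmetic for the example parameters kNumBits=3, kMinSizeLog=4, kMidSizeLog=8 and checks it against a few rows of the table:

    // A self-contained sketch of the class-to-size arithmetic (assumed
    // parameters: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8). Classes 1..16
    // step by kMinSize=16; after that each power of two is split into
    // 2^S = 4 steps, which produces the c17..c20 rows (320, 384, 448, 512).
    #include <cassert>
    #include <cstdint>

    static uint64_t ExampleClassToSize(uint64_t class_id) {
      const uint64_t kMinSize = 16, kMidSize = 256, kMidClass = 16, S = 2, M = 3;
      if (class_id == 0) return 0;
      if (class_id <= kMidClass) return kMinSize * class_id;
      class_id -= kMidClass;
      uint64_t t = kMidSize << (class_id >> S);  // base power of two
      return t + (t >> S) * (class_id & M);      // plus 0..3 quarter-steps
    }

    int main() {
      // These match the c01, c16, c17, c20 and c21 rows of the table above.
      assert(ExampleClassToSize(1) == 16);
      assert(ExampleClassToSize(16) == 256);
      assert(ExampleClassToSize(17) == 320);
      assert(ExampleClassToSize(20) == 512);
      assert(ExampleClassToSize(21) == 640);
      return 0;
    }
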
119template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
120 uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
121class SizeClassMap {
122 static const uptr kMinSize = 1 << kMinSizeLog;
123 static const uptr kMidSize = 1 << kMidSizeLog;
124 static const uptr kMidClass = kMidSize / kMinSize;
125 static const uptr S = kNumBits - 1;
126 static const uptr M = (1 << S) - 1;
127
128 public:
129 // kMaxNumCachedHintT is a power of two. It serves as a hint
130 // for the size of TransferBatch; the actual size may be a bit smaller.
131 static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
132 COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
133
134 static const uptr kMaxSize = 1UL << kMaxSizeLog;
135 static const uptr kNumClasses =
136 kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
137 static const uptr kLargestClassID = kNumClasses - 2;
138 static const uptr kBatchClassID = kNumClasses - 1;
139 COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
140 static const uptr kNumClassesRounded =
141 kNumClasses <= 32 ? 32 :
142 kNumClasses <= 64 ? 64 :
143 kNumClasses <= 128 ? 128 : 256;
144
145 static uptr Size(uptr class_id) {
146 // Estimate the result for kBatchClassID because this class does not know
147 // the exact size of TransferBatch. It's OK since we are using the actual
148 // sizeof(TransferBatch) where it matters.
149 if (UNLIKELY(class_id == kBatchClassID))
28: Taking false branch
150 return kMaxNumCachedHint * sizeof(uptr);
151 if (class_id <= kMidClass)
29: Assuming 'class_id' is > 'kMidClass'
30: Taking false branch
152 return kMinSize * class_id;
153 class_id -= kMidClass;
154 uptr t = kMidSize << (class_id >> S);
31: The result of the left shift is undefined due to shifting '256' by '1073741820', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr'
155 return t + (t >> S) * (class_id & M);
156 }
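
The warning at line 154 can be reproduced arithmetically. On this i386 build uptr is 32 bits wide, and the reported operands (256 shifted by 1073741820) are consistent with kMidSize == 256, S == 2 (kNumBits == 3) and an unconstrained class_id of roughly 0xFFFFFFF0 after the kMidClass subtraction; a shift count that large exceeds the 32-bit width of uptr, so the shift is undefined. A minimal sketch under those assumptions, which checks the shift count instead of performing the undefined shift:

    // Assumed values: 32-bit uptr (i386), kMidSize = 256, S = 2, and a
    // class_id of 0xFFFFFFF0 after `class_id -= kMidClass`, matching the
    // numbers in the analyzer warning.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kMidSize = 256, S = 2;
      uint32_t class_id = 0xFFFFFFF0u;   // value after the kMidClass subtraction
      uint32_t shift = class_id >> S;    // 1073741820
      if (shift >= 32) {
        // `kMidSize << shift` would be undefined behavior for a 32-bit uptr.
        printf("shift count %u >= 32: shifting %u by %u is undefined for a "
               "32-bit uptr\n", (unsigned)shift, (unsigned)kMidSize,
               (unsigned)shift);
      }
      return 0;
    }
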
157
158 static uptr ClassID(uptr size) {
159 if (UNLIKELY(size > kMaxSize))
160 return 0;
161 if (size <= kMidSize)
162 return (size + kMinSize - 1) >> kMinSizeLog;
163 const uptr l = MostSignificantSetBitIndex(size);
164 const uptr hbits = (size >> (l - S)) & M;
165 const uptr lbits = size & ((1U << (l - S)) - 1);
166 const uptr l1 = l - kMidSizeLog;
167 return kMidClass + (l1 << S) + hbits + (lbits > 0);
168 }
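
For the size-to-class direction, a short worked walk-through under the same example parameters (kMinSizeLog=4, kMidSizeLog=8, kNumBits=3): for size 320 the most significant set bit is l=8, hbits = (320 >> 6) & 3 = 1, lbits = 320 & 63 = 0, so the class is 16 + (0 << 2) + 1 + 0 = 17, matching the "c17 => s: 320" row; size 321 rounds up to class 18 because lbits > 0. A hedged standalone sketch of that computation (hypothetical helpers, not the library's API):

    #include <cassert>
    #include <cstdint>

    static uint64_t MsbIndex(uint64_t x) {   // index of the highest set bit
      uint64_t i = 0;
      while (x >>= 1) ++i;
      return i;
    }

    static uint64_t ExampleSizeToClass(uint64_t size) {
      const uint64_t kMinSize = 16, kMinSizeLog = 4, kMidSize = 256,
                     kMidSizeLog = 8, kMidClass = 16, S = 2, M = 3;
      if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
      const uint64_t l = MsbIndex(size);
      const uint64_t hbits = (size >> (l - S)) & M;
      const uint64_t lbits = size & ((1ULL << (l - S)) - 1);
      return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
    }

    int main() {
      assert(ExampleSizeToClass(320) == 17);  // exact class boundary
      assert(ExampleSizeToClass(321) == 18);  // rounds up (lbits > 0)
      assert(ExampleSizeToClass(512) == 20);  // matches c20 => s: 512
      return 0;
    }
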
169
170 static uptr MaxCachedHint(uptr size) {
171 DCHECK_LE(size, kMaxSize);
172 if (UNLIKELY(size == 0))
173 return 0;
174 uptr n;
175 // Force a 32-bit division if the template parameters allow for it.
176 if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
177 n = (1UL << kMaxBytesCachedLog) / size;
178 else
179 n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
180 return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
181 }
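
The cached-count arithmetic is a clamp of the per-class byte budget divided by the chunk size. The example table near the top of this file appears to use a hint of 256 chunks and kMaxBytesCachedLog = 16 (a 64 KiB budget per class); a small sketch under that assumption reproduces the "cached: 256", "cached: 204" and "cached: 1" columns:

    // Assumed parameters (from the example table, not from a particular
    // typedef): kMaxNumCachedHint = 256, kMaxBytesCachedLog = 16.
    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    static uint64_t ExampleMaxCachedHint(uint64_t size) {
      const uint64_t kMaxNumCachedHint = 256, kMaxBytesCachedLog = 16;
      if (size == 0) return 0;
      uint64_t n = (1ULL << kMaxBytesCachedLog) / size;
      // Clamp to [1, kMaxNumCachedHint].
      return std::max<uint64_t>(1, std::min<uint64_t>(kMaxNumCachedHint, n));
    }

    int main() {
      assert(ExampleMaxCachedHint(16) == 256);    // c01: capped by the hint
      assert(ExampleMaxCachedHint(320) == 204);   // c17: 65536 / 320 = 204
      assert(ExampleMaxCachedHint(81920) == 1);   // c49: clamped up to 1
      return 0;
    }
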
182
183 static void Print() {
184 uptr prev_s = 0;
185 uptr total_cached = 0;
186 for (uptr i = 0; i < kNumClasses; i++) {
187 uptr s = Size(i);
188 if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
189 Printf("\n");
190 uptr d = s - prev_s;
191 uptr p = prev_s ? (d * 100 / prev_s) : 0;
192 uptr l = s ? MostSignificantSetBitIndex(s) : 0;
193 uptr cached = MaxCachedHint(s) * s;
194 if (i == kBatchClassID)
195 d = p = l = 0;
196 Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
197 "cached: %zd %zd; id %zd\n",
198 i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
199 total_cached += cached;
200 prev_s = s;
201 }
202 Printf("Total cached: %zd\n", total_cached);
203 }
204
205 static void Validate() {
206 for (uptr c = 1; c < kNumClasses; c++) {
207 // Printf("Validate: c%zd\n", c);
208 uptr s = Size(c);
209 CHECK_NE(s, 0U);
210 if (c == kBatchClassID)
211 continue;
212 CHECK_EQ(ClassID(s), c);
213 if (c < kLargestClassID)
214 CHECK_EQ(ClassID(s + 1), c + 1);
215 CHECK_EQ(ClassID(s - 1), c);
216 CHECK_GT(Size(c), Size(c - 1));
217 }
218 CHECK_EQ(ClassID(kMaxSize + 1), 0);
219
220 for (uptr s = 1; s <= kMaxSize; s++) {
221 uptr c = ClassID(s);
222 // Printf("s%zd => c%zd\n", s, c);
223 CHECK_LT(c, kNumClasses);
224 CHECK_GE(Size(c), s);
225 if (c > 0)
226 CHECK_LT(Size(c - 1), s);
227 }
228 }
229};
230
231typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
232typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
233typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
234
235// The following SizeClassMap holds only a very small number of cached entries,
236// allowing for denser per-class arrays, a smaller memory footprint, and usually
237// better performance in threaded environments.
238typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
239// Similar to the VeryCompact map above, this one has a small number of
240// size classes, and also reduced thread-local caches.
241typedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap;
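
To illustrate the footprint difference described in the comments above, the following sketch compares the per-class cache implied by the template arguments of DefaultSizeClassMap (hint 128, 2^16-byte budget) and DenseSizeClassMap (hint 8, 2^10-byte budget); it is a back-of-the-envelope calculation based only on those arguments, not on the allocator's runtime behavior:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Number of chunks the map would hint to cache for a given chunk size,
    // given a chunk-count hint and a log2 byte budget (assumed formula,
    // mirroring MaxCachedHint above).
    static uint64_t CachedChunks(uint64_t size, uint64_t hint, uint64_t bytes_log) {
      uint64_t n = (1ULL << bytes_log) / size;
      return std::max<uint64_t>(1, std::min(hint, n));
    }

    int main() {
      const uint64_t size = 16;  // a small size class
      uint64_t def = CachedChunks(size, /*hint=*/128, /*bytes_log=*/16);  // Default
      uint64_t dense = CachedChunks(size, /*hint=*/8, /*bytes_log=*/10);  // Dense
      printf("16-byte class: Default hints %llu chunks (%llu bytes), "
             "Dense hints %llu chunks (%llu bytes)\n",
             (unsigned long long)def, (unsigned long long)(def * size),
             (unsigned long long)dense, (unsigned long long)(dense * size));
      return 0;
    }
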