Bug Summary

File: projects/compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h
Warning: line 155, column 23
The result of the left shift is undefined due to shifting '256' by '4611686018427387900', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr'
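
In C++, a left shift whose count is greater than or equal to the bit width of the (promoted) left operand is undefined behavior; that is the rule relevant to this warning, since a 64-bit __sanitizer::uptr can only be shifted by 0 through 63. The snippet below is a minimal standalone illustration of the rule and of a guarded shift; it is not taken from the Scudo or sanitizer_common sources.

  #include <climits>
  #include <cstdint>

  // Returns value << amount, refusing shift counts that would be undefined
  // for a 64-bit operand (amount >= 64).
  uint64_t checkedShiftLeft(uint64_t value, uint64_t amount) {
    if (amount >= sizeof(value) * CHAR_BIT)
      return 0;  // out-of-range shift: handle explicitly rather than shifting
    return value << amount;
  }

The annotated path below shows how the analyzer believes SizeClassMap::Size can be reached with a shift amount of 4611686018427387900, far beyond the maximum of 63.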

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name scudo_allocator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D clang_rt_scudo_dynamic_x86_64_EXPORTS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/compiler-rt/lib/scudo -I /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/scudo -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -I /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/scudo/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/compiler-rt/lib/scudo -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -fno-rtti -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/scudo/scudo_allocator.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/scudo/scudo_allocator.cpp

1//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9///
10/// Scudo Hardened Allocator implementation.
11/// It uses the sanitizer_common allocator as a base and aims at mitigating
12/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
13/// header, a delayed free list, and additional sanity checks.
14///
15//===----------------------------------------------------------------------===//
16
17#include "scudo_allocator.h"
18#include "scudo_crc32.h"
19#include "scudo_errors.h"
20#include "scudo_flags.h"
21#include "scudo_interface_internal.h"
22#include "scudo_tsd.h"
23#include "scudo_utils.h"
24
25#include "sanitizer_common/sanitizer_allocator_checks.h"
26#include "sanitizer_common/sanitizer_allocator_interface.h"
27#include "sanitizer_common/sanitizer_quarantine.h"
28
29#include <errno.h>
30#include <string.h>
31
32namespace __scudo {
33
34// Global static cookie, initialized at start-up.
35static u32 Cookie;
36
37// We default to software CRC32 if the alternatives are not supported, either
38// at compilation or at runtime.
39static atomic_uint8_t HashAlgorithm = { CRC32Software };
40
41INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
42 // If the hardware CRC32 feature is defined here, it was enabled everywhere,
43 // as opposed to only for scudo_crc32.cpp. This means that other hardware
44 // specific instructions were likely emitted at other places, and as a
45 // result there is no reason to not use it here.
46#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
47 Crc = CRC32_INTRINSIC(Crc, Value);
48 for (uptr i = 0; i < ArraySize; i++)
49 Crc = CRC32_INTRINSIC(Crc, Array[i]);
50 return Crc;
51#else
52 if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
53 Crc = computeHardwareCRC32(Crc, Value);
54 for (uptr i = 0; i < ArraySize; i++)
55 Crc = computeHardwareCRC32(Crc, Array[i]);
56 return Crc;
57 }
58 Crc = computeSoftwareCRC32(Crc, Value);
59 for (uptr i = 0; i < ArraySize; i++)
60 Crc = computeSoftwareCRC32(Crc, Array[i]);
61 return Crc;
62#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
63}
64
65static BackendT &getBackend();
66
67namespace Chunk {
68 static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
69 return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
70 getHeaderSize());
71 }
72 static INLINE
73 const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
74 return reinterpret_cast<const AtomicPackedHeader *>(
75 reinterpret_cast<uptr>(Ptr) - getHeaderSize());
76 }
77
78 static INLINE bool isAligned(const void *Ptr) {
79 return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
80 }
81
82 // We can't use the offset member of the chunk itself, as we would double
83 // fetch it without any warranty that it wouldn't have been tampered. To
84 // prevent this, we work with a local copy of the header.
85 static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
86 return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
87 getHeaderSize() - (Header->Offset << MinAlignmentLog));
88 }
89
90 // Returns the usable size for a chunk, meaning the amount of bytes from the
91 // beginning of the user data to the end of the backend allocated chunk.
92 static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
93 const uptr ClassId = Header->ClassId;
94 if (ClassId)
95 return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
96 (Header->Offset << MinAlignmentLog);
97 return SecondaryT::GetActuallyAllocatedSize(
98 getBackendPtr(Ptr, Header)) - getHeaderSize();
99 }
100
101 // Returns the size the user requested when allocating the chunk.
102 static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
103 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
104 if (Header->ClassId)
105 return SizeOrUnusedBytes;
106 return SecondaryT::GetActuallyAllocatedSize(
107 getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
108 }
109
110 // Compute the checksum of the chunk pointer and its header.
111 static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
112 UnpackedHeader ZeroChecksumHeader = *Header;
113 ZeroChecksumHeader.Checksum = 0;
114 uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
115 memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
116 const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
117 HeaderHolder, ARRAY_SIZE(HeaderHolder));
118 return static_cast<u16>(Crc);
119 }
120
121 // Checks the validity of a chunk by verifying its checksum. It doesn't
122 // incur termination in the event of an invalid chunk.
123 static INLINE bool isValid(const void *Ptr) {
124 PackedHeader NewPackedHeader =
125 atomic_load_relaxed(getConstAtomicHeader(Ptr));
126 UnpackedHeader NewUnpackedHeader =
127 bit_cast<UnpackedHeader>(NewPackedHeader);
128 return (NewUnpackedHeader.Checksum ==
129 computeChecksum(Ptr, &NewUnpackedHeader));
130 }
131
132 // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
133 // for a fully nulled out header, its state will be available anyway.
134 COMPILER_CHECK(ChunkAvailable == 0);
135
136 // Loads and unpacks the header, verifying the checksum in the process.
137 static INLINE
138 void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
139 PackedHeader NewPackedHeader =
140 atomic_load_relaxed(getConstAtomicHeader(Ptr));
141 *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
142 if (UNLIKELY(NewUnpackedHeader->Checksum !=
143 computeChecksum(Ptr, NewUnpackedHeader)))
145 }
146
147 // Packs and stores the header, computing the checksum in the process.
148 static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
149 NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
150 PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
151 atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
152 }
153
154 // Packs and stores the header, computing the checksum in the process. We
155 // compare the current header with the expected provided one to ensure that
156 // we are not being raced by a corruption occurring in another thread.
157 static INLINE void compareExchangeHeader(void *Ptr,
158 UnpackedHeader *NewUnpackedHeader,
159 UnpackedHeader *OldUnpackedHeader) {
160 NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
161 PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
162 PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
163 if (UNLIKELY(!atomic_compare_exchange_strong(
164 getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
165 memory_order_relaxed)))
166 dieWithMessage("race on chunk header at address %p\n", Ptr);
167 }
168} // namespace Chunk
169
170struct QuarantineCallback {
171 explicit QuarantineCallback(AllocatorCacheT *Cache)
172 : Cache_(Cache) {}
173
174 // Chunk recycling function, returns a quarantined chunk to the backend,
175 // first making sure it hasn't been tampered with.
176 void Recycle(void *Ptr) {
177 UnpackedHeader Header;
178 Chunk::loadHeader(Ptr, &Header);
179 if (UNLIKELY(Header.State != ChunkQuarantine))
180 dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
181 UnpackedHeader NewHeader = Header;
182 NewHeader.State = ChunkAvailable;
183 Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
184 void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
185 if (Header.ClassId)
186 getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
187 else
188 getBackend().deallocateSecondary(BackendPtr);
189 }
190
191 // Internal quarantine allocation and deallocation functions. We first check
192 // that the batches are indeed serviced by the Primary.
193 // TODO(kostyak): figure out the best way to protect the batches.
194 void *Allocate(uptr Size) {
195 const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
196 return getBackend().allocatePrimary(Cache_, BatchClassId);
197 }
198
199 void Deallocate(void *Ptr) {
200 const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
201 getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
202 }
203
204 AllocatorCacheT *Cache_;
205 COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
206};
207
208typedef Quarantine<QuarantineCallback, void> QuarantineT;
209typedef QuarantineT::Cache QuarantineCacheT;
210COMPILER_CHECK(sizeof(QuarantineCacheT) <=
211 sizeof(ScudoTSD::QuarantineCachePlaceHolder));
212
213QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
214 return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
215}
216
217struct Allocator {
218 static const uptr MaxAllowedMallocSize =
219 FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
220
221 BackendT Backend;
222 QuarantineT Quarantine;
223
224 u32 QuarantineChunksUpToSize;
225
226 bool DeallocationTypeMismatch;
227 bool ZeroContents;
228 bool DeleteSizeMismatch;
229
230 bool CheckRssLimit;
231 uptr HardRssLimitMb;
232 uptr SoftRssLimitMb;
233 atomic_uint8_t RssLimitExceeded;
234 atomic_uint64_t RssLastCheckedAtNS;
235
236 explicit Allocator(LinkerInitialized)
237 : Quarantine(LINKER_INITIALIZED) {}
238
239 NOINLINE void performSanityChecks();
240
241 void init() {
242 SanitizerToolName = "Scudo";
243 PrimaryAllocatorName = "ScudoPrimary";
244 SecondaryAllocatorName = "ScudoSecondary";
245
246 initFlags();
247
248 performSanityChecks();
249
250 // Check if hardware CRC32 is supported in the binary and by the platform,
251 // if so, opt for the CRC32 hardware version of the checksum.
252 if (&computeHardwareCRC32 && hasHardwareCRC32())
253 atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
254
255 SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
256 Backend.init(common_flags()->allocator_release_to_os_interval_ms);
257 HardRssLimitMb = common_flags()->hard_rss_limit_mb;
258 SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
259 Quarantine.Init(
260 static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
261 static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
262 QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
263 getFlags()->QuarantineChunksUpToSize;
264 DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
265 DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
266 ZeroContents = getFlags()->ZeroContents;
267
268 if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
269 /*blocking=*/false))) {
270 Cookie = static_cast<u32>((NanoTime() >> 12) ^
271 (reinterpret_cast<uptr>(this) >> 4));
272 }
273
274 CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
275 if (CheckRssLimit)
276 atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
277 }
278
279 // Helper function that checks for a valid Scudo chunk. nullptr isn't.
280 bool isValidPointer(const void *Ptr) {
281 initThreadMaybe();
282 if (UNLIKELY(!Ptr))
283 return false;
284 if (!Chunk::isAligned(Ptr))
285 return false;
286 return Chunk::isValid(Ptr);
287 }
288
289 NOINLINE bool isRssLimitExceeded();
290
291 // Allocates a chunk.
292 void *allocate(uptr Size, uptr Alignment, AllocType Type,
293 bool ForceZeroContents = false) {
294 initThreadMaybe();
295 if (UNLIKELY(Alignment > MaxAlignment)) {
5
Taking false branch
296 if (AllocatorMayReturnNull())
297 return nullptr;
298 reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
299 }
300 if (UNLIKELY(Alignment < MinAlignment))
6
Taking false branch
301 Alignment = MinAlignment;
302
303 const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
7
'?' condition is true
304 Chunk::getHeaderSize();
305 const uptr AlignedSize = (Alignment > MinAlignment) ?
8
Assuming 'Alignment' is <= 'MinAlignment'
9
'?' condition is false
306 NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
307 if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
10
Taking false branch
308 UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
309 if (AllocatorMayReturnNull())
310 return nullptr;
311 reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
312 }
313
314 if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
11
Assuming the condition is false
315 if (AllocatorMayReturnNull())
316 return nullptr;
317 reportRssLimitExceeded();
318 }
319
320 // Primary and Secondary backed allocations have a different treatment. We
321 // deal with alignment requirements of Primary serviced allocations here,
322 // but the Secondary will take care of its own alignment needs.
323 void *BackendPtr;
324 uptr BackendSize;
325 u8 ClassId;
326 if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
12
Taking true branch
327 BackendSize = AlignedSize;
328 ClassId = SizeClassMap::ClassID(BackendSize);
329 bool UnlockRequired;
330 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
331 BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
332 if (UnlockRequired)
13
Taking false branch
333 TSD->unlock();
334 } else {
335 BackendSize = NeededSize;
336 ClassId = 0;
337 BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
338 }
339 if (UNLIKELY(!BackendPtr)) {
14
Taking false branch
340 SetAllocatorOutOfMemory();
341 if (AllocatorMayReturnNull())
342 return nullptr;
343 reportOutOfMemory(Size);
344 }
345
346 // If requested, we will zero out the entire contents of the returned chunk.
347 if ((ForceZeroContents || ZeroContents) && ClassId)
15
Assuming the condition is true
16
Assuming 'ClassId' is not equal to 0
17
Taking true branch
348 memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
18
Calling 'SizeClassAllocator64::ClassIdToSize'
349
350 UnpackedHeader Header = {};
351 uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
352 if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
353 // Since the Secondary takes care of alignment, a non-aligned pointer
354 // means it is from the Primary. It is also the only case where the offset
355 // field of the header would be non-zero.
356 DCHECK(ClassId);
357 const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
358 Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
359 UserPtr = AlignedUserPtr;
360 }
361 DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
362 Header.State = ChunkAllocated;
363 Header.AllocType = Type;
364 if (ClassId) {
365 Header.ClassId = ClassId;
366 Header.SizeOrUnusedBytes = Size;
367 } else {
368 // The secondary fits the allocations to a page, so the amount of unused
369 // bytes is the difference between the end of the user allocation and the
370 // next page boundary.
371 const uptr PageSize = GetPageSizeCached();
372 const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
373 if (TrailingBytes)
374 Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
375 }
376 void *Ptr = reinterpret_cast<void *>(UserPtr);
377 Chunk::storeHeader(Ptr, &Header);
378 if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
379 __sanitizer_malloc_hook(Ptr, Size);
380 return Ptr;
381 }
382
383 // Place a chunk in the quarantine or directly deallocate it in the event of
384 // a zero-sized quarantine, or if the size of the chunk is greater than the
385 // quarantine chunk size threshold.
386 void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
387 uptr Size) {
388 const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
389 if (BypassQuarantine) {
390 UnpackedHeader NewHeader = *Header;
391 NewHeader.State = ChunkAvailable;
392 Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
393 void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
394 if (Header->ClassId) {
395 bool UnlockRequired;
396 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
397 getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
398 Header->ClassId);
399 if (UnlockRequired)
400 TSD->unlock();
401 } else {
402 getBackend().deallocateSecondary(BackendPtr);
403 }
404 } else {
405 // If a small memory amount was allocated with a larger alignment, we want
406 // to take that into account. Otherwise the Quarantine would be filled
407 // with tiny chunks, taking a lot of VA memory. This is an approximation
408 // of the usable size, that allows us to not call
409 // GetActuallyAllocatedSize.
410 const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
411 UnpackedHeader NewHeader = *Header;
412 NewHeader.State = ChunkQuarantine;
413 Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
414 bool UnlockRequired;
415 ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
416 Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
417 Ptr, EstimatedSize);
418 if (UnlockRequired)
419 TSD->unlock();
420 }
421 }
422
423 // Deallocates a Chunk, which means either adding it to the quarantine or
424 // directly returning it to the backend if criteria are met.
425 void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
426 AllocType Type) {
427 // For a deallocation, we only ensure minimal initialization, meaning thread
428 // local data will be left uninitialized for now (when using ELF TLS). The
429 // fallback cache will be used instead. This is a workaround for a situation
430 // where the only heap operation performed in a thread would be a free past
431 // the TLS destructors, ending up in initialized thread specific data never
432 // being destroyed properly. Any other heap operation will do a full init.
433 initThreadMaybe(/*MinimalInit=*/true);
434 if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
435 __sanitizer_free_hook(Ptr);
436 if (UNLIKELY(!Ptr))
437 return;
438 if (UNLIKELY(!Chunk::isAligned(Ptr)))
439 dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
440 UnpackedHeader Header;
441 Chunk::loadHeader(Ptr, &Header);
442 if (UNLIKELY(Header.State != ChunkAllocated))
443 dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
444 if (DeallocationTypeMismatch) {
445 // The deallocation type has to match the allocation one.
446 if (Header.AllocType != Type) {
447 // With the exception of memalign'd Chunks, that can still be free'd.
448 if (Header.AllocType != FromMemalign || Type != FromMalloc)
449 dieWithMessage("allocation type mismatch when deallocating address "
450 "%p\n", Ptr);
451 }
452 }
453 const uptr Size = Chunk::getSize(Ptr, &Header);
454 if (DeleteSizeMismatch) {
455 if (DeleteSize && DeleteSize != Size)
456 dieWithMessage("invalid sized delete when deallocating address %p\n",
457 Ptr);
458 }
459 (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
460 quarantineOrDeallocateChunk(Ptr, &Header, Size);
461 }
462
463 // Reallocates a chunk. We can save on a new allocation if the new requested
464 // size still fits in the chunk.
465 void *reallocate(void *OldPtr, uptr NewSize) {
466 initThreadMaybe();
467 if (UNLIKELY(!Chunk::isAligned(OldPtr)))
468 dieWithMessage("misaligned address when reallocating address %p\n",
469 OldPtr);
470 UnpackedHeader OldHeader;
471 Chunk::loadHeader(OldPtr, &OldHeader);
472 if (UNLIKELY(OldHeader.State != ChunkAllocated))
473 dieWithMessage("invalid chunk state when reallocating address %p\n",
474 OldPtr);
475 if (DeallocationTypeMismatch) {
476 if (UNLIKELY(OldHeader.AllocType != FromMalloc))
477 dieWithMessage("allocation type mismatch when reallocating address "
478 "%p\n", OldPtr);
479 }
480 const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
481 // The new size still fits in the current chunk, and the size difference
482 // is reasonable.
483 if (NewSize <= UsableSize &&
484 (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
485 UnpackedHeader NewHeader = OldHeader;
486 NewHeader.SizeOrUnusedBytes =
487 OldHeader.ClassId ? NewSize : UsableSize - NewSize;
488 Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
489 return OldPtr;
490 }
491 // Otherwise, we have to allocate a new chunk and copy the contents of the
492 // old one.
493 void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
494 if (NewPtr) {
495 const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
496 UsableSize - OldHeader.SizeOrUnusedBytes;
497 memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
498 quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
499 }
500 return NewPtr;
501 }
502
503 // Helper function that returns the actual usable size of a chunk.
504 uptr getUsableSize(const void *Ptr) {
505 initThreadMaybe();
506 if (UNLIKELY(!Ptr))
507 return 0;
508 UnpackedHeader Header;
509 Chunk::loadHeader(Ptr, &Header);
510 // Getting the usable size of a chunk only makes sense if it's allocated.
511 if (UNLIKELY(Header.State != ChunkAllocated))
512 dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
513 return Chunk::getUsableSize(Ptr, &Header);
514 }
515
516 void *calloc(uptr NMemB, uptr Size) {
517 initThreadMaybe();
518 if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
519 if (AllocatorMayReturnNull())
520 return nullptr;
521 reportCallocOverflow(NMemB, Size);
522 }
523 return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
524 }
525
526 void commitBack(ScudoTSD *TSD) {
527 Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
528 Backend.destroyCache(&TSD->Cache);
529 }
530
531 uptr getStats(AllocatorStat StatType) {
532 initThreadMaybe();
533 uptr stats[AllocatorStatCount];
534 Backend.getStats(stats);
535 return stats[StatType];
536 }
537
538 bool canReturnNull() {
539 initThreadMaybe();
540 return AllocatorMayReturnNull();
541 }
542
543 void setRssLimit(uptr LimitMb, bool HardLimit) {
544 if (HardLimit)
545 HardRssLimitMb = LimitMb;
546 else
547 SoftRssLimitMb = LimitMb;
548 CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
549 }
550
551 void printStats() {
552 initThreadMaybe();
553 Backend.printStats();
554 }
555};
556
557 NOINLINE void Allocator::performSanityChecks() {
558 // Verify that the header offset field can hold the maximum offset. In the
559 // case of the Secondary allocator, it takes care of alignment and the
560 // offset will always be 0. In the case of the Primary, the worst case
561 // scenario happens in the last size class, when the backend allocation
562 // would already be aligned on the requested alignment, which would happen
563 // to be the maximum alignment that would fit in that size class. As a
564 // result, the maximum offset will be at most the maximum alignment for the
565 // last size class minus the header size, in multiples of MinAlignment.
566 UnpackedHeader Header = {};
567 const uptr MaxPrimaryAlignment =
568 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
569 const uptr MaxOffset =
570 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
571 Header.Offset = MaxOffset;
572 if (Header.Offset != MaxOffset)
573 dieWithMessage("maximum possible offset doesn't fit in header\n");
574 // Verify that we can fit the maximum size or amount of unused bytes in the
575 // header. Given that the Secondary fits the allocation to a page, the worst
576 // case scenario happens in the Primary. It will depend on the second to
577 // last and last class sizes, as well as the dynamic base for the Primary.
578 // The following is an over-approximation that works for our needs.
579 const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
580 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
581 if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
582 dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
583
584 const uptr LargestClassId = SizeClassMap::kLargestClassID;
585 Header.ClassId = LargestClassId;
586 if (Header.ClassId != LargestClassId)
587 dieWithMessage("largest class ID doesn't fit in header\n");
588}
589
590// Opportunistic RSS limit check. This will update the RSS limit status, if
591// it can, every 100ms, otherwise it will just return the current one.
592 NOINLINE bool Allocator::isRssLimitExceeded() {
593 u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
594 const u64 CurrentCheck = MonotonicNanoTime();
595 if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
596 return atomic_load_relaxed(&RssLimitExceeded);
597 if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
598 CurrentCheck, memory_order_relaxed))
599 return atomic_load_relaxed(&RssLimitExceeded);
600 // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
601 // RSS from /proc/self/statm by default. We might want to
602 // call getrusage directly, even if it's less accurate.
603 const uptr CurrentRssMb = GetRSS() >> 20;
604 if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
605 dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
606 HardRssLimitMb, CurrentRssMb);
607 if (SoftRssLimitMb) {
608 if (atomic_load_relaxed(&RssLimitExceeded)) {
609 if (CurrentRssMb <= SoftRssLimitMb)
610 atomic_store_relaxed(&RssLimitExceeded, false);
611 } else {
612 if (CurrentRssMb > SoftRssLimitMb) {
613 atomic_store_relaxed(&RssLimitExceeded, true);
614 Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
615 SoftRssLimitMb, CurrentRssMb);
616 }
617 }
618 }
619 return atomic_load_relaxed(&RssLimitExceeded);
620}
621
622static Allocator Instance(LINKER_INITIALIZED);
623
624static BackendT &getBackend() {
625 return Instance.Backend;
626}
627
628void initScudo() {
629 Instance.init();
630}
631
632void ScudoTSD::init() {
633 getBackend().initCache(&Cache);
634 memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
635}
636
637void ScudoTSD::commitBack() {
638 Instance.commitBack(this);
639}
640
641void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
642 if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
643 errno = EINVAL;
644 if (Instance.canReturnNull())
645 return nullptr;
646 reportAllocationAlignmentNotPowerOfTwo(Alignment);
647 }
648 return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
649}
650
651void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
652 Instance.deallocate(Ptr, Size, Alignment, Type);
653}
654
655void *scudoRealloc(void *Ptr, uptr Size) {
656 if (!Ptr)
657 return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
658 if (Size == 0) {
659 Instance.deallocate(Ptr, 0, 0, FromMalloc);
660 return nullptr;
661 }
662 return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
663}
664
665void *scudoCalloc(uptr NMemB, uptr Size) {
666 return SetErrnoOnNull(Instance.calloc(NMemB, Size));
667}
668
669void *scudoValloc(uptr Size) {
670 return SetErrnoOnNull(
671 Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
672}
673
674void *scudoPvalloc(uptr Size) {
675 const uptr PageSize = GetPageSizeCached();
676 if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
1
Taking false branch
677 errno = ENOMEM;
678 if (Instance.canReturnNull())
679 return nullptr;
680 reportPvallocOverflow(Size);
681 }
682 // pvalloc(0) should allocate one page.
683 Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
2
Assuming 'Size' is 0
3
'?' condition is false
684 return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
4
Calling 'Allocator::allocate'
685}
686
687int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
688 if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
689 if (!Instance.canReturnNull())
690 reportInvalidPosixMemalignAlignment(Alignment);
691 return EINVAL;
692 }
693 void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
694 if (UNLIKELY(!Ptr))
695 return ENOMEM;
696 *MemPtr = Ptr;
697 return 0;
698}
699
700void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
701 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
702 errno = EINVAL;
703 if (Instance.canReturnNull())
704 return nullptr;
705 reportInvalidAlignedAllocAlignment(Size, Alignment);
706 }
707 return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
708}
709
710uptr scudoMallocUsableSize(void *Ptr) {
711 return Instance.getUsableSize(Ptr);
712}
713
714} // namespace __scudo
715
716using namespace __scudo;
717
718// MallocExtension helper functions
719
720uptr __sanitizer_get_current_allocated_bytes() {
721 return Instance.getStats(AllocatorStatAllocated);
722}
723
724uptr __sanitizer_get_heap_size() {
725 return Instance.getStats(AllocatorStatMapped);
726}
727
728uptr __sanitizer_get_free_bytes() {
729 return 1;
730}
731
732uptr __sanitizer_get_unmapped_bytes() {
733 return 1;
734}
735
736uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
737 return Size;
738}
739
740int __sanitizer_get_ownership(const void *Ptr) {
741 return Instance.isValidPointer(Ptr);
742}
743
744uptr __sanitizer_get_allocated_size(const void *Ptr) {
745 return Instance.getUsableSize(Ptr);
746}
747
748#if !SANITIZER_SUPPORTS_WEAK_HOOKS
749SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
750 void *Ptr, uptr Size) {
751 (void)Ptr;
752 (void)Size;
753}
754
755SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
756 (void)Ptr;
757}
758#endif
759
760// Interface functions
761
762void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
763 if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
764 return;
765 Instance.setRssLimit(LimitMb, !!HardLimit);
766}
767
768void __scudo_print_stats() {
769 Instance.printStats();
770}

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_primary64.h

1//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
18
19// SizeClassAllocator64 -- allocator for 64-bit address space.
20// The template parameter Params is a class containing the actual parameters.
21//
22// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
23// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
24// Otherwise SpaceBeg=kSpaceBeg (fixed address).
25// kSpaceSize is a power of two.
26// At the beginning the entire space is mprotect-ed, then small parts of it
27// are mapped on demand.
28//
29// Region: a part of Space dedicated to a single size class.
30// There are kNumClasses Regions of equal size.
31//
32// UserChunk: a piece of memory returned to user.
33// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
34
35// FreeArray is an array of free-d chunks (stored as 4-byte offsets)
36//
37// A Region looks like this:
38// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
39
40struct SizeClassAllocator64FlagMasks { // Bit masks.
41 enum {
42 kRandomShuffleChunks = 1,
43 };
44};
45
46template <class Params>
47class SizeClassAllocator64 {
48 public:
49 static const uptr kSpaceBeg = Params::kSpaceBeg;
50 static const uptr kSpaceSize = Params::kSpaceSize;
51 static const uptr kMetadataSize = Params::kMetadataSize;
52 typedef typename Params::SizeClassMap SizeClassMap;
53 typedef typename Params::MapUnmapCallback MapUnmapCallback;
54
55 static const bool kRandomShuffleChunks =
56 Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
57
58 typedef SizeClassAllocator64<Params> ThisT;
59 typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
60
61 // When we know the size class (the region base) we can represent a pointer
62 // as a 4-byte integer (offset from the region start shifted right by 4).
63 typedef u32 CompactPtrT;
64 static const uptr kCompactPtrScale = 4;
65 CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
66 return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
67 }
68 uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
69 return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
70 }
71
72 void Init(s32 release_to_os_interval_ms) {
73 uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
74 if (kUsingConstantSpaceBeg) {
75 CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
76 PrimaryAllocatorName, kSpaceBeg));
77 } else {
78 NonConstSpaceBeg = address_range.Init(TotalSpaceSize,
79 PrimaryAllocatorName);
80 CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
81 }
82 SetReleaseToOSIntervalMs(release_to_os_interval_ms);
83 MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
84 // Check that the RegionInfo array is aligned on the CacheLine size.
85 DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
86 }
87
88 s32 ReleaseToOSIntervalMs() const {
89 return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
90 }
91
92 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
93 atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
94 memory_order_relaxed);
95 }
96
97 void ForceReleaseToOS() {
98 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
99 BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
100 MaybeReleaseToOS(class_id, true /*force*/);
101 }
102 }
103
104 static bool CanAllocate(uptr size, uptr alignment) {
105 return size <= SizeClassMap::kMaxSize &&
106 alignment <= SizeClassMap::kMaxSize;
107 }
108
109 NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
110 const CompactPtrT *chunks, uptr n_chunks) {
111 RegionInfo *region = GetRegionInfo(class_id);
112 uptr region_beg = GetRegionBeginBySizeClass(class_id);
113 CompactPtrT *free_array = GetFreeArray(region_beg);
114
115 BlockingMutexLock l(&region->mutex);
116 uptr old_num_chunks = region->num_freed_chunks;
117 uptr new_num_freed_chunks = old_num_chunks + n_chunks;
118 // Failure to allocate free array space while releasing memory is non
119 // recoverable.
120 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
121 new_num_freed_chunks))) {
122 Report("FATAL: Internal error: %s's allocator exhausted the free list "
123 "space for size class %zd (%zd bytes).\n", SanitizerToolName,
124 class_id, ClassIdToSize(class_id));
125 Die();
126 }
127 for (uptr i = 0; i < n_chunks; i++)
128 free_array[old_num_chunks + i] = chunks[i];
129 region->num_freed_chunks = new_num_freed_chunks;
130 region->stats.n_freed += n_chunks;
131
132 MaybeReleaseToOS(class_id, false /*force*/);
133 }
134
135 NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
136 CompactPtrT *chunks, uptr n_chunks) {
137 RegionInfo *region = GetRegionInfo(class_id);
138 uptr region_beg = GetRegionBeginBySizeClass(class_id);
139 CompactPtrT *free_array = GetFreeArray(region_beg);
140
141 BlockingMutexLock l(&region->mutex);
142 if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
143 if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
144 n_chunks - region->num_freed_chunks)))
145 return false;
146 CHECK_GE(region->num_freed_chunks, n_chunks);
147 }
148 region->num_freed_chunks -= n_chunks;
149 uptr base_idx = region->num_freed_chunks;
150 for (uptr i = 0; i < n_chunks; i++)
151 chunks[i] = free_array[base_idx + i];
152 region->stats.n_allocated += n_chunks;
153 return true;
154 }
155
156 bool PointerIsMine(const void *p) {
157 uptr P = reinterpret_cast<uptr>(p);
158 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
159 return P / kSpaceSize == kSpaceBeg / kSpaceSize;
160 return P >= SpaceBeg() && P < SpaceEnd();
161 }
162
163 uptr GetRegionBegin(const void *p) {
164 if (kUsingConstantSpaceBeg)
165 return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
166 uptr space_beg = SpaceBeg();
167 return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
168 space_beg;
169 }
170
171 uptr GetRegionBeginBySizeClass(uptr class_id) const {
172 return SpaceBeg() + kRegionSize * class_id;
173 }
174
175 uptr GetSizeClass(const void *p) {
176 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
177 return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
178 return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
179 kNumClassesRounded;
180 }
181
182 void *GetBlockBegin(const void *p) {
183 uptr class_id = GetSizeClass(p);
184 uptr size = ClassIdToSize(class_id);
185 if (!size) return nullptr;
186 uptr chunk_idx = GetChunkIdx((uptr)p, size);
187 uptr reg_beg = GetRegionBegin(p);
188 uptr beg = chunk_idx * size;
189 uptr next_beg = beg + size;
190 if (class_id >= kNumClasses) return nullptr;
191 RegionInfo *region = GetRegionInfo(class_id);
192 if (region->mapped_user >= next_beg)
193 return reinterpret_cast<void*>(reg_beg + beg);
194 return nullptr;
195 }
196
197 uptr GetActuallyAllocatedSize(void *p) {
198 CHECK(PointerIsMine(p));
199 return ClassIdToSize(GetSizeClass(p));
200 }
201
202 uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
203
204 void *GetMetaData(const void *p) {
205 uptr class_id = GetSizeClass(p);
206 uptr size = ClassIdToSize(class_id);
207 uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
208 uptr region_beg = GetRegionBeginBySizeClass(class_id);
209 return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
210 (1 + chunk_idx) * kMetadataSize);
211 }
212
213 uptr TotalMemoryUsed() {
214 uptr res = 0;
215 for (uptr i = 0; i < kNumClasses; i++)
216 res += GetRegionInfo(i)->allocated_user;
217 return res;
218 }
219
220 // Test-only.
221 void TestOnlyUnmap() {
222 UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
223 }
224
225 static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
226 uptr stats_size) {
227 for (uptr class_id = 0; class_id < stats_size; class_id++)
228 if (stats[class_id] == start)
229 stats[class_id] = rss;
230 }
231
232 void PrintStats(uptr class_id, uptr rss) {
233 RegionInfo *region = GetRegionInfo(class_id);
234 if (region->mapped_user == 0) return;
235 uptr in_use = region->stats.n_allocated - region->stats.n_freed;
236 uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
237 Printf(
238 "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
239 "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
240 "last released: %6zdK region: 0x%zx\n",
241 region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
242 region->mapped_user >> 10, region->stats.n_allocated,
243 region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
244 rss >> 10, region->rtoi.num_releases,
245 region->rtoi.last_released_bytes >> 10,
246 SpaceBeg() + kRegionSize * class_id);
247 }
248
249 void PrintStats() {
250 uptr rss_stats[kNumClasses];
251 for (uptr class_id = 0; class_id < kNumClasses; class_id++)
252 rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
253 GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
254
255 uptr total_mapped = 0;
256 uptr total_rss = 0;
257 uptr n_allocated = 0;
258 uptr n_freed = 0;
259 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
260 RegionInfo *region = GetRegionInfo(class_id);
261 if (region->mapped_user != 0) {
262 total_mapped += region->mapped_user;
263 total_rss += rss_stats[class_id];
264 }
265 n_allocated += region->stats.n_allocated;
266 n_freed += region->stats.n_freed;
267 }
268
269 Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
270 "%zd allocations; remains %zd\n", total_mapped >> 20,
271 total_rss >> 20, n_allocated, n_allocated - n_freed);
272 for (uptr class_id = 1; class_id < kNumClasses; class_id++)
273 PrintStats(class_id, rss_stats[class_id]);
274 }
275
276 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
277 // introspection API.
278 void ForceLock() {
279 for (uptr i = 0; i < kNumClasses; i++) {
280 GetRegionInfo(i)->mutex.Lock();
281 }
282 }
283
284 void ForceUnlock() {
285 for (int i = (int)kNumClasses - 1; i >= 0; i--) {
286 GetRegionInfo(i)->mutex.Unlock();
287 }
288 }
289
290 // Iterate over all existing chunks.
291 // The allocator must be locked when calling this function.
292 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
293 for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
294 RegionInfo *region = GetRegionInfo(class_id);
295 uptr chunk_size = ClassIdToSize(class_id);
296 uptr region_beg = SpaceBeg() + class_id * kRegionSize;
297 for (uptr chunk = region_beg;
298 chunk < region_beg + region->allocated_user;
299 chunk += chunk_size) {
300 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
301 callback(chunk, arg);
302 }
303 }
304 }
305
306 static uptr ClassIdToSize(uptr class_id) {
307 return SizeClassMap::Size(class_id);
19
Calling 'SizeClassMap::Size'
308 }
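
The shift flagged in the Bug Summary lives in SizeClassMap::Size (sanitizer_allocator_size_class_map.h, line 155), which is not reproduced in this report. As a rough sketch of the pattern the analyzer is reasoning about, size-class maps of this kind derive larger class sizes by shifting a mid-range base size by a value computed from the class ID; the names and constants below are illustrative assumptions, not the actual sanitizer code:

  // Illustrative sketch only: if class_id can be smaller than kMidClass on
  // some path, the unsigned subtraction wraps around and the shift count can
  // vastly exceed 63, which is undefined for a 64-bit left shift. The huge
  // shift amount quoted in the warning is the kind of value such a
  // wrap-around produces.
  typedef unsigned long uptr;        // stand-in for __sanitizer::uptr
  const uptr kMidClass = 32;         // assumed value
  const uptr kMidSize = 256;         // matches the '256' in the warning
  const uptr S = 2;                  // assumed sub-class granularity

  uptr illustrativeSize(uptr class_id) {
    class_id -= kMidClass;               // wraps if class_id < kMidClass
    return kMidSize << (class_id >> S);  // shift count may be >= 64: UB
  }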
309
310 static uptr AdditionalSize() {
311 return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
312 GetPageSizeCached());
313 }
314
315 typedef SizeClassMap SizeClassMapT;
316 static const uptr kNumClasses = SizeClassMap::kNumClasses;
317 static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
318
319 // A packed array of counters. Each counter occupies 2^n bits, enough to store
320 // counter's max_value. Ctor will try to allocate the required buffer via
321 // mapper->MapPackedCounterArrayBuffer and the caller is expected to check
322 // whether the initialization was successful by checking IsAllocated() result.
323 // For the performance sake, none of the accessors check the validity of the
324 // arguments, it is assumed that index is always in [0, n) range and the value
325 // is not incremented past max_value.
326 template<class MemoryMapperT>
327 class PackedCounterArray {
328 public:
329 PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
330 : n(num_counters), memory_mapper(mapper) {
331 CHECK_GT(num_counters, 0);
332 CHECK_GT(max_value, 0);
333 constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
334 // Rounding counter storage size up to the power of two allows for using
335 // bit shifts calculating particular counter's index and offset.
336 uptr counter_size_bits =
337 RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
338 CHECK_LE(counter_size_bits, kMaxCounterBits);
339 counter_size_bits_log = Log2(counter_size_bits);
340 counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
341
342 uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
343 CHECK_GT(packing_ratio, 0);
344 packing_ratio_log = Log2(packing_ratio);
345 bit_offset_mask = packing_ratio - 1;
346
347 buffer_size =
348 (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
349 sizeof(*buffer);
350 buffer = reinterpret_cast<u64*>(
351 memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
352 }
353 ~PackedCounterArray() {
354 if (buffer) {
355 memory_mapper->UnmapPackedCounterArrayBuffer(
356 reinterpret_cast<uptr>(buffer), buffer_size);
357 }
358 }
359
360 bool IsAllocated() const {
361 return !!buffer;
362 }
363
364 u64 GetCount() const {
365 return n;
366 }
367
368 uptr Get(uptr i) const {
369 DCHECK_LT(i, n);
370 uptr index = i >> packing_ratio_log;
371 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
372 return (buffer[index] >> bit_offset) & counter_mask;
373 }
374
375 void Inc(uptr i) const {
376 DCHECK_LT(Get(i), counter_mask);
377 uptr index = i >> packing_ratio_log;
378 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
379 buffer[index] += 1ULL << bit_offset;
380 }
381
382 void IncRange(uptr from, uptr to) const {
383 DCHECK_LE(from, to);
384 for (uptr i = from; i <= to; i++)
385 Inc(i);
386 }
387
388 private:
389 const u64 n;
390 u64 counter_size_bits_log;
391 u64 counter_mask;
392 u64 packing_ratio_log;
393 u64 bit_offset_mask;
394
395 MemoryMapperT* const memory_mapper;
396 u64 buffer_size;
397 u64* buffer;
398 };
399
400 template<class MemoryMapperT>
401 class FreePagesRangeTracker {
402 public:
403 explicit FreePagesRangeTracker(MemoryMapperT* mapper)
404 : memory_mapper(mapper),
405 page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
406 in_the_range(false), current_page(0), current_range_start_page(0) {}
407
408 void NextPage(bool freed) {
409 if (freed) {
410 if (!in_the_range) {
411 current_range_start_page = current_page;
412 in_the_range = true;
413 }
414 } else {
415 CloseOpenedRange();
416 }
417 current_page++;
418 }
419
420 void Done() {
421 CloseOpenedRange();
422 }
423
424 private:
425 void CloseOpenedRange() {
426 if (in_the_range) {
427 memory_mapper->ReleasePageRangeToOS(
428 current_range_start_page << page_size_scaled_log,
429 current_page << page_size_scaled_log);
430 in_the_range = false;
431 }
432 }
433
434 MemoryMapperT* const memory_mapper;
435 const uptr page_size_scaled_log;
436 bool in_the_range;
437 uptr current_page;
438 uptr current_range_start_page;
439 };
440
441 // Iterates over the free_array to identify memory pages containing freed
442 // chunks only and returns these pages back to OS.
443 // allocated_pages_count is the total number of pages allocated for the
444 // current bucket.
445 template<class MemoryMapperT>
446 static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
447 uptr free_array_count, uptr chunk_size,
448 uptr allocated_pages_count,
449 MemoryMapperT *memory_mapper) {
450 const uptr page_size = GetPageSizeCached();
451
452 // Figure out the number of chunks per page and whether we can take a fast
453 // path (the number of chunks per page is the same for all pages).
454 uptr full_pages_chunk_count_max;
455 bool same_chunk_count_per_page;
456 if (chunk_size <= page_size && page_size % chunk_size == 0) {
457 // Same number of chunks per page, no cross overs.
458 full_pages_chunk_count_max = page_size / chunk_size;
459 same_chunk_count_per_page = true;
460 } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
461 chunk_size % (page_size % chunk_size) == 0) {
462 // Some chunks are crossing page boundaries, which means that the page
463 // contains one or two partial chunks, but all pages contain the same
464 // number of chunks.
465 full_pages_chunk_count_max = page_size / chunk_size + 1;
466 same_chunk_count_per_page = true;
467 } else if (chunk_size <= page_size) {
468 // Some chunks are crossing page boundaries, which means that the page
469 // contains one or two partial chunks.
470 full_pages_chunk_count_max = page_size / chunk_size + 2;
471 same_chunk_count_per_page = false;
472 } else if (chunk_size > page_size && chunk_size % page_size == 0) {
473 // One chunk covers multiple pages, no cross overs.
474 full_pages_chunk_count_max = 1;
475 same_chunk_count_per_page = true;
476 } else if (chunk_size > page_size) {
477 // One chunk covers multiple pages, and some chunks cross page
478 // boundaries. Some pages contain one chunk, some contain two.
479 full_pages_chunk_count_max = 2;
480 same_chunk_count_per_page = false;
481 } else {
482 UNREACHABLE("All chunk_size/page_size ratios must be handled.");
483 }
484
485 PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
486 full_pages_chunk_count_max,
487 memory_mapper);
488 if (!counters.IsAllocated())
489 return;
490
491 const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
492 const uptr page_size_scaled = page_size >> kCompactPtrScale;
493 const uptr page_size_scaled_log = Log2(page_size_scaled);
494
495 // Iterate over free chunks and count how many free chunks affect each
496 // allocated page.
497 if (chunk_size <= page_size && page_size % chunk_size == 0) {
498 // Each chunk affects one page only.
499 for (uptr i = 0; i < free_array_count; i++)
500 counters.Inc(free_array[i] >> page_size_scaled_log);
501 } else {
502 // In all other cases chunks might affect more than one page.
503 for (uptr i = 0; i < free_array_count; i++) {
504 counters.IncRange(
505 free_array[i] >> page_size_scaled_log,
506 (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
507 }
508 }
509
510 // Iterate over pages detecting ranges of pages with chunk counters equal
511 // to the expected number of chunks for the particular page.
512 FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
513 if (same_chunk_count_per_page) {
514 // Fast path, every page has the same number of chunks affecting it.
515 for (uptr i = 0; i < counters.GetCount(); i++)
516 range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
517 } else {
518 // Slow path, go through the pages keeping count of how many chunks affect
519 // each page.
520 const uptr pn =
521 chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
522 const uptr pnc = pn * chunk_size_scaled;
523 // The idea is to increment the current page pointer by the first chunk
524 // size, middle portion size (the portion of the page covered by chunks
525 // except the first and the last one) and then the last chunk size, adding
526 // up the number of chunks on the current page and checking on every step
527 // whether the page boundary was crossed.
528 uptr prev_page_boundary = 0;
529 uptr current_boundary = 0;
530 for (uptr i = 0; i < counters.GetCount(); i++) {
531 uptr page_boundary = prev_page_boundary + page_size_scaled;
532 uptr chunks_per_page = pn;
533 if (current_boundary < page_boundary) {
534 if (current_boundary > prev_page_boundary)
535 chunks_per_page++;
536 current_boundary += pnc;
537 if (current_boundary < page_boundary) {
538 chunks_per_page++;
539 current_boundary += chunk_size_scaled;
540 }
541 }
542 prev_page_boundary = page_boundary;
543
544 range_tracker.NextPage(counters.Get(i) == chunks_per_page);
545 }
546 }
547 range_tracker.Done();
548 }
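As a worked example of the chunk/page classification at the top of ReleaseFreeMemoryToOS (assuming a 4096-byte page): chunk_size = 64 divides the page evenly, so every page holds exactly 64 chunks and the fast path compares each counter against 64. chunk_size = 48 leaves a remainder of 16 and 48 % 16 == 0, so every page is touched by 4096/48 + 1 = 86 chunks and the fast path still applies. chunk_size = 96 leaves a remainder of 64 and 96 % 64 != 0, so a page may be touched by up to 42 + 2 = 44 chunks and the slow path recomputes chunks_per_page for each page. chunk_size = 8192 spans exactly two pages (one chunk per page, fast path), while chunk_size = 6144 leaves some pages touched by one chunk and some by two (slow path with a maximum of 2).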
549
550 private:
551 friend class MemoryMapper;
552
553 ReservedAddressRange address_range;
554
555 static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
556 // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
557 // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
558 // elements, but in reality this will not happen. For simplicity we
559 // dedicate 1/8 of the region's virtual space to FreeArray.
560 static const uptr kFreeArraySize = kRegionSize / 8;
561
562 static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
563 uptr NonConstSpaceBeg;
564 uptr SpaceBeg() const {
565 return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
566 }
567 uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
568 // kRegionSize must be >= 2^32.
569 COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
570 // kRegionSize must be <= 2^36, see CompactPtrT.
571 COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
572 // Call mmap for user memory with at least this size.
573 static const uptr kUserMapSize = 1 << 16;
574 // Call mmap for metadata memory with at least this size.
575 static const uptr kMetaMapSize = 1 << 16;
576 // Call mmap for free array memory with at least this size.
577 static const uptr kFreeArrayMapSize = 1 << 16;
578
579 atomic_sint32_t release_to_os_interval_ms_;
580
581 struct Stats {
582 uptr n_allocated;
583 uptr n_freed;
584 };
585
586 struct ReleaseToOsInfo {
587 uptr n_freed_at_last_release;
588 uptr num_releases;
589 u64 last_release_at_ns;
590 u64 last_released_bytes;
591 };
592
593 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
594 BlockingMutex mutex;
595 uptr num_freed_chunks; // Number of elements in the freearray.
596 uptr mapped_free_array; // Bytes mapped for freearray.
597 uptr allocated_user; // Bytes allocated for user memory.
598 uptr allocated_meta; // Bytes allocated for metadata.
599 uptr mapped_user; // Bytes mapped for user memory.
600 uptr mapped_meta; // Bytes mapped for metadata.
601 u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
602 bool exhausted; // Whether region is out of space for new chunks.
603 Stats stats;
604 ReleaseToOsInfo rtoi;
605 };
606 COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
607
608 RegionInfo *GetRegionInfo(uptr class_id) const {
609 DCHECK_LT(class_id, kNumClasses);
610 RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
611 return &regions[class_id];
612 }
613
614 uptr GetMetadataEnd(uptr region_beg) const {
615 return region_beg + kRegionSize - kFreeArraySize;
616 }
617
618 uptr GetChunkIdx(uptr chunk, uptr size) const {
619 if (!kUsingConstantSpaceBeg)
620 chunk -= SpaceBeg();
621
622 uptr offset = chunk % kRegionSize;
623 // Here we divide by a non-constant. This is costly.
624 // size always fits into 32-bits. If the offset fits too, use 32-bit div.
625 if (offset >> (SANITIZER_WORDSIZE / 2))
626 return offset / size;
627 return (u32)offset / (u32)size;
628 }
629
630 CompactPtrT *GetFreeArray(uptr region_beg) const {
631 return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
632 }
633
634 bool MapWithCallback(uptr beg, uptr size) {
635 uptr mapped = address_range.Map(beg, size);
636 if (UNLIKELY(!mapped))
637 return false;
638 CHECK_EQ(beg, mapped);
639 MapUnmapCallback().OnMap(beg, size);
640 return true;
641 }
642
643 void MapWithCallbackOrDie(uptr beg, uptr size) {
644 CHECK_EQ(beg, address_range.MapOrDie(beg, size));
645 MapUnmapCallback().OnMap(beg, size);
646 }
647
648 void UnmapWithCallbackOrDie(uptr beg, uptr size) {
649 MapUnmapCallback().OnUnmap(beg, size);
650 address_range.Unmap(beg, size);
651 }
652
653 bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
654 uptr num_freed_chunks) {
655 uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
656 if (region->mapped_free_array < needed_space) {
657 uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
658 CHECK_LE(new_mapped_free_array, kFreeArraySize);
659 uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
660 region->mapped_free_array;
661 uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
662 if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size)))
663 return false;
664 region->mapped_free_array = new_mapped_free_array;
665 }
666 return true;
667 }
668
669 // Check whether this size class is exhausted.
670 bool IsRegionExhausted(RegionInfo *region, uptr class_id,
671 uptr additional_map_size) {
672 if (LIKELY(region->mapped_user + region->mapped_meta +
673 additional_map_size <= kRegionSize - kFreeArraySize))
674 return false;
675 if (!region->exhausted) {
676 region->exhausted = true;
677 Printf("%s: Out of memory. ", SanitizerToolName);
678 Printf("The process has exhausted %zuMB for size class %zu.\n",
679 kRegionSize >> 20, ClassIdToSize(class_id));
680 }
681 return true;
682 }
683
684 NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
685 RegionInfo *region, uptr requested_count) {
686 // region->mutex is held.
687 const uptr region_beg = GetRegionBeginBySizeClass(class_id);
688 const uptr size = ClassIdToSize(class_id);
689
690 const uptr total_user_bytes =
691 region->allocated_user + requested_count * size;
692 // Map more space for chunks, if necessary.
693 if (LIKELY(total_user_bytes > region->mapped_user)) {
694 if (UNLIKELY(region->mapped_user == 0)) {
695 if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
696 // The random state is initialized from ASLR.
697 region->rand_state = static_cast<u32>(region_beg >> 12);
698 // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
699 // preventing just allocated memory from being released sooner than
700 // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
701 // for short lived processes.
702 // Do it only when the feature is turned on, to avoid a potentially
703 // extraneous syscall.
704 if (ReleaseToOSIntervalMs() >= 0)
705 region->rtoi.last_release_at_ns = MonotonicNanoTime();
706 }
707 // Do the mmap for the user memory.
708 const uptr user_map_size =
709 RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
710 if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
711 return false;
712 if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
713 user_map_size)))
714 return false;
715 stat->Add(AllocatorStatMapped, user_map_size);
716 region->mapped_user += user_map_size;
717 }
718 const uptr new_chunks_count =
719 (region->mapped_user - region->allocated_user) / size;
720
721 if (kMetadataSize) {
722 // Calculate the required space for metadata.
723 const uptr total_meta_bytes =
724 region->allocated_meta + new_chunks_count * kMetadataSize;
725 const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
726 RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
727 // Map more space for metadata, if necessary.
728 if (meta_map_size) {
729 if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
730 return false;
731 if (UNLIKELY(!MapWithCallback(
732 GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
733 meta_map_size)))
734 return false;
735 region->mapped_meta += meta_map_size;
736 }
737 }
738
739 // If necessary, allocate more space for the free array and populate it with
740 // newly allocated chunks.
741 const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
742 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
743 return false;
744 CompactPtrT *free_array = GetFreeArray(region_beg);
745 for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
746 i++, chunk += size)
747 free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
748 if (kRandomShuffleChunks)
749 RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
750 &region->rand_state);
751
752 // All necessary memory is mapped and now it is safe to advance all
753 // 'allocated_*' counters.
754 region->num_freed_chunks += new_chunks_count;
755 region->allocated_user += new_chunks_count * size;
756 CHECK_LE(region->allocated_user, region->mapped_user);
757 region->allocated_meta += new_chunks_count * kMetadataSize;
758 CHECK_LE(region->allocated_meta, region->mapped_meta);
759 region->exhausted = false;
760
761 // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
762 // MaybeReleaseToOS from releasing just allocated pages or protect these
763 // not yet used chunks some other way.
764
765 return true;
766 }
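To make the free-array population above concrete (purely illustrative numbers): with size = 64, region->allocated_user = 0, new_chunks_count = 4 and 10 chunks already in the free array, total_freed_chunks is 14 and the loop writes compact pointers for user offsets 0, 64, 128 and 192 into free_array[13], [12], [11] and [10] respectively; with kRandomShuffleChunks enabled, RandomShuffle then permutes only that newly appended tail of 4 entries, leaving the 10 pre-existing entries untouched.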
767
768 class MemoryMapper {
769 public:
770 MemoryMapper(const ThisT& base_allocator, uptr class_id)
771 : allocator(base_allocator),
772 region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
773 released_ranges_count(0),
774 released_bytes(0) {
775 }
776
777 uptr GetReleasedRangesCount() const {
778 return released_ranges_count;
779 }
780
781 uptr GetReleasedBytes() const {
782 return released_bytes;
783 }
784
785 uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
786 // TODO(alekseyshl): The idea to explore is to check if we have enough
787 // space between num_freed_chunks*sizeof(CompactPtrT) and
788 // mapped_free_array to fit buffer_size bytes and use that space instead
789 // of mapping a temporary one.
790 return reinterpret_cast<uptr>(
791 MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
792 }
793
794 void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
795 UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
796 }
797
798 // Releases [from, to) range of pages back to OS.
799 void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
800 const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
801 const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
802 ReleaseMemoryPagesToOS(from_page, to_page);
803 released_ranges_count++;
804 released_bytes += to_page - from_page;
805 }
806
807 private:
808 const ThisT& allocator;
809 const uptr region_base;
810 uptr released_ranges_count;
811 uptr released_bytes;
812 };
813
814 // Attempts to release RAM occupied by freed chunks back to OS. The region is
815 // expected to be locked.
816 void MaybeReleaseToOS(uptr class_id, bool force) {
817 RegionInfo *region = GetRegionInfo(class_id);
818 const uptr chunk_size = ClassIdToSize(class_id);
819 const uptr page_size = GetPageSizeCached();
820
821 uptr n = region->num_freed_chunks;
822 if (n * chunk_size < page_size)
823 return; // No chance to release anything.
824 if ((region->stats.n_freed -
825 region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
826 return; // Nothing new to release.
827 }
828
829 if (!force) {
830 s32 interval_ms = ReleaseToOSIntervalMs();
831 if (interval_ms < 0)
832 return;
833
834 if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
835 MonotonicNanoTime()) {
836 return; // Memory was returned recently.
837 }
838 }
839
840 MemoryMapper memory_mapper(*this, class_id);
841
842 ReleaseFreeMemoryToOS<MemoryMapper>(
843 GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
844 RoundUpTo(region->allocated_user, page_size) / page_size,
845 &memory_mapper);
846
847 if (memory_mapper.GetReleasedRangesCount() > 0) {
848 region->rtoi.n_freed_at_last_release = region->stats.n_freed;
849 region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
850 region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
851 }
852 region->rtoi.last_release_at_ns = MonotonicNanoTime();
853 }
854};

/build/llvm-toolchain-snapshot-8~svn345461/projects/compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h

1//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17// SizeClassMap maps allocation sizes into size classes and back.
18// Class 0 always corresponds to size 0.
19// The other sizes are controlled by the template parameters:
20// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
21// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
22// kMidSizeLog: the classes starting from 1 increase with step
23// 2^kMinSizeLog until 2^kMidSizeLog.
24// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
25// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
26// look like 0b1xx0..0, where x is either 0 or 1.
27//
28// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
29//
30// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
31// Next 4 classes: 256 + i * 64 (i = 1 to 4).
32// Next 4 classes: 512 + i * 128 (i = 1 to 4).
33// ...
34// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
35// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
36//
37// This structure of the size class map gives us:
38// - Efficient table-free class-to-size and size-to-class functions.
39// - The difference between two consecutive size classes is between 14% and 25%.
40//
41// This class also gives a hint to a thread-caching allocator about the amount
42// of chunks that need to be cached per-thread:
43// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
44// The actual number is computed in TransferBatch.
45// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
46//
47// Part of output of SizeClassMap::Print():
48// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
49// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
50// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
51// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
52// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
53// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
54// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
55// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
56//
57// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
58// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
59// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
60// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
61// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
62// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
63// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
64// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
65//
66// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
67// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
68// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
69// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
70//
71// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
72// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
73// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
74// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
75//
76// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
77// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
78// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
79// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
80//
81// ...
82//
83// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
84// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
85// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
86// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
87//
88// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
89//
90//
91// Another example (kNumBits=2):
92// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
93// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
94// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
95// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
96// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
97// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
98// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
99// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
100// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
101// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
102// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
103// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
104// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
105// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
106// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
107// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
108// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
109// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
110// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
111// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
112// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
113// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
114// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
115// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
116// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
117// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
118// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
119
120template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
121 uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
122class SizeClassMap {
123 static const uptr kMinSize = 1 << kMinSizeLog;
124 static const uptr kMidSize = 1 << kMidSizeLog;
125 static const uptr kMidClass = kMidSize / kMinSize;
126 static const uptr S = kNumBits - 1;
127 static const uptr M = (1 << S) - 1;
128
129 public:
130 // kMaxNumCachedHintT is a power of two. It serves as a hint
131 // for the size of TransferBatch, the actual size could be a bit smaller.
132 static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
133 COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
134
135 static const uptr kMaxSize = 1UL << kMaxSizeLog;
136 static const uptr kNumClasses =
137 kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
138 static const uptr kLargestClassID = kNumClasses - 2;
139 static const uptr kBatchClassID = kNumClasses - 1;
140 COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
141 static const uptr kNumClassesRounded =
142 kNumClasses <= 32 ? 32 :
143 kNumClasses <= 64 ? 64 :
144 kNumClasses <= 128 ? 128 : 256;
145
146 static uptr Size(uptr class_id) {
147 // Estimate the result for kBatchClassID because this class does not know
148 // the exact size of TransferBatch. It's OK since we are using the actual
149 // sizeof(TransferBatch) where it matters.
150 if (UNLIKELY(class_id == kBatchClassID))
20: Taking false branch
151 return kMaxNumCachedHint * sizeof(uptr);
152 if (class_id <= kMidClass)
21: Assuming 'class_id' is > 'kMidClass'
22: Taking false branch
153 return kMinSize * class_id;
154 class_id -= kMidClass;
155 uptr t = kMidSize << (class_id >> S);
23: The result of the left shift is undefined due to shifting '256' by '4611686018427387900', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr'
156 return t + (t >> S) * (class_id & M);
157 }
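As a worked check of the size formula above, take the parameters of DefaultSizeClassMap defined at the end of this file (kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17), so kMinSize = 16, kMidSize = 256, kMidClass = 16, S = 2 and M = 3. Size(17) takes the last branch: class_id becomes 1, t = 256 << (1 >> 2) = 256, and the result is 256 + (256 >> 2) * (1 & 3) = 320, matching the 'c17 => s: 320' row above; Size(19) likewise gives 256 + 64 * 3 = 448. The warning on line 155 arises because, on the analyzer's path, class_id is only assumed to be greater than kMidClass: for a valid non-batch class_id the adjusted value is at most (kMaxSizeLog - kMidSizeLog) << S (see the kNumClasses definition above), so the shift count class_id >> S is at most kMaxSizeLog - kMidSizeLog = 9, but for an unconstrained 64-bit class_id the shift count can reach or exceed the 64-bit width of uptr (hence the enormous reported value), and a left shift by 64 or more bits is undefined behaviour; kMidSize is 256 here, which is why the diagnostic mentions shifting '256'.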
158
159 static uptr ClassID(uptr size) {
160 if (UNLIKELY(size > kMaxSize))
161 return 0;
162 if (size <= kMidSize)
163 return (size + kMinSize - 1) >> kMinSizeLog;
164 const uptr l = MostSignificantSetBitIndex(size);
165 const uptr hbits = (size >> (l - S)) & M;
166 const uptr lbits = size & ((1U << (l - S)) - 1);
167 const uptr l1 = l - kMidSizeLog;
168 return kMidClass + (l1 << S) + hbits + (lbits > 0);
169 }
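A worked example of ClassID with the same DefaultSizeClassMap parameters (illustrative only): ClassID(300) takes the final branch since 300 > kMidSize; l = MostSignificantSetBitIndex(300) = 8, hbits = (300 >> 6) & 3 = 0, lbits = 300 & 63 = 44, l1 = 0, giving 16 + 0 + 0 + 1 = 17, and indeed Size(17) = 320 is the smallest class size >= 300 (the 'c17' row above). ClassID(320) gives hbits = 1 and lbits = 0, so it also maps to class 17, consistent with the round-trip checks in Validate() below.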
170
171 static uptr MaxCachedHint(uptr size) {
172 DCHECK_LE(size, kMaxSize);
173 if (UNLIKELY(size == 0))
174 return 0;
175 uptr n;
176 // Force a 32-bit division if the template parameters allow for it.
177 if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
178 n = (1UL << kMaxBytesCachedLog) / size;
179 else
180 n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
181 return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
182 }
183
184 static void Print() {
185 uptr prev_s = 0;
186 uptr total_cached = 0;
187 for (uptr i = 0; i < kNumClasses; i++) {
188 uptr s = Size(i);
189 if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
190 Printf("\n");
191 uptr d = s - prev_s;
192 uptr p = prev_s ? (d * 100 / prev_s) : 0;
193 uptr l = s ? MostSignificantSetBitIndex(s) : 0;
194 uptr cached = MaxCachedHint(s) * s;
195 if (i == kBatchClassID)
196 d = p = l = 0;
197 Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
198 "cached: %zd %zd; id %zd\n",
199 i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
200 total_cached += cached;
201 prev_s = s;
202 }
203 Printf("Total cached: %zd\n", total_cached);
204 }
205
206 static void Validate() {
207 for (uptr c = 1; c < kNumClasses; c++) {
208 // Printf("Validate: c%zd\n", c);
209 uptr s = Size(c);
210 CHECK_NE(s, 0U);
211 if (c == kBatchClassID)
212 continue;
213 CHECK_EQ(ClassID(s), c);
214 if (c < kLargestClassID)
215 CHECK_EQ(ClassID(s + 1), c + 1);
216 CHECK_EQ(ClassID(s - 1), c);
217 CHECK_GT(Size(c), Size(c - 1));
218 }
219 CHECK_EQ(ClassID(kMaxSize + 1), 0);
220
221 for (uptr s = 1; s <= kMaxSize; s++) {
222 uptr c = ClassID(s);
223 // Printf("s%zd => c%zd\n", s, c);
224 CHECK_LT(c, kNumClasses);
225 CHECK_GE(Size(c), s);
226 if (c > 0)
227 CHECK_LT(Size(c - 1), s);
228 }
229 }
230};
231
232typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
233typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
234typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
235
236// The following SizeClassMap holds only a very small number of cached entries,
237// allowing for denser per-class arrays, a smaller memory footprint and usually
238// better performance in threaded environments.
239typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
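As a quick consistency check of the class-count arithmetic (illustrative, using DefaultSizeClassMap above): kMidClass = 256 / 16 = 16 and kNumClasses = 16 + ((17 - 8) << 2) + 1 + 1 = 54, so kLargestClassID = 52 (the 131072-byte class shown as c52 in the first table) and kBatchClassID = 53, with kNumClassesRounded = 64; this satisfies the COMPILER_CHECK that kNumClasses lies between 16 and 256.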