File: compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h
Warning: line 154, column 23: The result of the left shift is undefined due to shifting '256' by '4611686018427387900', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr'
1 | //===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===// | ||||||
2 | // | ||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||
6 | // | ||||||
7 | //===----------------------------------------------------------------------===// | ||||||
8 | /// | ||||||
9 | /// Scudo Hardened Allocator implementation. | ||||||
10 | /// It uses the sanitizer_common allocator as a base and aims at mitigating | ||||||
11 | /// heap corruption vulnerabilities. It provides a checksum-guarded chunk | ||||||
12 | /// header, a delayed free list, and additional sanity checks. | ||||||
13 | /// | ||||||
14 | //===----------------------------------------------------------------------===// | ||||||
15 | |||||||
16 | #include "scudo_allocator.h" | ||||||
17 | #include "scudo_crc32.h" | ||||||
18 | #include "scudo_errors.h" | ||||||
19 | #include "scudo_flags.h" | ||||||
20 | #include "scudo_interface_internal.h" | ||||||
21 | #include "scudo_tsd.h" | ||||||
22 | #include "scudo_utils.h" | ||||||
23 | |||||||
24 | #include "sanitizer_common/sanitizer_allocator_checks.h" | ||||||
25 | #include "sanitizer_common/sanitizer_allocator_interface.h" | ||||||
26 | #include "sanitizer_common/sanitizer_quarantine.h" | ||||||
27 | |||||||
28 | #ifdef GWP_ASAN_HOOKS
29 | # include "gwp_asan/guarded_pool_allocator.h" | ||||||
30 | # include "gwp_asan/optional/backtrace.h" | ||||||
31 | # include "gwp_asan/optional/options_parser.h" | ||||||
32 | #include "gwp_asan/optional/segv_handler.h" | ||||||
33 | #endif // GWP_ASAN_HOOKS | ||||||
34 | |||||||
35 | #include <errno.h>
36 | #include <string.h> | ||||||
37 | |||||||
38 | namespace __scudo { | ||||||
39 | |||||||
40 | // Global static cookie, initialized at start-up. | ||||||
41 | static u32 Cookie; | ||||||
42 | |||||||
43 | // We default to software CRC32 if the alternatives are not supported, either | ||||||
44 | // at compilation or at runtime. | ||||||
45 | static atomic_uint8_t HashAlgorithm = { CRC32Software }; | ||||||
46 | |||||||
47 | inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) { | ||||||
48 | // If the hardware CRC32 feature is defined here, it was enabled everywhere, | ||||||
49 | // as opposed to only for scudo_crc32.cpp. This means that other hardware | ||||||
50 | // specific instructions were likely emitted at other places, and as a | ||||||
51 | // result there is no reason to not use it here. | ||||||
52 | #if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) | ||||||
53 | Crc = CRC32_INTRINSIC(Crc, Value); | ||||||
54 | for (uptr i = 0; i < ArraySize; i++) | ||||||
55 | Crc = CRC32_INTRINSIC(Crc, Array[i]); | ||||||
56 | return Crc; | ||||||
57 | #else | ||||||
58 | if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) { | ||||||
59 | Crc = computeHardwareCRC32(Crc, Value); | ||||||
60 | for (uptr i = 0; i < ArraySize; i++) | ||||||
61 | Crc = computeHardwareCRC32(Crc, Array[i]); | ||||||
62 | return Crc; | ||||||
63 | } | ||||||
64 | Crc = computeSoftwareCRC32(Crc, Value); | ||||||
65 | for (uptr i = 0; i < ArraySize; i++) | ||||||
66 | Crc = computeSoftwareCRC32(Crc, Array[i]); | ||||||
67 | return Crc; | ||||||
68 | #endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) | ||||||
69 | } | ||||||
70 | |||||||
71 | static BackendT &getBackend(); | ||||||
72 | |||||||
73 | namespace Chunk { | ||||||
74 | static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) { | ||||||
75 | return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) - | ||||||
76 | getHeaderSize()); | ||||||
77 | } | ||||||
78 | static inline | ||||||
79 | const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) { | ||||||
80 | return reinterpret_cast<const AtomicPackedHeader *>( | ||||||
81 | reinterpret_cast<uptr>(Ptr) - getHeaderSize()); | ||||||
82 | } | ||||||
83 | |||||||
84 | static inline bool isAligned(const void *Ptr) { | ||||||
85 | return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment); | ||||||
86 | } | ||||||
87 | |||||||
88 | // We can't use the offset member of the chunk itself, as we would double
89 | // fetch it without any guarantee that it hasn't been tampered with in the
90 | // meantime. To prevent this, we work with a local copy of the header.
91 | static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) { | ||||||
92 | return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) - | ||||||
93 | getHeaderSize() - (Header->Offset << MinAlignmentLog)); | ||||||
94 | } | ||||||
95 | |||||||
96 | // Returns the usable size for a chunk, meaning the amount of bytes from the | ||||||
97 | // beginning of the user data to the end of the backend allocated chunk. | ||||||
98 | static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) { | ||||||
99 | const uptr ClassId = Header->ClassId; | ||||||
100 | if (ClassId) | ||||||
101 | return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() - | ||||||
102 | (Header->Offset << MinAlignmentLog); | ||||||
103 | return SecondaryT::GetActuallyAllocatedSize( | ||||||
104 | getBackendPtr(Ptr, Header)) - getHeaderSize(); | ||||||
105 | } | ||||||
106 | |||||||
107 | // Returns the size the user requested when allocating the chunk. | ||||||
108 | static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) { | ||||||
109 | const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes; | ||||||
110 | if (Header->ClassId) | ||||||
111 | return SizeOrUnusedBytes; | ||||||
112 | return SecondaryT::GetActuallyAllocatedSize( | ||||||
113 | getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes; | ||||||
114 | } | ||||||
115 | |||||||
116 | // Compute the checksum of the chunk pointer and its header. | ||||||
117 | static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) { | ||||||
118 | UnpackedHeader ZeroChecksumHeader = *Header; | ||||||
119 | ZeroChecksumHeader.Checksum = 0; | ||||||
120 | uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)]; | ||||||
121 | memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder)); | ||||||
122 | const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr), | ||||||
123 | HeaderHolder, ARRAY_SIZE(HeaderHolder));
124 | return static_cast<u16>(Crc); | ||||||
125 | } | ||||||
126 | |||||||
127 | // Checks the validity of a chunk by verifying its checksum. It doesn't | ||||||
128 | // incur termination in the event of an invalid chunk. | ||||||
129 | static inline bool isValid(const void *Ptr) { | ||||||
130 | PackedHeader NewPackedHeader = | ||||||
131 | atomic_load_relaxed(getConstAtomicHeader(Ptr)); | ||||||
132 | UnpackedHeader NewUnpackedHeader = | ||||||
133 | bit_cast<UnpackedHeader>(NewPackedHeader); | ||||||
134 | return (NewUnpackedHeader.Checksum == | ||||||
135 | computeChecksum(Ptr, &NewUnpackedHeader)); | ||||||
136 | } | ||||||
137 | |||||||
138 | // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid | ||||||
139 | // for a fully nulled out header, its state will be available anyway. | ||||||
140 | COMPILER_CHECK(ChunkAvailable == 0);
141 | |||||||
142 | // Loads and unpacks the header, verifying the checksum in the process. | ||||||
143 | static inline | ||||||
144 | void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) { | ||||||
145 | PackedHeader NewPackedHeader = | ||||||
146 | atomic_load_relaxed(getConstAtomicHeader(Ptr)); | ||||||
147 | *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader); | ||||||
148 | if (UNLIKELY(NewUnpackedHeader->Checksum !=
149 | computeChecksum(Ptr, NewUnpackedHeader)))
150 | dieWithMessage("corrupted chunk header at address %p\n", Ptr); | ||||||
151 | } | ||||||
152 | |||||||
153 | // Packs and stores the header, computing the checksum in the process. | ||||||
154 | static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) { | ||||||
155 | NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader); | ||||||
156 | PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); | ||||||
157 | atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader); | ||||||
158 | } | ||||||
159 | |||||||
160 | // Packs and stores the header, computing the checksum in the process. We | ||||||
161 | // compare the current header with the expected provided one to ensure that | ||||||
162 | // we are not being raced by a corruption occurring in another thread. | ||||||
163 | static inline void compareExchangeHeader(void *Ptr, | ||||||
164 | UnpackedHeader *NewUnpackedHeader, | ||||||
165 | UnpackedHeader *OldUnpackedHeader) { | ||||||
166 | NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader); | ||||||
167 | PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); | ||||||
168 | PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader); | ||||||
169 | if (UNLIKELY(!atomic_compare_exchange_strong(
170 | getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
171 | memory_order_relaxed)))
172 | dieWithMessage("race on chunk header at address %p\n", Ptr); | ||||||
173 | } | ||||||
174 | } // namespace Chunk | ||||||
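
The helpers above implement a checksum-guarded header: the CRC32 is seeded with the per-process Cookie and covers both the chunk address and the header with its Checksum field zeroed, so in-place tampering, or replaying a header at a different address, fails verification. Below is a minimal standalone sketch of that idea; the bit-field widths and the mix() routine are illustrative stand-ins, not Scudo's actual layout or CRC32.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

struct Header {
  uint64_t State : 2, ClassId : 8, SizeOrUnusedBytes : 20, Offset : 16,
           Unused : 2, Checksum : 16;  // hypothetical widths, 64 bits total
};

// Trivial stand-in for computeCRC32(): hashes a buffer with a seed.
static uint16_t mix(uint32_t Seed, const void *Data, size_t Size) {
  uint32_t H = Seed;
  const unsigned char *P = static_cast<const unsigned char *>(Data);
  for (size_t I = 0; I < Size; I++)
    H = (H ^ P[I]) * 16777619u;
  return static_cast<uint16_t>(H ^ (H >> 16));
}

static uint16_t headerChecksum(uint32_t Cookie, const void *Ptr, Header H) {
  H.Checksum = 0;                          // never checksum the checksum itself
  unsigned char Buf[sizeof(void *) + sizeof(Header)];
  memcpy(Buf, &Ptr, sizeof(Ptr));          // bind the header to its address
  memcpy(Buf + sizeof(Ptr), &H, sizeof(H));
  return mix(Cookie, Buf, sizeof(Buf));
}

int main() {
  const uint32_t Cookie = 0xdeadbeef;      // per-process secret, as in init()
  int Chunk = 0;                           // stands in for a user chunk
  Header H = {};
  H.State = 1;
  H.Checksum = headerChecksum(Cookie, &Chunk, H);
  printf("valid=%d\n", H.Checksum == headerChecksum(Cookie, &Chunk, H));
  H.ClassId ^= 1;                          // simulate header corruption
  printf("after tampering valid=%d\n",
         H.Checksum == headerChecksum(Cookie, &Chunk, H));
  return 0;
}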
175 | |||||||
176 | struct QuarantineCallback { | ||||||
177 | explicit QuarantineCallback(AllocatorCacheT *Cache) | ||||||
178 | : Cache_(Cache) {} | ||||||
179 | |||||||
180 | // Chunk recycling function, returns a quarantined chunk to the backend, | ||||||
181 | // first making sure it hasn't been tampered with. | ||||||
182 | void Recycle(void *Ptr) { | ||||||
183 | UnpackedHeader Header; | ||||||
184 | Chunk::loadHeader(Ptr, &Header); | ||||||
185 | if (UNLIKELY(Header.State != ChunkQuarantine))
186 | dieWithMessage("invalid chunk state when recycling address %p\n", Ptr); | ||||||
187 | UnpackedHeader NewHeader = Header; | ||||||
188 | NewHeader.State = ChunkAvailable; | ||||||
189 | Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header); | ||||||
190 | void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header); | ||||||
191 | if (Header.ClassId) | ||||||
192 | getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId); | ||||||
193 | else | ||||||
194 | getBackend().deallocateSecondary(BackendPtr); | ||||||
195 | } | ||||||
196 | |||||||
197 | // Internal quarantine allocation and deallocation functions. We first check | ||||||
198 | // that the batches are indeed serviced by the Primary. | ||||||
199 | // TODO(kostyak): figure out the best way to protect the batches. | ||||||
200 | void *Allocate(uptr Size) { | ||||||
201 | const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch)); | ||||||
202 | return getBackend().allocatePrimary(Cache_, BatchClassId); | ||||||
203 | } | ||||||
204 | |||||||
205 | void Deallocate(void *Ptr) { | ||||||
206 | const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch)); | ||||||
207 | getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId); | ||||||
208 | } | ||||||
209 | |||||||
210 | AllocatorCacheT *Cache_; | ||||||
211 | COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
212 | }; | ||||||
213 | |||||||
214 | typedef Quarantine<QuarantineCallback, void> QuarantineT; | ||||||
215 | typedef QuarantineT::Cache QuarantineCacheT; | ||||||
216 | COMPILER_CHECK(sizeof(QuarantineCacheT) <=
217 | sizeof(ScudoTSD::QuarantineCachePlaceHolder));
218 | |||||||
219 | QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) { | ||||||
220 | return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder); | ||||||
221 | } | ||||||
222 | |||||||
223 | #ifdef GWP_ASAN_HOOKS
224 | static gwp_asan::GuardedPoolAllocator GuardedAlloc; | ||||||
225 | #endif // GWP_ASAN_HOOKS | ||||||
226 | |||||||
227 | struct Allocator { | ||||||
228 | static const uptr MaxAllowedMallocSize = | ||||||
229 | FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
230 | |||||||
231 | BackendT Backend; | ||||||
232 | QuarantineT Quarantine; | ||||||
233 | |||||||
234 | u32 QuarantineChunksUpToSize; | ||||||
235 | |||||||
236 | bool DeallocationTypeMismatch; | ||||||
237 | bool ZeroContents; | ||||||
238 | bool DeleteSizeMismatch; | ||||||
239 | |||||||
240 | bool CheckRssLimit; | ||||||
241 | uptr HardRssLimitMb; | ||||||
242 | uptr SoftRssLimitMb; | ||||||
243 | atomic_uint8_t RssLimitExceeded; | ||||||
244 | atomic_uint64_t RssLastCheckedAtNS; | ||||||
245 | |||||||
246 | explicit Allocator(LinkerInitialized) | ||||||
247 | : Quarantine(LINKER_INITIALIZED) {} | ||||||
248 | |||||||
249 | NOINLINE void performSanityChecks();
250 | |||||||
251 | void init() { | ||||||
252 | SanitizerToolName = "Scudo"; | ||||||
253 | PrimaryAllocatorName = "ScudoPrimary"; | ||||||
254 | SecondaryAllocatorName = "ScudoSecondary"; | ||||||
255 | |||||||
256 | initFlags(); | ||||||
257 | |||||||
258 | performSanityChecks(); | ||||||
259 | |||||||
260 | // Check if hardware CRC32 is supported in the binary and by the platform, | ||||||
261 | // if so, opt for the CRC32 hardware version of the checksum. | ||||||
262 | if (&computeHardwareCRC32 && hasHardwareCRC32()) | ||||||
263 | atomic_store_relaxed(&HashAlgorithm, CRC32Hardware); | ||||||
264 | |||||||
265 | SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); | ||||||
266 | Backend.init(common_flags()->allocator_release_to_os_interval_ms); | ||||||
267 | HardRssLimitMb = common_flags()->hard_rss_limit_mb; | ||||||
268 | SoftRssLimitMb = common_flags()->soft_rss_limit_mb; | ||||||
269 | Quarantine.Init( | ||||||
270 | static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10, | ||||||
271 | static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10); | ||||||
272 | QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 : | ||||||
273 | getFlags()->QuarantineChunksUpToSize; | ||||||
274 | DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch; | ||||||
275 | DeleteSizeMismatch = getFlags()->DeleteSizeMismatch; | ||||||
276 | ZeroContents = getFlags()->ZeroContents; | ||||||
277 | |||||||
278 | if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
279 | /*blocking=*/false))) {
280 | Cookie = static_cast<u32>((NanoTime() >> 12) ^ | ||||||
281 | (reinterpret_cast<uptr>(this) >> 4)); | ||||||
282 | } | ||||||
283 | |||||||
284 | CheckRssLimit = HardRssLimitMb || SoftRssLimitMb; | ||||||
285 | if (CheckRssLimit) | ||||||
286 | atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime()); | ||||||
287 | } | ||||||
288 | |||||||
289 | // Helper function that checks for a valid Scudo chunk. nullptr isn't. | ||||||
290 | bool isValidPointer(const void *Ptr) { | ||||||
291 | initThreadMaybe(); | ||||||
292 | if (UNLIKELY(!Ptr))
293 | return false; | ||||||
294 | if (!Chunk::isAligned(Ptr)) | ||||||
295 | return false; | ||||||
296 | return Chunk::isValid(Ptr); | ||||||
297 | } | ||||||
298 | |||||||
299 | NOINLINE bool isRssLimitExceeded();
300 | |||||||
301 | // Allocates a chunk. | ||||||
302 | void *allocate(uptr Size, uptr Alignment, AllocType Type, | ||||||
303 | bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
304 | initThreadMaybe(); | ||||||
305 | |||||||
306 | if (UNLIKELY(Alignment > MaxAlignment)) {
307 | if (AllocatorMayReturnNull()) | ||||||
308 | return nullptr; | ||||||
309 | reportAllocationAlignmentTooBig(Alignment, MaxAlignment); | ||||||
310 | } | ||||||
311 | if (UNLIKELY(Alignment < MinAlignment))
312 | Alignment = MinAlignment; | ||||||
313 | |||||||
314 | #ifdef GWP_ASAN_HOOKS
315 | if (UNLIKELY(GuardedAlloc.shouldSample())) {
316 | if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
317 | if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
318 | __sanitizer_malloc_hook(Ptr, Size); | ||||||
319 | return Ptr; | ||||||
320 | } | ||||||
321 | } | ||||||
322 | #endif // GWP_ASAN_HOOKS | ||||||
323 | |||||||
324 | const uptr NeededSize = RoundUpTo(Size, MinAlignment) +
325 | Chunk::getHeaderSize();
326 | const uptr AlignedSize = (Alignment > MinAlignment) ? | ||||||
327 | NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize; | ||||||
328 | if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
329 | UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
330 | if (AllocatorMayReturnNull()) | ||||||
331 | return nullptr; | ||||||
332 | reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize); | ||||||
333 | } | ||||||
334 | |||||||
335 | if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
336 | if (AllocatorMayReturnNull()) | ||||||
337 | return nullptr; | ||||||
338 | reportRssLimitExceeded(); | ||||||
339 | } | ||||||
340 | |||||||
341 | // Primary and Secondary backed allocations have a different treatment. We | ||||||
342 | // deal with alignment requirements of Primary serviced allocations here, | ||||||
343 | // but the Secondary will take care of its own alignment needs. | ||||||
344 | void *BackendPtr; | ||||||
345 | uptr BackendSize; | ||||||
346 | u8 ClassId; | ||||||
347 | if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) { | ||||||
348 | BackendSize = AlignedSize; | ||||||
349 | ClassId = SizeClassMap::ClassID(BackendSize); | ||||||
350 | bool UnlockRequired; | ||||||
351 | ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); | ||||||
352 | BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId); | ||||||
353 | if (UnlockRequired)
354 | TSD->unlock();
355 | } else { | ||||||
356 | BackendSize = NeededSize; | ||||||
357 | ClassId = 0; | ||||||
358 | BackendPtr = Backend.allocateSecondary(BackendSize, Alignment); | ||||||
359 | } | ||||||
360 | if (UNLIKELY(!BackendPtr)) {
361 | SetAllocatorOutOfMemory(); | ||||||
362 | if (AllocatorMayReturnNull()) | ||||||
363 | return nullptr; | ||||||
364 | reportOutOfMemory(Size); | ||||||
365 | } | ||||||
366 | |||||||
367 | // If requested, we will zero out the entire contents of the returned chunk. | ||||||
368 | if ((ForceZeroContents || ZeroContents) && ClassId)
369 | memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
370 | |||||||
371 | UnpackedHeader Header = {}; | ||||||
372 | uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize(); | ||||||
373 | if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
374 | // Since the Secondary takes care of alignment, a non-aligned pointer | ||||||
375 | // means it is from the Primary. It is also the only case where the offset | ||||||
376 | // field of the header would be non-zero. | ||||||
377 | DCHECK(ClassId); | ||||||
378 | const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment); | ||||||
379 | Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog; | ||||||
380 | UserPtr = AlignedUserPtr; | ||||||
381 | } | ||||||
382 | DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize); | ||||||
383 | Header.State = ChunkAllocated; | ||||||
384 | Header.AllocType = Type; | ||||||
385 | if (ClassId) { | ||||||
386 | Header.ClassId = ClassId; | ||||||
387 | Header.SizeOrUnusedBytes = Size; | ||||||
388 | } else { | ||||||
389 | // The secondary fits the allocations to a page, so the amount of unused | ||||||
390 | // bytes is the difference between the end of the user allocation and the | ||||||
391 | // next page boundary. | ||||||
392 | const uptr PageSize = GetPageSizeCached(); | ||||||
393 | const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1); | ||||||
394 | if (TrailingBytes) | ||||||
395 | Header.SizeOrUnusedBytes = PageSize - TrailingBytes; | ||||||
396 | } | ||||||
397 | void *Ptr = reinterpret_cast<void *>(UserPtr); | ||||||
398 | Chunk::storeHeader(Ptr, &Header); | ||||||
399 | if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
400 | __sanitizer_malloc_hook(Ptr, Size); | ||||||
401 | return Ptr; | ||||||
402 | } | ||||||
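
The Offset field written above is what getBackendPtr() uses later to walk back from the user pointer to the block returned by the Primary. A small arithmetic sketch of that round-trip, with assumed values (16-byte header, MinAlignment of 16, a 64-byte alignment request) rather than Scudo's real configuration:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t HeaderSize = 16, MinAlignmentLog = 4;   // assumed values
  const uintptr_t BackendPtr = 0x1000, Alignment = 64;
  uintptr_t UserPtr = BackendPtr + HeaderSize;            // 0x1010, misaligned
  const uintptr_t AlignedUserPtr =
      (UserPtr + Alignment - 1) & ~(Alignment - 1);       // 0x1040
  const uintptr_t Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;  // 3
  UserPtr = AlignedUserPtr;
  // getBackendPtr() reverses the transformation using the header's Offset.
  const uintptr_t Recovered = UserPtr - HeaderSize - (Offset << MinAlignmentLog);
  assert(Recovered == BackendPtr);
  return 0;
}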
403 | |||||||
404 | // Place a chunk in the quarantine or directly deallocate it in the event of | ||||||
405 | // a zero-sized quarantine, or if the size of the chunk is greater than the | ||||||
406 | // quarantine chunk size threshold. | ||||||
407 | void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, | ||||||
408 | uptr Size) NO_THREAD_SAFETY_ANALYSIS {
409 | const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize); | ||||||
410 | if (BypassQuarantine) { | ||||||
411 | UnpackedHeader NewHeader = *Header; | ||||||
412 | NewHeader.State = ChunkAvailable; | ||||||
413 | Chunk::compareExchangeHeader(Ptr, &NewHeader, Header); | ||||||
414 | void *BackendPtr = Chunk::getBackendPtr(Ptr, Header); | ||||||
415 | if (Header->ClassId) { | ||||||
416 | bool UnlockRequired; | ||||||
417 | ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); | ||||||
418 | getBackend().deallocatePrimary(&TSD->Cache, BackendPtr, | ||||||
419 | Header->ClassId); | ||||||
420 | if (UnlockRequired) | ||||||
421 | TSD->unlock(); | ||||||
422 | } else { | ||||||
423 | getBackend().deallocateSecondary(BackendPtr); | ||||||
424 | } | ||||||
425 | } else { | ||||||
426 | // If a small memory amount was allocated with a larger alignment, we want | ||||||
427 | // to take that into account. Otherwise the Quarantine would be filled | ||||||
428 | // with tiny chunks, taking a lot of VA memory. This is an approximation | ||||||
429 | // of the usable size, that allows us to not call | ||||||
430 | // GetActuallyAllocatedSize. | ||||||
431 | const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog); | ||||||
432 | UnpackedHeader NewHeader = *Header; | ||||||
433 | NewHeader.State = ChunkQuarantine; | ||||||
434 | Chunk::compareExchangeHeader(Ptr, &NewHeader, Header); | ||||||
435 | bool UnlockRequired; | ||||||
436 | ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); | ||||||
437 | Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache), | ||||||
438 | Ptr, EstimatedSize); | ||||||
439 | if (UnlockRequired) | ||||||
440 | TSD->unlock(); | ||||||
441 | } | ||||||
442 | } | ||||||
443 | |||||||
444 | // Deallocates a Chunk, which means either adding it to the quarantine or | ||||||
445 | // directly returning it to the backend if criteria are met. | ||||||
446 | void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment, | ||||||
447 | AllocType Type) { | ||||||
448 | // For a deallocation, we only ensure minimal initialization, meaning thread | ||||||
449 | // local data will be left uninitialized for now (when using ELF TLS). The | ||||||
450 | // fallback cache will be used instead. This is a workaround for a situation | ||||||
451 | // where the only heap operation performed in a thread would be a free past | ||||||
452 | // the TLS destructors, ending up in initialized thread specific data never | ||||||
453 | // being destroyed properly. Any other heap operation will do a full init. | ||||||
454 | initThreadMaybe(/*MinimalInit=*/true); | ||||||
455 | if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
456 | __sanitizer_free_hook(Ptr); | ||||||
457 | if (UNLIKELY(!Ptr))
458 | return; | ||||||
459 | |||||||
460 | #ifdef GWP_ASAN_HOOKS
461 | if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
462 | GuardedAlloc.deallocate(Ptr); | ||||||
463 | return; | ||||||
464 | } | ||||||
465 | #endif // GWP_ASAN_HOOKS | ||||||
466 | |||||||
467 | if (UNLIKELY(!Chunk::isAligned(Ptr)))
468 | dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr); | ||||||
469 | UnpackedHeader Header; | ||||||
470 | Chunk::loadHeader(Ptr, &Header); | ||||||
471 | if (UNLIKELY(Header.State != ChunkAllocated))
472 | dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr); | ||||||
473 | if (DeallocationTypeMismatch) { | ||||||
474 | // The deallocation type has to match the allocation one. | ||||||
475 | if (Header.AllocType != Type) { | ||||||
476 | // With the exception of memalign'd chunks, which can still be free'd.
477 | if (Header.AllocType != FromMemalign || Type != FromMalloc) | ||||||
478 | dieWithMessage("allocation type mismatch when deallocating address " | ||||||
479 | "%p\n", Ptr); | ||||||
480 | } | ||||||
481 | } | ||||||
482 | const uptr Size = Chunk::getSize(Ptr, &Header); | ||||||
483 | if (DeleteSizeMismatch) { | ||||||
484 | if (DeleteSize && DeleteSize != Size) | ||||||
485 | dieWithMessage("invalid sized delete when deallocating address %p\n", | ||||||
486 | Ptr); | ||||||
487 | } | ||||||
488 | (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches. | ||||||
489 | quarantineOrDeallocateChunk(Ptr, &Header, Size); | ||||||
490 | } | ||||||
491 | |||||||
492 | // Reallocates a chunk. We can save on a new allocation if the new requested | ||||||
493 | // size still fits in the chunk. | ||||||
494 | void *reallocate(void *OldPtr, uptr NewSize) { | ||||||
495 | initThreadMaybe(); | ||||||
496 | |||||||
497 | #ifdef GWP_ASAN_HOOKS
498 | if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
499 | size_t OldSize = GuardedAlloc.getSize(OldPtr); | ||||||
500 | void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); | ||||||
501 | if (NewPtr) | ||||||
502 | memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize); | ||||||
503 | GuardedAlloc.deallocate(OldPtr); | ||||||
504 | return NewPtr; | ||||||
505 | } | ||||||
506 | #endif // GWP_ASAN_HOOKS | ||||||
507 | |||||||
508 | if (UNLIKELY(!Chunk::isAligned(OldPtr)))
509 | dieWithMessage("misaligned address when reallocating address %p\n", | ||||||
510 | OldPtr); | ||||||
511 | UnpackedHeader OldHeader; | ||||||
512 | Chunk::loadHeader(OldPtr, &OldHeader); | ||||||
513 | if (UNLIKELY(OldHeader.State != ChunkAllocated))
514 | dieWithMessage("invalid chunk state when reallocating address %p\n", | ||||||
515 | OldPtr); | ||||||
516 | if (DeallocationTypeMismatch) { | ||||||
517 | if (UNLIKELY(OldHeader.AllocType != FromMalloc))
518 | dieWithMessage("allocation type mismatch when reallocating address " | ||||||
519 | "%p\n", OldPtr); | ||||||
520 | } | ||||||
521 | const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader); | ||||||
522 | // The new size still fits in the current chunk, and the size difference | ||||||
523 | // is reasonable. | ||||||
524 | if (NewSize <= UsableSize && | ||||||
525 | (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) { | ||||||
526 | UnpackedHeader NewHeader = OldHeader; | ||||||
527 | NewHeader.SizeOrUnusedBytes = | ||||||
528 | OldHeader.ClassId ? NewSize : UsableSize - NewSize; | ||||||
529 | Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader); | ||||||
530 | return OldPtr; | ||||||
531 | } | ||||||
532 | // Otherwise, we have to allocate a new chunk and copy the contents of the | ||||||
533 | // old one. | ||||||
534 | void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); | ||||||
535 | if (NewPtr) { | ||||||
536 | const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes : | ||||||
537 | UsableSize - OldHeader.SizeOrUnusedBytes; | ||||||
538 | memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize)); | ||||||
539 | quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize); | ||||||
540 | } | ||||||
541 | return NewPtr; | ||||||
542 | } | ||||||
543 | |||||||
544 | // Helper function that returns the actual usable size of a chunk. | ||||||
545 | uptr getUsableSize(const void *Ptr) { | ||||||
546 | initThreadMaybe(); | ||||||
547 | if (UNLIKELY(!Ptr))
548 | return 0; | ||||||
549 | |||||||
550 | #ifdef GWP_ASAN_HOOKS
551 | if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
552 | return GuardedAlloc.getSize(Ptr); | ||||||
553 | #endif // GWP_ASAN_HOOKS | ||||||
554 | |||||||
555 | UnpackedHeader Header; | ||||||
556 | Chunk::loadHeader(Ptr, &Header); | ||||||
557 | // Getting the usable size of a chunk only makes sense if it's allocated. | ||||||
558 | if (UNLIKELY(Header.State != ChunkAllocated))
559 | dieWithMessage("invalid chunk state when sizing address %p\n", Ptr); | ||||||
560 | return Chunk::getUsableSize(Ptr, &Header); | ||||||
561 | } | ||||||
562 | |||||||
563 | void *calloc(uptr NMemB, uptr Size) { | ||||||
564 | initThreadMaybe(); | ||||||
565 | if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
566 | if (AllocatorMayReturnNull()) | ||||||
567 | return nullptr; | ||||||
568 | reportCallocOverflow(NMemB, Size); | ||||||
569 | } | ||||||
570 | return allocate(NMemB * Size, MinAlignment, FromMalloc, true); | ||||||
571 | } | ||||||
572 | |||||||
573 | void commitBack(ScudoTSD *TSD) { | ||||||
574 | Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache)); | ||||||
575 | Backend.destroyCache(&TSD->Cache); | ||||||
576 | } | ||||||
577 | |||||||
578 | uptr getStats(AllocatorStat StatType) { | ||||||
579 | initThreadMaybe(); | ||||||
580 | uptr stats[AllocatorStatCount]; | ||||||
581 | Backend.getStats(stats); | ||||||
582 | return stats[StatType]; | ||||||
583 | } | ||||||
584 | |||||||
585 | bool canReturnNull() { | ||||||
586 | initThreadMaybe(); | ||||||
587 | return AllocatorMayReturnNull(); | ||||||
588 | } | ||||||
589 | |||||||
590 | void setRssLimit(uptr LimitMb, bool HardLimit) { | ||||||
591 | if (HardLimit) | ||||||
592 | HardRssLimitMb = LimitMb; | ||||||
593 | else | ||||||
594 | SoftRssLimitMb = LimitMb; | ||||||
595 | CheckRssLimit = HardRssLimitMb || SoftRssLimitMb; | ||||||
596 | } | ||||||
597 | |||||||
598 | void printStats() { | ||||||
599 | initThreadMaybe(); | ||||||
600 | Backend.printStats(); | ||||||
601 | } | ||||||
602 | }; | ||||||
603 | |||||||
604 | NOINLINE void Allocator::performSanityChecks() {
605 | // Verify that the header offset field can hold the maximum offset. In the | ||||||
606 | // case of the Secondary allocator, it takes care of alignment and the | ||||||
607 | // offset will always be 0. In the case of the Primary, the worst case | ||||||
608 | // scenario happens in the last size class, when the backend allocation | ||||||
609 | // would already be aligned on the requested alignment, which would happen | ||||||
610 | // to be the maximum alignment that would fit in that size class. As a | ||||||
611 | // result, the maximum offset will be at most the maximum alignment for the | ||||||
612 | // last size class minus the header size, in multiples of MinAlignment. | ||||||
613 | UnpackedHeader Header = {}; | ||||||
614 | const uptr MaxPrimaryAlignment = | ||||||
615 | 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment); | ||||||
616 | const uptr MaxOffset = | ||||||
617 | (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog; | ||||||
618 | Header.Offset = MaxOffset; | ||||||
619 | if (Header.Offset != MaxOffset) | ||||||
620 | dieWithMessage("maximum possible offset doesn't fit in header\n"); | ||||||
621 | // Verify that we can fit the maximum size or amount of unused bytes in the | ||||||
622 | // header. Given that the Secondary fits the allocation to a page, the worst | ||||||
623 | // case scenario happens in the Primary. It will depend on the second to | ||||||
624 | // last and last class sizes, as well as the dynamic base for the Primary. | ||||||
625 | // The following is an over-approximation that works for our needs. | ||||||
626 | const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1; | ||||||
627 | Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes; | ||||||
628 | if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) | ||||||
629 | dieWithMessage("maximum possible unused bytes doesn't fit in header\n"); | ||||||
630 | |||||||
631 | const uptr LargestClassId = SizeClassMap::kLargestClassID; | ||||||
632 | Header.ClassId = LargestClassId; | ||||||
633 | if (Header.ClassId != LargestClassId) | ||||||
634 | dieWithMessage("largest class ID doesn't fit in header\n"); | ||||||
635 | } | ||||||
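
To see why these checks matter, here is the same bit-field round-trip test with made-up numbers (kMaxSize of 64 KiB, 16-byte minimum alignment and header, a 16-bit Offset field); the point is only that the computed maximum must survive being stored in the narrow header field:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kMaxSize = 1 << 16, MinAlignment = 16, MinAlignmentLog = 4;
  const uint64_t HeaderSize = 16;                  // all values hypothetical
  // Largest power of two not exceeding kMaxSize - MinAlignment.
  uint64_t MaxPrimaryAlignment = 1;
  while (MaxPrimaryAlignment * 2 <= kMaxSize - MinAlignment)
    MaxPrimaryAlignment *= 2;                      // 32768
  const uint64_t MaxOffset =
      (MaxPrimaryAlignment - HeaderSize) >> MinAlignmentLog;  // 2047
  struct { uint64_t Offset : 16; } Header = {};    // assumed field width
  Header.Offset = MaxOffset;
  printf("MaxOffset=%llu fits=%d\n", (unsigned long long)MaxOffset,
         (int)(Header.Offset == MaxOffset));
  return 0;
}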
636 | |||||||
637 | // Opportunistic RSS limit check. This will update the RSS limit status, if | ||||||
638 | // it can, every 250ms, otherwise it will just return the current one. | ||||||
639 | NOINLINE bool Allocator::isRssLimitExceeded() {
640 | u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS); | ||||||
641 | const u64 CurrentCheck = MonotonicNanoTime(); | ||||||
642 | if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
643 | return atomic_load_relaxed(&RssLimitExceeded); | ||||||
644 | if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck, | ||||||
645 | CurrentCheck, memory_order_relaxed)) | ||||||
646 | return atomic_load_relaxed(&RssLimitExceeded); | ||||||
647 | // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the | ||||||
648 | // RSS from /proc/self/statm by default. We might want to | ||||||
649 | // call getrusage directly, even if it's less accurate. | ||||||
650 | const uptr CurrentRssMb = GetRSS() >> 20; | ||||||
651 | if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
652 | dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n", | ||||||
653 | HardRssLimitMb, CurrentRssMb); | ||||||
654 | if (SoftRssLimitMb) { | ||||||
655 | if (atomic_load_relaxed(&RssLimitExceeded)) { | ||||||
656 | if (CurrentRssMb <= SoftRssLimitMb) | ||||||
657 | atomic_store_relaxed(&RssLimitExceeded, false); | ||||||
658 | } else { | ||||||
659 | if (CurrentRssMb > SoftRssLimitMb) { | ||||||
660 | atomic_store_relaxed(&RssLimitExceeded, true); | ||||||
661 | Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n", | ||||||
662 | SoftRssLimitMb, CurrentRssMb); | ||||||
663 | } | ||||||
664 | } | ||||||
665 | } | ||||||
666 | return atomic_load_relaxed(&RssLimitExceeded); | ||||||
667 | } | ||||||
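
The function above is an opportunistic, rate-limited check: at most one thread per 250 ms window pays for the expensive GetRSS() call (it wins the compare-exchange on the timestamp), while every other caller reuses the cached verdict. A portable sketch of the same pattern, with a placeholder measurement instead of sanitizer_common's GetRSS():

#include <atomic>
#include <chrono>
#include <cstdint>

static std::atomic<uint64_t> LastCheckNs{0};
static std::atomic<bool> LimitExceeded{false};

// Placeholder for the expensive measurement (e.g. reading /proc/self/statm).
static bool expensiveCheck() { return false; }

bool isLimitExceeded() {
  using namespace std::chrono;
  const uint64_t Now =
      duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
  uint64_t Last = LastCheckNs.load(std::memory_order_relaxed);
  if (Now < Last + 250ull * 1000000ull)        // still inside the 250 ms window
    return LimitExceeded.load(std::memory_order_relaxed);
  if (!LastCheckNs.compare_exchange_weak(Last, Now, std::memory_order_relaxed))
    return LimitExceeded.load(std::memory_order_relaxed);  // another thread won
  LimitExceeded.store(expensiveCheck(), std::memory_order_relaxed);
  return LimitExceeded.load(std::memory_order_relaxed);
}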
668 | |||||||
669 | static Allocator Instance(LINKER_INITIALIZED); | ||||||
670 | |||||||
671 | static BackendT &getBackend() { | ||||||
672 | return Instance.Backend; | ||||||
673 | } | ||||||
674 | |||||||
675 | void initScudo() { | ||||||
676 | Instance.init(); | ||||||
677 | #ifdef GWP_ASAN_HOOKS
678 | gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"), | ||||||
679 | Printf); | ||||||
680 | gwp_asan::options::Options &Opts = gwp_asan::options::getOptions(); | ||||||
681 | Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction(); | ||||||
682 | GuardedAlloc.init(Opts); | ||||||
683 | |||||||
684 | if (Opts.InstallSignalHandlers) | ||||||
685 | gwp_asan::segv_handler::installSignalHandlers( | ||||||
686 | &GuardedAlloc, __sanitizer::Printf, | ||||||
687 | gwp_asan::backtrace::getPrintBacktraceFunction(), | ||||||
688 | gwp_asan::backtrace::getSegvBacktraceFunction()); | ||||||
689 | #endif // GWP_ASAN_HOOKS | ||||||
690 | } | ||||||
691 | |||||||
692 | void ScudoTSD::init() { | ||||||
693 | getBackend().initCache(&Cache); | ||||||
694 | memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); | ||||||
695 | } | ||||||
696 | |||||||
697 | void ScudoTSD::commitBack() { | ||||||
698 | Instance.commitBack(this); | ||||||
699 | } | ||||||
700 | |||||||
701 | void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) { | ||||||
702 | if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
703 | errno = EINVAL;
704 | if (Instance.canReturnNull()) | ||||||
705 | return nullptr; | ||||||
706 | reportAllocationAlignmentNotPowerOfTwo(Alignment); | ||||||
707 | } | ||||||
708 | return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type)); | ||||||
709 | } | ||||||
710 | |||||||
711 | void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) { | ||||||
712 | Instance.deallocate(Ptr, Size, Alignment, Type); | ||||||
713 | } | ||||||
714 | |||||||
715 | void *scudoRealloc(void *Ptr, uptr Size) { | ||||||
716 | if (!Ptr) | ||||||
717 | return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc)); | ||||||
718 | if (Size == 0) { | ||||||
719 | Instance.deallocate(Ptr, 0, 0, FromMalloc); | ||||||
720 | return nullptr; | ||||||
721 | } | ||||||
722 | return SetErrnoOnNull(Instance.reallocate(Ptr, Size)); | ||||||
723 | } | ||||||
724 | |||||||
725 | void *scudoCalloc(uptr NMemB, uptr Size) { | ||||||
726 | return SetErrnoOnNull(Instance.calloc(NMemB, Size)); | ||||||
727 | } | ||||||
728 | |||||||
729 | void *scudoValloc(uptr Size) { | ||||||
730 | return SetErrnoOnNull( | ||||||
731 | Instance.allocate(Size, GetPageSizeCached(), FromMemalign)); | ||||||
732 | } | ||||||
733 | |||||||
734 | void *scudoPvalloc(uptr Size) { | ||||||
735 | const uptr PageSize = GetPageSizeCached(); | ||||||
736 | if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
737 | errno = ENOMEM;
738 | if (Instance.canReturnNull()) | ||||||
739 | return nullptr; | ||||||
740 | reportPvallocOverflow(Size); | ||||||
741 | } | ||||||
742 | // pvalloc(0) should allocate one page. | ||||||
743 | Size = Size ? RoundUpTo(Size, PageSize) : PageSize; | ||||||
744 | return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign)); | ||||||
745 | } | ||||||
746 | |||||||
747 | int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { | ||||||
748 | if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
749 | if (!Instance.canReturnNull()) | ||||||
750 | reportInvalidPosixMemalignAlignment(Alignment); | ||||||
751 | return EINVAL;
752 | } | ||||||
753 | void *Ptr = Instance.allocate(Size, Alignment, FromMemalign); | ||||||
754 | if (UNLIKELY(!Ptr))
755 | return ENOMEM;
756 | *MemPtr = Ptr; | ||||||
757 | return 0; | ||||||
758 | } | ||||||
759 | |||||||
760 | void *scudoAlignedAlloc(uptr Alignment, uptr Size) { | ||||||
761 | if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
762 | errno = EINVAL;
763 | if (Instance.canReturnNull()) | ||||||
764 | return nullptr; | ||||||
765 | reportInvalidAlignedAllocAlignment(Size, Alignment); | ||||||
766 | } | ||||||
767 | return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc)); | ||||||
768 | } | ||||||
769 | |||||||
770 | uptr scudoMallocUsableSize(void *Ptr) { | ||||||
771 | return Instance.getUsableSize(Ptr); | ||||||
772 | } | ||||||
773 | |||||||
774 | } // namespace __scudo | ||||||
775 | |||||||
776 | using namespace __scudo; | ||||||
777 | |||||||
778 | // MallocExtension helper functions | ||||||
779 | |||||||
780 | uptr __sanitizer_get_current_allocated_bytes() { | ||||||
781 | return Instance.getStats(AllocatorStatAllocated); | ||||||
782 | } | ||||||
783 | |||||||
784 | uptr __sanitizer_get_heap_size() { | ||||||
785 | return Instance.getStats(AllocatorStatMapped); | ||||||
786 | } | ||||||
787 | |||||||
788 | uptr __sanitizer_get_free_bytes() { | ||||||
789 | return 1; | ||||||
790 | } | ||||||
791 | |||||||
792 | uptr __sanitizer_get_unmapped_bytes() { | ||||||
793 | return 1; | ||||||
794 | } | ||||||
795 | |||||||
796 | uptr __sanitizer_get_estimated_allocated_size(uptr Size) { | ||||||
797 | return Size; | ||||||
798 | } | ||||||
799 | |||||||
800 | int __sanitizer_get_ownership(const void *Ptr) { | ||||||
801 | return Instance.isValidPointer(Ptr); | ||||||
802 | } | ||||||
803 | |||||||
804 | uptr __sanitizer_get_allocated_size(const void *Ptr) { | ||||||
805 | return Instance.getUsableSize(Ptr); | ||||||
806 | } | ||||||
807 | |||||||
808 | #if !SANITIZER_SUPPORTS_WEAK_HOOKS
809 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
810 | void *Ptr, uptr Size) {
811 | (void)Ptr; | ||||||
812 | (void)Size; | ||||||
813 | } | ||||||
814 | |||||||
815 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
816 | (void)Ptr; | ||||||
817 | } | ||||||
818 | #endif | ||||||
819 | |||||||
820 | // Interface functions | ||||||
821 | |||||||
822 | void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) { | ||||||
823 | if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
824 | return; | ||||||
825 | Instance.setRssLimit(LimitMb, !!HardLimit); | ||||||
826 | } | ||||||
827 | |||||||
828 | void __scudo_print_stats() { | ||||||
829 | Instance.printStats(); | ||||||
830 | } |
1 | //===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Part of the Sanitizer Allocator. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | #ifndef SANITIZER_ALLOCATOR_H |
13 | #error This file must be included inside sanitizer_allocator.h |
14 | #endif |
15 | |
16 | template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache; |
17 | |
18 | // SizeClassAllocator64 -- allocator for 64-bit address space. |
19 | // The template parameter Params is a class containing the actual parameters. |
20 | // |
21 | // Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg. |
22 | // If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap. |
23 | // Otherwise SpaceBeg=kSpaceBeg (fixed address). |
24 | // kSpaceSize is a power of two. |
25 | // At the beginning the entire space is mprotect-ed, then small parts of it |
26 | // are mapped on demand. |
27 | // |
28 | // Region: a part of Space dedicated to a single size class. |
29 | // There are kNumClasses Regions of equal size. |
30 | // |
31 | // UserChunk: a piece of memory returned to user. |
32 | // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk. |
33 | |
34 | // FreeArray is an array of freed chunks (stored as 4-byte offsets).
35 | // |
36 | // A Region looks like this: |
37 | // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray |
38 | |
39 | struct SizeClassAllocator64FlagMasks { // Bit masks. |
40 | enum { |
41 | kRandomShuffleChunks = 1, |
42 | }; |
43 | }; |
44 | |
45 | template <typename Allocator> |
46 | class MemoryMapper { |
47 | public: |
48 | typedef typename Allocator::CompactPtrT CompactPtrT; |
49 | |
50 | explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {} |
51 | |
52 | bool GetAndResetStats(uptr &ranges, uptr &bytes) { |
53 | ranges = released_ranges_count_; |
54 | released_ranges_count_ = 0; |
55 | bytes = released_bytes_; |
56 | released_bytes_ = 0; |
57 | return ranges != 0; |
58 | } |
59 | |
60 | u64 *MapPackedCounterArrayBuffer(uptr count) { |
61 | buffer_.clear(); |
62 | buffer_.resize(count); |
63 | return buffer_.data(); |
64 | } |
65 | |
66 | // Releases [from, to) range of pages back to OS. |
67 | void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) { |
68 | const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id); |
69 | const uptr from_page = allocator_.CompactPtrToPointer(region_base, from); |
70 | const uptr to_page = allocator_.CompactPtrToPointer(region_base, to); |
71 | ReleaseMemoryPagesToOS(from_page, to_page); |
72 | released_ranges_count_++; |
73 | released_bytes_ += to_page - from_page; |
74 | } |
75 | |
76 | private: |
77 | const Allocator &allocator_; |
78 | uptr released_ranges_count_ = 0; |
79 | uptr released_bytes_ = 0; |
80 | InternalMmapVector<u64> buffer_; |
81 | }; |
82 | |
83 | template <class Params> |
84 | class SizeClassAllocator64 { |
85 | public: |
86 | using AddressSpaceView = typename Params::AddressSpaceView; |
87 | static const uptr kSpaceBeg = Params::kSpaceBeg; |
88 | static const uptr kSpaceSize = Params::kSpaceSize; |
89 | static const uptr kMetadataSize = Params::kMetadataSize; |
90 | typedef typename Params::SizeClassMap SizeClassMap; |
91 | typedef typename Params::MapUnmapCallback MapUnmapCallback; |
92 | |
93 | static const bool kRandomShuffleChunks = |
94 | Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks; |
95 | |
96 | typedef SizeClassAllocator64<Params> ThisT; |
97 | typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache; |
98 | typedef MemoryMapper<ThisT> MemoryMapperT; |
99 | |
100 | // When we know the size class (the region base) we can represent a pointer |
101 | // as a 4-byte integer (offset from the region start shifted right by 4). |
102 | typedef u32 CompactPtrT; |
103 | static const uptr kCompactPtrScale = 4; |
104 | CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const { |
105 | return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale); |
106 | } |
107 | uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const { |
108 | return base + (static_cast<uptr>(ptr32) << kCompactPtrScale); |
109 | } |
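
A quick usage sketch of the compact pointer encoding above, with an arbitrary region base: the offset from the region start is divided by 16 (kCompactPtrScale of 4), so a 32-bit value can address any 16-byte-aligned chunk within a region of up to 2^36 bytes, and the round-trip is lossless.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kCompactPtrScale = 4;
  const uint64_t RegionBase = 0x7f0000000000ull;   // arbitrary example base
  const uint64_t Ptr = RegionBase + 0x12340;       // a 16-byte aligned chunk
  const uint32_t Compact =
      static_cast<uint32_t>((Ptr - RegionBase) >> kCompactPtrScale);
  const uint64_t Roundtrip =
      RegionBase + (static_cast<uint64_t>(Compact) << kCompactPtrScale);
  assert(Roundtrip == Ptr);
  return 0;
}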
110 | |
111 | // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W |
112 | // at heap_start and places the heap there. This mode requires kSpaceBeg == |
113 | // ~(uptr)0. |
114 | void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) { |
115 | uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); |
116 | PremappedHeap = heap_start != 0; |
117 | if (PremappedHeap) { |
118 | CHECK(!kUsingConstantSpaceBeg);
119 | NonConstSpaceBeg = heap_start; |
120 | uptr RegionInfoSize = AdditionalSize(); |
121 | RegionInfoSpace = |
122 | address_range.Init(RegionInfoSize, PrimaryAllocatorName); |
123 | CHECK_NE(RegionInfoSpace, ~(uptr)0);
124 | CHECK_EQ(RegionInfoSpace,
125 | address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
126 | "SizeClassAllocator: region info"));
127 | MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize); |
128 | } else { |
129 | if (kUsingConstantSpaceBeg) { |
130 | CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
131 | CHECK_EQ(kSpaceBeg,
132 | address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
133 | kSpaceBeg));
134 | } else { |
135 | // Combined allocator expects that a 2^N allocation is always aligned
136 | // to 2^N. For this to work, the start of the space needs to be aligned |
137 | // as high as the largest size class (which also needs to be a power of |
138 | // 2). |
139 | NonConstSpaceBeg = address_range.InitAligned( |
140 | TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName); |
141 | CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
142 | } |
143 | RegionInfoSpace = SpaceEnd(); |
144 | MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(), |
145 | "SizeClassAllocator: region info"); |
146 | } |
147 | SetReleaseToOSIntervalMs(release_to_os_interval_ms); |
148 | // Check that the RegionInfo array is aligned on the CacheLine size. |
149 | DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0); |
150 | } |
151 | |
152 | s32 ReleaseToOSIntervalMs() const { |
153 | return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed); |
154 | } |
155 | |
156 | void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) { |
157 | atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms, |
158 | memory_order_relaxed); |
159 | } |
160 | |
161 | void ForceReleaseToOS() { |
162 | MemoryMapperT memory_mapper(*this); |
163 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { |
164 | Lock l(&GetRegionInfo(class_id)->mutex); |
165 | MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/); |
166 | } |
167 | } |
168 | |
169 | static bool CanAllocate(uptr size, uptr alignment) { |
170 | return size <= SizeClassMap::kMaxSize && |
171 | alignment <= SizeClassMap::kMaxSize; |
172 | } |
173 | |
174 | NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
175 | AllocatorStats *stat, uptr class_id, |
176 | const CompactPtrT *chunks, uptr n_chunks) { |
177 | RegionInfo *region = GetRegionInfo(class_id); |
178 | uptr region_beg = GetRegionBeginBySizeClass(class_id); |
179 | CompactPtrT *free_array = GetFreeArray(region_beg); |
180 | |
181 | Lock l(®ion->mutex); |
182 | uptr old_num_chunks = region->num_freed_chunks; |
183 | uptr new_num_freed_chunks = old_num_chunks + n_chunks; |
184 | // Failure to allocate free array space while releasing memory is
185 | // non-recoverable.
186 | if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
187 | new_num_freed_chunks))) {
188 | Report("FATAL: Internal error: %s's allocator exhausted the free list " |
189 | "space for size class %zd (%zd bytes).\n", SanitizerToolName, |
190 | class_id, ClassIdToSize(class_id)); |
191 | Die(); |
192 | } |
193 | for (uptr i = 0; i < n_chunks; i++) |
194 | free_array[old_num_chunks + i] = chunks[i]; |
195 | region->num_freed_chunks = new_num_freed_chunks; |
196 | region->stats.n_freed += n_chunks; |
197 | |
198 | MaybeReleaseToOS(memory_mapper, class_id, false /*force*/); |
199 | } |
200 | |
201 | NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
202 | CompactPtrT *chunks, uptr n_chunks) { |
203 | RegionInfo *region = GetRegionInfo(class_id); |
204 | uptr region_beg = GetRegionBeginBySizeClass(class_id); |
205 | CompactPtrT *free_array = GetFreeArray(region_beg); |
206 | |
207 | Lock l(®ion->mutex); |
208 | #if SANITIZER_WINDOWS
209 | /* On Windows unmapping of memory during __sanitizer_purge_allocator is |
210 | explicit and immediate, so unmapped regions must be explicitly mapped back |
211 | in when they are accessed again. */ |
212 | if (region->rtoi.last_released_bytes > 0) { |
213 | MmapFixedOrDie(region_beg, region->mapped_user, |
214 | "SizeClassAllocator: region data"); |
215 | region->rtoi.n_freed_at_last_release = 0; |
216 | region->rtoi.last_released_bytes = 0; |
217 | } |
218 | #endif |
219 | if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
220 | if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
221 | n_chunks - region->num_freed_chunks)))
222 | return false; |
223 | CHECK_GE(region->num_freed_chunks, n_chunks);
224 | } |
225 | region->num_freed_chunks -= n_chunks; |
226 | uptr base_idx = region->num_freed_chunks; |
227 | for (uptr i = 0; i < n_chunks; i++) |
228 | chunks[i] = free_array[base_idx + i]; |
229 | region->stats.n_allocated += n_chunks; |
230 | return true; |
231 | } |
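// A hedged sketch of the free-array discipline implemented by the two
// functions above: each region's free array is used as a stack of CompactPtrT
// values, so a batch that was just returned is typically the first one handed
// out again. The allocator, mapper, stat and class_id names are placeholders.
//
//   CompactPtrT batch[4] = { /* compact pointers of 4 freed chunks */ };
//   // ReturnToAllocator appends: free_array[old .. old + 3] = batch[0..3],
//   // then num_freed_chunks += 4.
//   allocator.ReturnToAllocator(&mapper, &stat, class_id, batch, 4);
//   // GetFromAllocator pops from the top: num_freed_chunks -= 4, then
//   // batch[i] = free_array[num_freed_chunks + i].
//   allocator.GetFromAllocator(&stat, class_id, batch, 4);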
232 | |
233 | bool PointerIsMine(const void *p) const { |
234 | uptr P = reinterpret_cast<uptr>(p); |
235 | if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) |
236 | return P / kSpaceSize == kSpaceBeg / kSpaceSize; |
237 | return P >= SpaceBeg() && P < SpaceEnd(); |
238 | } |
239 | |
240 | uptr GetRegionBegin(const void *p) { |
241 | if (kUsingConstantSpaceBeg) |
242 | return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1); |
243 | uptr space_beg = SpaceBeg(); |
244 | return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) + |
245 | space_beg; |
246 | } |
247 | |
248 | uptr GetRegionBeginBySizeClass(uptr class_id) const { |
249 | return SpaceBeg() + kRegionSize * class_id; |
250 | } |
251 | |
252 | uptr GetSizeClass(const void *p) { |
253 | if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) |
254 | return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded; |
255 | return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) % |
256 | kNumClassesRounded; |
257 | } |
258 | |
259 | void *GetBlockBegin(const void *p) { |
260 | uptr class_id = GetSizeClass(p); |
261 | if (class_id >= kNumClasses) return nullptr; |
262 | uptr size = ClassIdToSize(class_id); |
263 | if (!size) return nullptr; |
264 | uptr chunk_idx = GetChunkIdx((uptr)p, size); |
265 | uptr reg_beg = GetRegionBegin(p); |
266 | uptr beg = chunk_idx * size; |
267 | uptr next_beg = beg + size; |
268 | const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id)); |
269 | if (region->mapped_user >= next_beg) |
270 | return reinterpret_cast<void*>(reg_beg + beg); |
271 | return nullptr; |
272 | } |
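// A minimal sketch of the pointer math the accessors above rely on, assuming
// the layout used throughout this allocator (region class_id starts at
// SpaceBeg() + class_id * kRegionSize):
//
//   uptr P = reinterpret_cast<uptr>(p);
//   uptr class_id = ((P - SpaceBeg()) / kRegionSize) % kNumClassesRounded;
//   uptr size = ClassIdToSize(class_id);                 // 0 for class 0
//   uptr chunk_idx = ((P - SpaceBeg()) % kRegionSize) / size;
//   void *block = reinterpret_cast<void *>(GetRegionBegin(p) + chunk_idx * size);
//
// GetBlockBegin() additionally verifies that the chunk lies within the
// region's mapped_user bytes before returning it.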
273 | |
274 | uptr GetActuallyAllocatedSize(void *p) { |
275 | CHECK(PointerIsMine(p));
276 | return ClassIdToSize(GetSizeClass(p)); |
277 | } |
278 | |
279 | static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } |
280 | |
281 | void *GetMetaData(const void *p) { |
282 | CHECK(kMetadataSize);
283 | uptr class_id = GetSizeClass(p); |
284 | uptr size = ClassIdToSize(class_id); |
285 | if (!size) |
286 | return nullptr; |
287 | uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size); |
288 | uptr region_beg = GetRegionBeginBySizeClass(class_id); |
289 | return reinterpret_cast<void *>(GetMetadataEnd(region_beg) - |
290 | (1 + chunk_idx) * kMetadataSize); |
291 | } |
292 | |
293 | uptr TotalMemoryUsed() { |
294 | uptr res = 0; |
295 | for (uptr i = 0; i < kNumClasses; i++) |
296 | res += GetRegionInfo(i)->allocated_user; |
297 | return res; |
298 | } |
299 | |
300 | // Test-only. |
301 | void TestOnlyUnmap() { |
302 | UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size()); |
303 | } |
304 | |
305 | static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats, |
306 | uptr stats_size) { |
307 | for (uptr class_id = 0; class_id < stats_size; class_id++) |
308 | if (stats[class_id] == start) |
309 | stats[class_id] = rss; |
310 | } |
311 | |
312 | void PrintStats(uptr class_id, uptr rss) { |
313 | RegionInfo *region = GetRegionInfo(class_id); |
314 | if (region->mapped_user == 0) return; |
315 | uptr in_use = region->stats.n_allocated - region->stats.n_freed; |
316 | uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id); |
317 | Printf( |
318 | "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd " |
319 | "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd " |
320 | "last released: %6lldK region: 0x%zx\n", |
321 | region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id), |
322 | region->mapped_user >> 10, region->stats.n_allocated, |
323 | region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks, |
324 | rss >> 10, region->rtoi.num_releases, |
325 | region->rtoi.last_released_bytes >> 10, |
326 | SpaceBeg() + kRegionSize * class_id); |
327 | } |
328 | |
329 | void PrintStats() { |
330 | uptr rss_stats[kNumClasses]; |
331 | for (uptr class_id = 0; class_id < kNumClasses; class_id++) |
332 | rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id; |
333 | GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses); |
334 | |
335 | uptr total_mapped = 0; |
336 | uptr total_rss = 0; |
337 | uptr n_allocated = 0; |
338 | uptr n_freed = 0; |
339 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { |
340 | RegionInfo *region = GetRegionInfo(class_id); |
341 | if (region->mapped_user != 0) { |
342 | total_mapped += region->mapped_user; |
343 | total_rss += rss_stats[class_id]; |
344 | } |
345 | n_allocated += region->stats.n_allocated; |
346 | n_freed += region->stats.n_freed; |
347 | } |
348 | |
349 | Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in " |
350 | "%zd allocations; remains %zd\n", total_mapped >> 20, |
351 | total_rss >> 20, n_allocated, n_allocated - n_freed); |
352 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) |
353 | PrintStats(class_id, rss_stats[class_id]); |
354 | } |
355 | |
356 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone |
357 | // introspection API. |
358 | void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
359 | for (uptr i = 0; i < kNumClasses; i++) { |
360 | GetRegionInfo(i)->mutex.Lock(); |
361 | } |
362 | } |
363 | |
364 | void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
365 | for (int i = (int)kNumClasses - 1; i >= 0; i--) { |
366 | GetRegionInfo(i)->mutex.Unlock(); |
367 | } |
368 | } |
369 | |
370 | // Iterate over all existing chunks. |
371 | // The allocator must be locked when calling this function. |
372 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { |
373 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { |
374 | RegionInfo *region = GetRegionInfo(class_id); |
375 | uptr chunk_size = ClassIdToSize(class_id); |
376 | uptr region_beg = SpaceBeg() + class_id * kRegionSize; |
377 | uptr region_allocated_user_size = |
378 | AddressSpaceView::Load(region)->allocated_user; |
379 | for (uptr chunk = region_beg; |
380 | chunk < region_beg + region_allocated_user_size; |
381 | chunk += chunk_size) { |
382 | // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); |
383 | callback(chunk, arg); |
384 | } |
385 | } |
386 | } |
387 | |
388 | static uptr ClassIdToSize(uptr class_id) { |
389 | return SizeClassMap::Size(class_id); |
390 | } |
391 | |
392 | static uptr AdditionalSize() { |
393 | return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded, |
394 | GetPageSizeCached()); |
395 | } |
396 | |
397 | typedef SizeClassMap SizeClassMapT; |
398 | static const uptr kNumClasses = SizeClassMap::kNumClasses; |
399 | static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; |
400 | |
401 | // A packed array of counters. Each counter occupies 2^n bits, enough to store |
402 | // counter's max_value. Ctor will try to allocate the required buffer via |
403 | // mapper->MapPackedCounterArrayBuffer and the caller is expected to check |
404 | // whether the initialization was successful by checking IsAllocated() result. |
405 | // For performance's sake, none of the accessors check the validity of their
406 | // arguments; it is assumed that the index is always in the [0, n) range and
407 | // that a counter is never incremented past max_value (see the sketch below).
408 | class PackedCounterArray { |
409 | public: |
410 | template <typename MemoryMapper> |
411 | PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper) |
412 | : n(num_counters) { |
413 | CHECK_GT(num_counters, 0);
414 | CHECK_GT(max_value, 0);
415 | constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL; |
416 | // Rounding the counter storage size up to a power of two allows for using
417 | // bit shifts to calculate a particular counter's index and offset.
418 | uptr counter_size_bits = |
419 | RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1); |
420 | CHECK_LE(counter_size_bits, kMaxCounterBits);
421 | counter_size_bits_log = Log2(counter_size_bits); |
422 | counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits); |
423 | |
424 | uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log; |
425 | CHECK_GT(packing_ratio, 0);
426 | packing_ratio_log = Log2(packing_ratio); |
427 | bit_offset_mask = packing_ratio - 1; |
428 | |
429 | buffer = mapper->MapPackedCounterArrayBuffer( |
430 | RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log); |
431 | } |
432 | |
433 | bool IsAllocated() const { |
434 | return !!buffer; |
435 | } |
436 | |
437 | u64 GetCount() const { |
438 | return n; |
439 | } |
440 | |
441 | uptr Get(uptr i) const { |
442 | DCHECK_LT(i, n); |
443 | uptr index = i >> packing_ratio_log; |
444 | uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log; |
445 | return (buffer[index] >> bit_offset) & counter_mask; |
446 | } |
447 | |
448 | void Inc(uptr i) const { |
449 | DCHECK_LT(Get(i), counter_mask); |
450 | uptr index = i >> packing_ratio_log; |
451 | uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log; |
452 | buffer[index] += 1ULL << bit_offset; |
453 | } |
454 | |
455 | void IncRange(uptr from, uptr to) const { |
456 | DCHECK_LE(from, to); |
457 | for (uptr i = from; i <= to; i++) |
458 | Inc(i); |
459 | } |
460 | |
461 | private: |
462 | const u64 n; |
463 | u64 counter_size_bits_log; |
464 | u64 counter_mask; |
465 | u64 packing_ratio_log; |
466 | u64 bit_offset_mask; |
467 | u64* buffer; |
468 | }; |
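// A worked sketch of the packing arithmetic above, assuming max_value == 14
// and 64-bit buffer words:
//
//   counter_size_bits = RoundUpToPowerOfTwo(MostSignificantSetBitIndex(14) + 1)
//                     = RoundUpToPowerOfTwo(4) = 4 bits per counter
//   counter_mask      = 0xF
//   packing_ratio     = 64 / 4 = 16 counters per u64 word
//   Get(i)  reads   (buffer[i / 16] >> ((i % 16) * 4)) & 0xF
//   Inc(i)  does     buffer[i / 16] += 1ULL << ((i % 16) * 4)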
469 | |
470 | template <class MemoryMapperT> |
471 | class FreePagesRangeTracker { |
472 | public: |
473 | FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id) |
474 | : memory_mapper(mapper), |
475 | class_id(class_id), |
476 | page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {} |
477 | |
478 | void NextPage(bool freed) { |
479 | if (freed) { |
480 | if (!in_the_range) { |
481 | current_range_start_page = current_page; |
482 | in_the_range = true; |
483 | } |
484 | } else { |
485 | CloseOpenedRange(); |
486 | } |
487 | current_page++; |
488 | } |
489 | |
490 | void Done() { |
491 | CloseOpenedRange(); |
492 | } |
493 | |
494 | private: |
495 | void CloseOpenedRange() { |
496 | if (in_the_range) { |
497 | memory_mapper->ReleasePageRangeToOS( |
498 | class_id, current_range_start_page << page_size_scaled_log, |
499 | current_page << page_size_scaled_log); |
500 | in_the_range = false; |
501 | } |
502 | } |
503 | |
504 | MemoryMapperT *const memory_mapper = nullptr; |
505 | const uptr class_id = 0; |
506 | const uptr page_size_scaled_log = 0; |
507 | bool in_the_range = false; |
508 | uptr current_page = 0; |
509 | uptr current_range_start_page = 0; |
510 | }; |
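// A minimal usage sketch for the tracker above (mapper and class_id are
// placeholders): consecutive NextPage(true) calls are coalesced into a single
// ReleasePageRangeToOS() call covering the whole run of free pages.
//
//   FreePagesRangeTracker<MemoryMapperT> tracker(&mapper, class_id);
//   tracker.NextPage(false);  // page 0 still holds live chunks
//   tracker.NextPage(true);   // pages 1 and 2 are entirely free
//   tracker.NextPage(true);
//   tracker.NextPage(false);  // closes the range: releases pages [1, 3)
//   tracker.Done();           // would close a still-open trailing range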
511 | |
512 | // Iterates over the free_array to identify memory pages containing freed |
513 | // chunks only and returns these pages back to OS. |
514 | // allocated_pages_count is the total number of pages allocated for the |
515 | // current bucket. |
516 | template <typename MemoryMapper> |
517 | static void ReleaseFreeMemoryToOS(CompactPtrT *free_array, |
518 | uptr free_array_count, uptr chunk_size, |
519 | uptr allocated_pages_count, |
520 | MemoryMapper *memory_mapper, |
521 | uptr class_id) { |
522 | const uptr page_size = GetPageSizeCached(); |
523 | |
524 | // Figure out the number of chunks per page and whether we can take a fast |
525 | // path (the number of chunks per page is the same for all pages). |
526 | uptr full_pages_chunk_count_max; |
527 | bool same_chunk_count_per_page; |
528 | if (chunk_size <= page_size && page_size % chunk_size == 0) { |
529 | // Same number of chunks per page, no cross overs. |
530 | full_pages_chunk_count_max = page_size / chunk_size; |
531 | same_chunk_count_per_page = true; |
532 | } else if (chunk_size <= page_size && page_size % chunk_size != 0 && |
533 | chunk_size % (page_size % chunk_size) == 0) { |
534 | // Some chunks are crossing page boundaries, which means that the page |
535 | // contains one or two partial chunks, but all pages contain the same |
536 | // number of chunks. |
537 | full_pages_chunk_count_max = page_size / chunk_size + 1; |
538 | same_chunk_count_per_page = true; |
539 | } else if (chunk_size <= page_size) { |
540 | // Some chunks are crossing page boundaries, which means that the page |
541 | // contains one or two partial chunks. |
542 | full_pages_chunk_count_max = page_size / chunk_size + 2; |
543 | same_chunk_count_per_page = false; |
544 | } else if (chunk_size > page_size && chunk_size % page_size == 0) { |
545 | // One chunk covers multiple pages, no cross overs. |
546 | full_pages_chunk_count_max = 1; |
547 | same_chunk_count_per_page = true; |
548 | } else if (chunk_size > page_size) { |
549 | // One chunk covers multiple pages, and some chunks cross page
550 | // boundaries. Some pages contain one chunk, some contain two. |
551 | full_pages_chunk_count_max = 2; |
552 | same_chunk_count_per_page = false; |
553 | } else { |
554 | UNREACHABLE("All chunk_size/page_size ratios must be handled.");
555 | } |
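    // Worked examples for the cases above (hedged; numbers assume a
    // 4096-byte page):
    //   chunk_size = 64:   4096 % 64 == 0, every page holds exactly 64 chunks.
    //   chunk_size = 48:   4096 % 48 == 16 and 48 % 16 == 0, so
    //                      full_pages_chunk_count_max = 4096 / 48 + 1 = 86 and
    //                      every page is indeed touched by 86 chunks.
    //   chunk_size = 96:   4096 % 96 == 64 and 96 % 64 != 0, so pages are
    //                      touched by either 43 or 44 chunks and the slow
    //                      path below is taken.
    //   chunk_size = 8192: a multiple of the page size, exactly one chunk
    //                      covers every page.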
556 | |
557 | PackedCounterArray counters(allocated_pages_count, |
558 | full_pages_chunk_count_max, memory_mapper); |
559 | if (!counters.IsAllocated()) |
560 | return; |
561 | |
562 | const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale; |
563 | const uptr page_size_scaled = page_size >> kCompactPtrScale; |
564 | const uptr page_size_scaled_log = Log2(page_size_scaled); |
565 | |
566 | // Iterate over free chunks and count how many free chunks affect each |
567 | // allocated page. |
568 | if (chunk_size <= page_size && page_size % chunk_size == 0) { |
569 | // Each chunk affects one page only. |
570 | for (uptr i = 0; i < free_array_count; i++) |
571 | counters.Inc(free_array[i] >> page_size_scaled_log); |
572 | } else { |
573 | // In all other cases chunks might affect more than one page. |
574 | for (uptr i = 0; i < free_array_count; i++) { |
575 | counters.IncRange( |
576 | free_array[i] >> page_size_scaled_log, |
577 | (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log); |
578 | } |
579 | } |
580 | |
581 | // Iterate over pages detecting ranges of pages with chunk counters equal |
582 | // to the expected number of chunks for the particular page. |
583 | FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id); |
584 | if (same_chunk_count_per_page) { |
585 | // Fast path, every page has the same number of chunks affecting it. |
586 | for (uptr i = 0; i < counters.GetCount(); i++) |
587 | range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max); |
588 | } else { |
589 | // Slow path: go through the pages, keeping count of how many chunks affect
590 | // each page. |
591 | const uptr pn = |
592 | chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1; |
593 | const uptr pnc = pn * chunk_size_scaled; |
594 | // The idea is to increment the current page pointer by the first chunk |
595 | // size, middle portion size (the portion of the page covered by chunks |
596 | // except the first and the last one) and then the last chunk size, adding |
597 | // up the number of chunks on the current page and checking on every step |
598 | // whether the page boundary was crossed. |
599 | uptr prev_page_boundary = 0; |
600 | uptr current_boundary = 0; |
601 | for (uptr i = 0; i < counters.GetCount(); i++) { |
602 | uptr page_boundary = prev_page_boundary + page_size_scaled; |
603 | uptr chunks_per_page = pn; |
604 | if (current_boundary < page_boundary) { |
605 | if (current_boundary > prev_page_boundary) |
606 | chunks_per_page++; |
607 | current_boundary += pnc; |
608 | if (current_boundary < page_boundary) { |
609 | chunks_per_page++; |
610 | current_boundary += chunk_size_scaled; |
611 | } |
612 | } |
613 | prev_page_boundary = page_boundary; |
614 | |
615 | range_tracker.NextPage(counters.Get(i) == chunks_per_page); |
616 | } |
617 | } |
618 | range_tracker.Done(); |
619 | } |
620 | |
621 | private: |
622 | friend class MemoryMapper<ThisT>; |
623 | |
624 | ReservedAddressRange address_range; |
625 | |
626 | static const uptr kRegionSize = kSpaceSize / kNumClassesRounded; |
627 | // FreeArray is the array of free-d chunks (stored as 4-byte offsets). |
628 | // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
629 | // elements, but in reality this will not happen. For simplicity we |
630 | // dedicate 1/8 of the region's virtual space to FreeArray. |
631 | static const uptr kFreeArraySize = kRegionSize / 8; |
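  // A sketch of the resulting per-region layout, assuming the constants above
  // and the GetMetadataEnd()/GetFreeArray() accessors defined further down:
  //
  //   region_beg                         GetMetadataEnd(region_beg)  region end
  //   |--> user chunks (mapped_user)    metadata (grows down) <--|--FreeArray--|
  //   |<---------- kRegionSize - kFreeArraySize ---------------->|<--- 1/8 --->|
  //
  // User chunks are carved out from the start of the region, per-chunk
  // metadata grows downwards from GetMetadataEnd(), and the last
  // kFreeArraySize bytes hold the array of freed CompactPtrT offsets.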
632 | |
633 | static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0; |
634 | uptr NonConstSpaceBeg; |
635 | uptr SpaceBeg() const { |
636 | return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg; |
637 | } |
638 | uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; } |
639 | // kRegionSize must be >= 2^32. |
640 | COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
641 | // kRegionSize must be <= 2^36, see CompactPtrT. |
642 | COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
643 | // Call mmap for user memory with at least this size. |
644 | static const uptr kUserMapSize = 1 << 16; |
645 | // Call mmap for metadata memory with at least this size. |
646 | static const uptr kMetaMapSize = 1 << 16; |
647 | // Call mmap for free array memory with at least this size. |
648 | static const uptr kFreeArrayMapSize = 1 << 16; |
649 | |
650 | atomic_sint32_t release_to_os_interval_ms_; |
651 | |
652 | uptr RegionInfoSpace; |
653 | |
654 | // True if the user has already mapped the entire heap R/W. |
655 | bool PremappedHeap; |
656 | |
657 | struct Stats { |
658 | uptr n_allocated; |
659 | uptr n_freed; |
660 | }; |
661 | |
662 | struct ReleaseToOsInfo { |
663 | uptr n_freed_at_last_release; |
664 | uptr num_releases; |
665 | u64 last_release_at_ns; |
666 | u64 last_released_bytes; |
667 | }; |
668 | |
669 | struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
670 | Mutex mutex; |
671 | uptr num_freed_chunks; // Number of elements in the freearray. |
672 | uptr mapped_free_array; // Bytes mapped for freearray. |
673 | uptr allocated_user; // Bytes allocated for user memory. |
674 | uptr allocated_meta; // Bytes allocated for metadata. |
675 | uptr mapped_user; // Bytes mapped for user memory. |
676 | uptr mapped_meta; // Bytes mapped for metadata. |
677 | u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks. |
678 | bool exhausted; // Whether region is out of space for new chunks. |
679 | Stats stats; |
680 | ReleaseToOsInfo rtoi; |
681 | }; |
682 | COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
683 | |
684 | RegionInfo *GetRegionInfo(uptr class_id) const { |
685 | DCHECK_LT(class_id, kNumClasses); |
686 | RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace); |
687 | return ®ions[class_id]; |
688 | } |
689 | |
690 | uptr GetMetadataEnd(uptr region_beg) const { |
691 | return region_beg + kRegionSize - kFreeArraySize; |
692 | } |
693 | |
694 | uptr GetChunkIdx(uptr chunk, uptr size) const { |
695 | if (!kUsingConstantSpaceBeg) |
696 | chunk -= SpaceBeg(); |
697 | |
698 | uptr offset = chunk % kRegionSize; |
699 | // Here we divide by a non-constant. This is costly. |
700 | // size always fits into 32-bits. If the offset fits too, use 32-bit div. |
701 | if (offset >> (SANITIZER_WORDSIZE / 2))
702 | return offset / size; |
703 | return (u32)offset / (u32)size; |
704 | } |
705 | |
706 | CompactPtrT *GetFreeArray(uptr region_beg) const { |
707 | return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg)); |
708 | } |
709 | |
710 | bool MapWithCallback(uptr beg, uptr size, const char *name) { |
711 | if (PremappedHeap) |
712 | return beg >= NonConstSpaceBeg && |
713 | beg + size <= NonConstSpaceBeg + kSpaceSize; |
714 | uptr mapped = address_range.Map(beg, size, name); |
715 | if (UNLIKELY(!mapped))
716 | return false; |
717 | CHECK_EQ(beg, mapped);
718 | MapUnmapCallback().OnMap(beg, size); |
719 | return true; |
720 | } |
721 | |
722 | void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) { |
723 | if (PremappedHeap) { |
724 | CHECK_GE(beg, NonConstSpaceBeg);
725 | CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
726 | return; |
727 | } |
728 | CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
729 | MapUnmapCallback().OnMap(beg, size); |
730 | } |
731 | |
732 | void UnmapWithCallbackOrDie(uptr beg, uptr size) { |
733 | if (PremappedHeap) |
734 | return; |
735 | MapUnmapCallback().OnUnmap(beg, size); |
736 | address_range.Unmap(beg, size); |
737 | } |
738 | |
739 | bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg, |
740 | uptr num_freed_chunks) { |
741 | uptr needed_space = num_freed_chunks * sizeof(CompactPtrT); |
742 | if (region->mapped_free_array < needed_space) { |
743 | uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize); |
744 | CHECK_LE(new_mapped_free_array, kFreeArraySize);
745 | uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) + |
746 | region->mapped_free_array; |
747 | uptr new_map_size = new_mapped_free_array - region->mapped_free_array; |
748 | if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
749 | "SizeClassAllocator: freearray")))
750 | return false; |
751 | region->mapped_free_array = new_mapped_free_array; |
752 | } |
753 | return true; |
754 | } |
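// A worked example of the incremental mapping above, assuming 4-byte
// CompactPtrT values (the FreeArray comment above calls them 4-byte offsets)
// and the kFreeArrayMapSize of 1 << 16 defined earlier:
//
//   needed_space            = num_freed_chunks * sizeof(CompactPtrT)
//   mapped_free_array grows to RoundUpTo(needed_space, 1 << 16)
//   e.g. num_freed_chunks = 20000  ->  needed_space = 80000
//        ->  mapped_free_array = 131072 (two 64 KiB mappings)
//
// so the free array is mapped in 64 KiB steps and never past kFreeArraySize.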
755 | |
756 | // Check whether this size class is exhausted. |
757 | bool IsRegionExhausted(RegionInfo *region, uptr class_id, |
758 | uptr additional_map_size) { |
759 | if (LIKELY(region->mapped_user + region->mapped_meta +
760 | additional_map_size <= kRegionSize - kFreeArraySize))
761 | return false; |
762 | if (!region->exhausted) { |
763 | region->exhausted = true; |
764 | Printf("%s: Out of memory. ", SanitizerToolName); |
765 | Printf("The process has exhausted %zuMB for size class %zu.\n", |
766 | kRegionSize >> 20, ClassIdToSize(class_id)); |
767 | } |
768 | return true; |
769 | } |
770 | |
771 | NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
772 | RegionInfo *region, uptr requested_count) { |
773 | // region->mutex is held. |
774 | const uptr region_beg = GetRegionBeginBySizeClass(class_id); |
775 | const uptr size = ClassIdToSize(class_id); |
776 | |
777 | const uptr total_user_bytes = |
778 | region->allocated_user + requested_count * size; |
779 | // Map more space for chunks, if necessary. |
780 | if (LIKELY(total_user_bytes > region->mapped_user)) {
781 | if (UNLIKELY(region->mapped_user == 0)) {
782 | if (!kUsingConstantSpaceBeg && kRandomShuffleChunks) |
783 | // The random state is initialized from ASLR. |
784 | region->rand_state = static_cast<u32>(region_beg >> 12); |
785 | // Postpone the first release to OS attempt for ReleaseToOSIntervalMs, |
786 | // preventing just allocated memory from being released sooner than |
787 | // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls |
788 | // for short lived processes. |
789 | // Do it only when the feature is turned on, to avoid a potentially |
790 | // extraneous syscall. |
791 | if (ReleaseToOSIntervalMs() >= 0) |
792 | region->rtoi.last_release_at_ns = MonotonicNanoTime(); |
793 | } |
794 | // Do the mmap for the user memory. |
795 | const uptr user_map_size = |
796 | RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize); |
797 | if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
798 | return false; |
799 | if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
800 | user_map_size,
801 | "SizeClassAllocator: region data")))
802 | return false; |
803 | stat->Add(AllocatorStatMapped, user_map_size); |
804 | region->mapped_user += user_map_size; |
805 | } |
806 | const uptr new_chunks_count = |
807 | (region->mapped_user - region->allocated_user) / size; |
808 | |
809 | if (kMetadataSize) { |
810 | // Calculate the required space for metadata. |
811 | const uptr total_meta_bytes = |
812 | region->allocated_meta + new_chunks_count * kMetadataSize; |
813 | const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ? |
814 | RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0; |
815 | // Map more space for metadata, if necessary. |
816 | if (meta_map_size) { |
817 | if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
818 | return false; |
819 | if (UNLIKELY(!MapWithCallback(
820 | GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
821 | meta_map_size, "SizeClassAllocator: region metadata")))
822 | return false; |
823 | region->mapped_meta += meta_map_size; |
824 | } |
825 | } |
826 | |
827 | // If necessary, allocate more space for the free array and populate it with |
828 | // newly allocated chunks. |
829 | const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count; |
830 | if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
831 | return false; |
832 | CompactPtrT *free_array = GetFreeArray(region_beg); |
833 | for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count; |
834 | i++, chunk += size) |
835 | free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk); |
836 | if (kRandomShuffleChunks) |
837 | RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count, |
838 | ®ion->rand_state); |
839 | |
840 | // All necessary memory is mapped and now it is safe to advance all |
841 | // 'allocated_*' counters. |
842 | region->num_freed_chunks += new_chunks_count; |
843 | region->allocated_user += new_chunks_count * size; |
844 | CHECK_LE(region->allocated_user, region->mapped_user);
845 | region->allocated_meta += new_chunks_count * kMetadataSize; |
846 | CHECK_LE(region->allocated_meta, region->mapped_meta);
847 | region->exhausted = false; |
848 | |
849 | // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent |
850 | // MaybeReleaseToOS from releasing just allocated pages or protect these |
851 | // not yet used chunks some other way. |
852 | |
853 | return true; |
854 | } |
855 | |
856 | // Attempts to release RAM occupied by freed chunks back to OS. The region is |
857 | // expected to be locked. |
858 | // |
859 | // TODO(morehouse): Support a callback on memory release so HWASan can release |
860 | // aliases as well. |
861 | void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id, |
862 | bool force) { |
863 | RegionInfo *region = GetRegionInfo(class_id); |
864 | const uptr chunk_size = ClassIdToSize(class_id); |
865 | const uptr page_size = GetPageSizeCached(); |
866 | |
867 | uptr n = region->num_freed_chunks; |
868 | if (n * chunk_size < page_size) |
869 | return; // No chance to release anything. |
870 | if ((region->stats.n_freed - |
871 | region->rtoi.n_freed_at_last_release) * chunk_size < page_size) { |
872 | return; // Nothing new to release. |
873 | } |
874 | |
875 | if (!force) { |
876 | s32 interval_ms = ReleaseToOSIntervalMs(); |
877 | if (interval_ms < 0) |
878 | return; |
879 | |
880 | if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > |
881 | MonotonicNanoTime()) { |
882 | return; // Memory was returned recently. |
883 | } |
884 | } |
885 | |
886 | ReleaseFreeMemoryToOS( |
887 | GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size, |
888 | RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper, |
889 | class_id); |
890 | |
891 | uptr ranges, bytes; |
892 | if (memory_mapper->GetAndResetStats(ranges, bytes)) { |
893 | region->rtoi.n_freed_at_last_release = region->stats.n_freed; |
894 | region->rtoi.num_releases += ranges; |
895 | region->rtoi.last_released_bytes = bytes; |
896 | } |
897 | region->rtoi.last_release_at_ns = MonotonicNanoTime(); |
898 | } |
899 | }; |
1 | //===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // Part of the Sanitizer Allocator. | |||
10 | // | |||
11 | //===----------------------------------------------------------------------===// | |||
12 | #ifndef SANITIZER_ALLOCATOR_H | |||
13 | #error This file must be included inside sanitizer_allocator.h | |||
14 | #endif | |||
15 | ||||
16 | // SizeClassMap maps allocation sizes into size classes and back. | |||
17 | // Class 0 always corresponds to size 0. | |||
18 | // The other sizes are controlled by the template parameters: | |||
19 | // kMinSizeLog: defines the class 1 as 2^kMinSizeLog. | |||
20 | // kMaxSizeLog: defines the last class as 2^kMaxSizeLog. | |||
21 | // kMidSizeLog: the classes starting from 1 increase with step | |||
22 | // 2^kMinSizeLog until 2^kMidSizeLog. | |||
23 | // kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog. | |||
24 | // E.g. with kNumBits==3 all size classes after 2^kMidSizeLog | |||
25 | // look like 0b1xx0..0, where x is either 0 or 1. | |||
26 | // | |||
27 | // Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17: | |||
28 | // | |||
29 | // Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16). | |||
30 | // Next 4 classes: 256 + i * 64 (i = 1 to 4). | |||
31 | // Next 4 classes: 512 + i * 128 (i = 1 to 4). | |||
32 | // ... | |||
33 | // Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4). | |||
34 | // Last class corresponds to kMaxSize = 1 << kMaxSizeLog. | |||
35 | // | |||
36 | // This structure of the size class map gives us: | |||
37 | // - Efficient table-free class-to-size and size-to-class functions. | |||
38 | // - Difference between two consecutive size classes is between 14% and 25%.
39 | // | |||
40 | // This class also gives a hint to a thread-caching allocator about the number
41 | // of chunks that need to be cached per-thread: | |||
42 | // - kMaxNumCachedHint is a hint for maximal number of chunks per size class. | |||
43 | // The actual number is computed in TransferBatch. | |||
44 | // - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class. | |||
45 | // | |||
46 | // Part of output of SizeClassMap::Print(): | |||
47 | // c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0 | |||
48 | // c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1 | |||
49 | // c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2 | |||
50 | // c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3 | |||
51 | // c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4 | |||
52 | // c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5 | |||
53 | // c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6 | |||
54 | // c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7 | |||
55 | // | |||
56 | // c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8 | |||
57 | // c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9 | |||
58 | // c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10 | |||
59 | // c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11 | |||
60 | // c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12 | |||
61 | // c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13 | |||
62 | // c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14 | |||
63 | // c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15 | |||
64 | // | |||
65 | // c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16 | |||
66 | // c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17 | |||
67 | // c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18 | |||
68 | // c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19 | |||
69 | // | |||
70 | // c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20 | |||
71 | // c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21 | |||
72 | // c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22 | |||
73 | // c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23 | |||
74 | // | |||
75 | // c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24 | |||
76 | // c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25 | |||
77 | // c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26 | |||
78 | // c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27 | |||
79 | // | |||
80 | // ... | |||
81 | // | |||
82 | // c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48 | |||
83 | // c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49 | |||
84 | // c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50 | |||
85 | // c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51 | |||
86 | // | |||
87 | // c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52 | |||
88 | // | |||
89 | // | |||
90 | // Another example (kNumBits=2): | |||
91 | // c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0 | |||
92 | // c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1 | |||
93 | // c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2 | |||
94 | // c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3 | |||
95 | // c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4 | |||
96 | // c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5 | |||
97 | // c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6 | |||
98 | // c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7 | |||
99 | // c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8 | |||
100 | // c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9 | |||
101 | // c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10 | |||
102 | // c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11 | |||
103 | // c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12 | |||
104 | // c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13 | |||
105 | // c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14 | |||
106 | // c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15 | |||
107 | // c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16 | |||
108 | // c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17 | |||
109 | // c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18 | |||
110 | // c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19 | |||
111 | // c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20 | |||
112 | // c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21 | |||
113 | // c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22 | |||
114 | // c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23 | |||
115 | // c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24 | |||
116 | // c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25 | |||
117 | // c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26 | |||
118 | ||||
119 | template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog, | |||
120 | uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog> | |||
121 | class SizeClassMap { | |||
122 | static const uptr kMinSize = 1 << kMinSizeLog; | |||
123 | static const uptr kMidSize = 1 << kMidSizeLog; | |||
124 | static const uptr kMidClass = kMidSize / kMinSize; | |||
125 | static const uptr S = kNumBits - 1; | |||
126 | static const uptr M = (1 << S) - 1; | |||
127 | ||||
128 | public: | |||
129 | // kMaxNumCachedHintT is a power of two. It serves as a hint | |||
130 | // for the size of TransferBatch; the actual size could be a bit smaller.
131 | static const uptr kMaxNumCachedHint = kMaxNumCachedHintT; | |||
132 | COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
133 | ||||
134 | static const uptr kMaxSize = 1UL << kMaxSizeLog; | |||
135 | static const uptr kNumClasses = | |||
136 | kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1; | |||
137 | static const uptr kLargestClassID = kNumClasses - 2; | |||
138 | static const uptr kBatchClassID = kNumClasses - 1; | |||
139 | COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
140 | static const uptr kNumClassesRounded = | |||
141 | kNumClasses <= 32 ? 32 : | |||
142 | kNumClasses <= 64 ? 64 : | |||
143 | kNumClasses <= 128 ? 128 : 256; | |||
144 | ||||
145 | static uptr Size(uptr class_id) { | |||
146 | // Estimate the result for kBatchClassID because this class does not know | |||
147 | // the exact size of TransferBatch. It's OK since we are using the actual | |||
148 | // sizeof(TransferBatch) where it matters. | |||
149 | if (UNLIKELY(class_id == kBatchClassID))
150 | return kMaxNumCachedHint * sizeof(uptr); | |||
151 | if (class_id <= kMidClass) | |||
152 | return kMinSize * class_id; | |||
153 | class_id -= kMidClass; | |||
154 | uptr t = kMidSize << (class_id >> S); | |||
| ||||
155 | return t + (t >> S) * (class_id & M); | |||
156 | } | |||
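  // A worked example, assuming the DefaultSizeClassMap parameters defined
  // below (kNumBits = 3, kMinSizeLog = 4, kMidSizeLog = 8, so S = 2, M = 3
  // and kMidClass = 16):
  //   Size(17): class_id -= 16             -> 1
  //             t = 256 << (1 >> 2)        -> 256
  //             256 + (256 >> 2) * (1 & 3) =  320
  // which matches the "c17 => s: 320" row of the table in the file comment.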
157 | ||||
158 | static uptr ClassID(uptr size) { | |||
159 | if (UNLIKELY(size > kMaxSize))
160 | return 0; | |||
161 | if (size <= kMidSize) | |||
162 | return (size + kMinSize - 1) >> kMinSizeLog; | |||
163 | const uptr l = MostSignificantSetBitIndex(size); | |||
164 | const uptr hbits = (size >> (l - S)) & M; | |||
165 | const uptr lbits = size & ((1U << (l - S)) - 1); | |||
166 | const uptr l1 = l - kMidSizeLog; | |||
167 | return kMidClass + (l1 << S) + hbits + (lbits > 0); | |||
168 | } | |||
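  // Inverting the example above for the same parameters:
  //   ClassID(320): l = MostSignificantSetBitIndex(320) = 8
  //                 hbits = (320 >> (8 - 2)) & 3        = 1
  //                 lbits = 320 & ((1 << 6) - 1)        = 0
  //                 kMidClass + ((8 - 8) << 2) + 1 + 0  = 17
  // so ClassID(Size(17)) == 17, the round-trip property Validate() checks below.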
169 | ||||
170 | static uptr MaxCachedHint(uptr size) { | |||
171 | DCHECK_LE(size, kMaxSize); | |||
172 | if (UNLIKELY(size == 0))
173 | return 0; | |||
174 | uptr n; | |||
175 | // Force a 32-bit division if the template parameters allow for it. | |||
176 | if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31) | |||
177 | n = (1UL << kMaxBytesCachedLog) / size; | |||
178 | else | |||
179 | n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size); | |||
180 | return Max<uptr>(1U, Min(kMaxNumCachedHint, n)); | |||
181 | } | |||
182 | ||||
183 | static void Print() { | |||
184 | uptr prev_s = 0; | |||
185 | uptr total_cached = 0; | |||
186 | for (uptr i = 0; i < kNumClasses; i++) { | |||
187 | uptr s = Size(i); | |||
188 | if (s >= kMidSize / 2 && (s & (s - 1)) == 0) | |||
189 | Printf("\n"); | |||
190 | uptr d = s - prev_s; | |||
191 | uptr p = prev_s ? (d * 100 / prev_s) : 0; | |||
192 | uptr l = s ? MostSignificantSetBitIndex(s) : 0; | |||
193 | uptr cached = MaxCachedHint(s) * s; | |||
194 | if (i == kBatchClassID) | |||
195 | d = p = l = 0; | |||
196 | Printf( | |||
197 | "c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\n", | |||
198 | i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s)); | |||
199 | total_cached += cached; | |||
200 | prev_s = s; | |||
201 | } | |||
202 | Printf("Total cached: %zu\n", total_cached); | |||
203 | } | |||
204 | ||||
205 | static void Validate() { | |||
206 | for (uptr c = 1; c < kNumClasses; c++) { | |||
207 | // Printf("Validate: c%zd\n", c); | |||
208 | uptr s = Size(c); | |||
209 | CHECK_NE(s, 0U);
210 | if (c == kBatchClassID) | |||
211 | continue; | |||
212 | CHECK_EQ(ClassID(s), c);
213 | if (c < kLargestClassID) | |||
214 | CHECK_EQ(ClassID(s + 1), c + 1);
215 | CHECK_EQ(ClassID(s - 1), c);
216 | CHECK_GT(Size(c), Size(c - 1));
217 | } | |||
218 | CHECK_EQ(ClassID(kMaxSize + 1), 0);
219 | ||||
220 | for (uptr s = 1; s <= kMaxSize; s++) { | |||
221 | uptr c = ClassID(s); | |||
222 | // Printf("s%zd => c%zd\n", s, c); | |||
223 | CHECK_LT(c, kNumClasses);
224 | CHECK_GE(Size(c), s);
225 | if (c > 0) | |||
226 | CHECK_LT(Size(c - 1), s);
227 | } | |||
228 | } | |||
229 | }; | |||
230 | ||||
231 | typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap; | |||
232 | typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap; | |||
233 | typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap; | |||
234 | ||||
235 | // The following SizeClassMap only holds a very small number of cached entries,
236 | // allowing for denser per-class arrays, a smaller memory footprint and usually
237 | // better performance in threaded environments.
238 | typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap; | |||
239 | // Similar to VeryCompact map above, this one has a small number of different | |||
240 | // size classes, and also reduced thread-local caches. | |||
241 | typedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap; |