| File: | compiler-rt/lib/scudo/../sanitizer_common/sanitizer_allocator_size_class_map.h |
| Warning: | line 154, column 23: The result of the left shift is undefined due to shifting '256' by '1073741820', which is unrepresentable in the unsigned version of the return type '__sanitizer::uptr' |
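For context: shifting a value by an amount greater than or equal to the bit width of its (promoted) type is undefined behaviour in C++, which is what the checker reports for the size-class computation it models on this path. A minimal sketch of the pattern, built only from the constants quoted in the warning (illustrative code, not the sanitizer source):

    unsigned long long SizeConstant = 256;        // the value the checker sees being shifted
    unsigned long long ShiftAmount = 1073741820;  // far beyond 63, the largest valid shift for a 64-bit uptr
    unsigned long long Result = SizeConstant << ShiftAmount;  // undefined behaviour: shift count too large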
| 1 | //===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===// | ||||||
| 2 | // | ||||||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||
| 6 | // | ||||||
| 7 | //===----------------------------------------------------------------------===// | ||||||
| 8 | /// | ||||||
| 9 | /// Scudo Hardened Allocator implementation. | ||||||
| 10 | /// It uses the sanitizer_common allocator as a base and aims at mitigating | ||||||
| 11 | /// heap corruption vulnerabilities. It provides a checksum-guarded chunk | ||||||
| 12 | /// header, a delayed free list, and additional sanity checks. | ||||||
| 13 | /// | ||||||
| 14 | //===----------------------------------------------------------------------===// | ||||||
| 15 | |||||||
| 16 | #include "scudo_allocator.h" | ||||||
| 17 | #include "scudo_crc32.h" | ||||||
| 18 | #include "scudo_errors.h" | ||||||
| 19 | #include "scudo_flags.h" | ||||||
| 20 | #include "scudo_interface_internal.h" | ||||||
| 21 | #include "scudo_tsd.h" | ||||||
| 22 | #include "scudo_utils.h" | ||||||
| 23 | |||||||
| 24 | #include "sanitizer_common/sanitizer_allocator_checks.h" | ||||||
| 25 | #include "sanitizer_common/sanitizer_allocator_interface.h" | ||||||
| 26 | #include "sanitizer_common/sanitizer_quarantine.h" | ||||||
| 27 | |||||||
| 28 | #ifdef GWP_ASAN_HOOKS | ||||||
| 29 | # include "gwp_asan/guarded_pool_allocator.h" | ||||||
| 30 | # include "gwp_asan/optional/backtrace.h" | ||||||
| 31 | # include "gwp_asan/optional/options_parser.h" | ||||||
| 32 | #include "gwp_asan/optional/segv_handler.h" | ||||||
| 33 | #endif // GWP_ASAN_HOOKS | ||||||
| 34 | |||||||
| 35 | #include <errno.h> | ||||||
| 36 | #include <string.h> | ||||||
| 37 | |||||||
| 38 | namespace __scudo { | ||||||
| 39 | |||||||
| 40 | // Global static cookie, initialized at start-up. | ||||||
| 41 | static u32 Cookie; | ||||||
| 42 | |||||||
| 43 | // We default to software CRC32 if the alternatives are not supported, either | ||||||
| 44 | // at compilation or at runtime. | ||||||
| 45 | static atomic_uint8_t HashAlgorithm = { CRC32Software }; | ||||||
| 46 | |||||||
| 47 | inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) { | ||||||
| 48 | // If the hardware CRC32 feature is defined here, it was enabled everywhere, | ||||||
| 49 | // as opposed to only for scudo_crc32.cpp. This means that other hardware | ||||||
| 50 | // specific instructions were likely emitted at other places, and as a | ||||||
| 51 | // result there is no reason to not use it here. | ||||||
| 52 | #if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) | ||||||
| 53 | Crc = CRC32_INTRINSIC(Crc, Value); | ||||||
| 54 | for (uptr i = 0; i < ArraySize; i++) | ||||||
| 55 | Crc = CRC32_INTRINSIC(Crc, Array[i]); | ||||||
| 56 | return Crc; | ||||||
| 57 | #else | ||||||
| 58 | if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) { | ||||||
| 59 | Crc = computeHardwareCRC32(Crc, Value); | ||||||
| 60 | for (uptr i = 0; i < ArraySize; i++) | ||||||
| 61 | Crc = computeHardwareCRC32(Crc, Array[i]); | ||||||
| 62 | return Crc; | ||||||
| 63 | } | ||||||
| 64 | Crc = computeSoftwareCRC32(Crc, Value); | ||||||
| 65 | for (uptr i = 0; i < ArraySize; i++) | ||||||
| 66 | Crc = computeSoftwareCRC32(Crc, Array[i]); | ||||||
| 67 | return Crc; | ||||||
| 68 | #endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) | ||||||
| 69 | } | ||||||
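// Note: the CRC32 implementation is selected once, at init time, by probing
// for hardware support; every subsequent checksum computed by the process then
// goes through the same path, so stored header checksums remain comparable.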
| 70 | |||||||
| 71 | static BackendT &getBackend(); | ||||||
| 72 | |||||||
| 73 | namespace Chunk { | ||||||
| 74 | static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) { | ||||||
| 75 | return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) - | ||||||
| 76 | getHeaderSize()); | ||||||
| 77 | } | ||||||
| 78 | static inline | ||||||
| 79 | const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) { | ||||||
| 80 | return reinterpret_cast<const AtomicPackedHeader *>( | ||||||
| 81 | reinterpret_cast<uptr>(Ptr) - getHeaderSize()); | ||||||
| 82 | } | ||||||
| 83 | |||||||
| 84 | static inline bool isAligned(const void *Ptr) { | ||||||
| 85 | return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment); | ||||||
| 86 | } | ||||||
| 87 | |||||||
| 88 | // We can't use the offset member of the chunk itself, as we would double | ||||||
| 89 | // fetch it without any guarantee that it hasn't been tampered with. To | ||||||
| 90 | // prevent this, we work with a local copy of the header. | ||||||
| 91 | static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) { | ||||||
| 92 | return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) - | ||||||
| 93 | getHeaderSize() - (Header->Offset << MinAlignmentLog)); | ||||||
| 94 | } | ||||||
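// Worked example (illustrative values): if the user pointer was bumped by
// 48 bytes to satisfy a stricter alignment and MinAlignmentLog is 4, the
// header stores Offset = 48 >> 4 = 3, and getBackendPtr() walks back those
// 48 bytes plus the header size to recover the block the backend returned.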
| 95 | |||||||
| 96 | // Returns the usable size for a chunk, meaning the number of bytes from the | ||||||
| 97 | // beginning of the user data to the end of the backend allocated chunk. | ||||||
| 98 | static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) { | ||||||
| 99 | const uptr ClassId = Header->ClassId; | ||||||
| 100 | if (ClassId) | ||||||
| 101 | return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() - | ||||||
| 102 | (Header->Offset << MinAlignmentLog); | ||||||
| 103 | return SecondaryT::GetActuallyAllocatedSize( | ||||||
| 104 | getBackendPtr(Ptr, Header)) - getHeaderSize(); | ||||||
| 105 | } | ||||||
| 106 | |||||||
| 107 | // Returns the size the user requested when allocating the chunk. | ||||||
| 108 | static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) { | ||||||
| 109 | const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes; | ||||||
| 110 | if (Header->ClassId) | ||||||
| 111 | return SizeOrUnusedBytes; | ||||||
| 112 | return SecondaryT::GetActuallyAllocatedSize( | ||||||
| 113 | getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes; | ||||||
| 114 | } | ||||||
| 115 | |||||||
| 116 | // Compute the checksum of the chunk pointer and its header. | ||||||
| 117 | static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) { | ||||||
| 118 | UnpackedHeader ZeroChecksumHeader = *Header; | ||||||
| 119 | ZeroChecksumHeader.Checksum = 0; | ||||||
| 120 | uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)]; | ||||||
| 121 | memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder)); | ||||||
| 122 | const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr), | ||||||
| 123 | HeaderHolder, ARRAY_SIZE(HeaderHolder)); | ||||||
| 124 | return static_cast<u16>(Crc); | ||||||
| 125 | } | ||||||
| 126 | |||||||
| 127 | // Checks the validity of a chunk by verifying its checksum. It does not | ||||||
| 128 | // terminate the process in the event of an invalid chunk. | ||||||
| 129 | static inline bool isValid(const void *Ptr) { | ||||||
| 130 | PackedHeader NewPackedHeader = | ||||||
| 131 | atomic_load_relaxed(getConstAtomicHeader(Ptr)); | ||||||
| 132 | UnpackedHeader NewUnpackedHeader = | ||||||
| 133 | bit_cast<UnpackedHeader>(NewPackedHeader); | ||||||
| 134 | return (NewUnpackedHeader.Checksum == | ||||||
| 135 | computeChecksum(Ptr, &NewUnpackedHeader)); | ||||||
| 136 | } | ||||||
| 137 | |||||||
| 138 | // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid | ||||||
| 139 | // for a fully nulled out header, its state will be available anyway. | ||||||
| 140 | COMPILER_CHECK(ChunkAvailable == 0); | ||||||
| 141 | |||||||
| 142 | // Loads and unpacks the header, verifying the checksum in the process. | ||||||
| 143 | static inline | ||||||
| 144 | void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) { | ||||||
| 145 | PackedHeader NewPackedHeader = | ||||||
| 146 | atomic_load_relaxed(getConstAtomicHeader(Ptr)); | ||||||
| 147 | *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader); | ||||||
| 148 | if (UNLIKELY(NewUnpackedHeader->Checksum != | ||||||
| 149 | computeChecksum(Ptr, NewUnpackedHeader))) | ||||||
| 150 | dieWithMessage("corrupted chunk header at address %p\n", Ptr); | ||||||
| 151 | } | ||||||
| 152 | |||||||
| 153 | // Packs and stores the header, computing the checksum in the process. | ||||||
| 154 | static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) { | ||||||
| 155 | NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader); | ||||||
| 156 | PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); | ||||||
| 157 | atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader); | ||||||
| 158 | } | ||||||
| 159 | |||||||
| 160 | // Packs and stores the header, computing the checksum in the process. We | ||||||
| 161 | // compare the current header with the expected provided one to ensure that | ||||||
| 162 | // we are not being raced by a corruption occurring in another thread. | ||||||
| 163 | static inline void compareExchangeHeader(void *Ptr, | ||||||
| 164 | UnpackedHeader *NewUnpackedHeader, | ||||||
| 165 | UnpackedHeader *OldUnpackedHeader) { | ||||||
| 166 | NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader); | ||||||
| 167 | PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); | ||||||
| 168 | PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader); | ||||||
| 169 | if (UNLIKELY(!atomic_compare_exchange_strong( | ||||||
| 170 | getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader, | ||||||
| 171 | memory_order_relaxed))) | ||||||
| 172 | dieWithMessage("race on chunk header at address %p\n", Ptr); | ||||||
| 173 | } | ||||||
| 174 | } // namespace Chunk | ||||||
| 175 | |||||||
| 176 | struct QuarantineCallback { | ||||||
| 177 | explicit QuarantineCallback(AllocatorCacheT *Cache) | ||||||
| 178 | : Cache_(Cache) {} | ||||||
| 179 | |||||||
| 180 | // Chunk recycling function, returns a quarantined chunk to the backend, | ||||||
| 181 | // first making sure it hasn't been tampered with. | ||||||
| 182 | void Recycle(void *Ptr) { | ||||||
| 183 | UnpackedHeader Header; | ||||||
| 184 | Chunk::loadHeader(Ptr, &Header); | ||||||
| 185 | if (UNLIKELY(Header.State != ChunkQuarantine)) | ||||||
| 186 | dieWithMessage("invalid chunk state when recycling address %p\n", Ptr); | ||||||
| 187 | UnpackedHeader NewHeader = Header; | ||||||
| 188 | NewHeader.State = ChunkAvailable; | ||||||
| 189 | Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header); | ||||||
| 190 | void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header); | ||||||
| 191 | if (Header.ClassId) | ||||||
| 192 | getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId); | ||||||
| 193 | else | ||||||
| 194 | getBackend().deallocateSecondary(BackendPtr); | ||||||
| 195 | } | ||||||
| 196 | |||||||
| 197 | // Internal quarantine allocation and deallocation functions. We first check | ||||||
| 198 | // that the batches are indeed serviced by the Primary. | ||||||
| 199 | // TODO(kostyak): figure out the best way to protect the batches. | ||||||
| 200 | void *Allocate(uptr Size) { | ||||||
| 201 | const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch)); | ||||||
| 202 | return getBackend().allocatePrimary(Cache_, BatchClassId); | ||||||
| 203 | } | ||||||
| 204 | |||||||
| 205 | void Deallocate(void *Ptr) { | ||||||
| 206 | const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch)); | ||||||
| 207 | getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId); | ||||||
| 208 | } | ||||||
| 209 | |||||||
| 210 | AllocatorCacheT *Cache_; | ||||||
| 211 | COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize); | ||||||
| 212 | }; | ||||||
| 213 | |||||||
| 214 | typedef Quarantine<QuarantineCallback, void> QuarantineT; | ||||||
| 215 | typedef QuarantineT::Cache QuarantineCacheT; | ||||||
| 216 | COMPILER_CHECK(sizeof(QuarantineCacheT) <= | ||||||
| 217 | sizeof(ScudoTSD::QuarantineCachePlaceHolder)); | ||||||
| 218 | |||||||
| 219 | QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) { | ||||||
| 220 | return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder); | ||||||
| 221 | } | ||||||
| 222 | |||||||
| 223 | #ifdef GWP_ASAN_HOOKS | ||||||
| 224 | static gwp_asan::GuardedPoolAllocator GuardedAlloc; | ||||||
| 225 | #endif // GWP_ASAN_HOOKS | ||||||
| 226 | |||||||
| 227 | struct Allocator { | ||||||
| 228 | static const uptr MaxAllowedMallocSize = | ||||||
| 229 | FIRST_32_SECOND_64(2UL << 30, 1ULL << 40); | ||||||
| 230 | |||||||
| 231 | BackendT Backend; | ||||||
| 232 | QuarantineT Quarantine; | ||||||
| 233 | |||||||
| 234 | u32 QuarantineChunksUpToSize; | ||||||
| 235 | |||||||
| 236 | bool DeallocationTypeMismatch; | ||||||
| 237 | bool ZeroContents; | ||||||
| 238 | bool DeleteSizeMismatch; | ||||||
| 239 | |||||||
| 240 | bool CheckRssLimit; | ||||||
| 241 | uptr HardRssLimitMb; | ||||||
| 242 | uptr SoftRssLimitMb; | ||||||
| 243 | atomic_uint8_t RssLimitExceeded; | ||||||
| 244 | atomic_uint64_t RssLastCheckedAtNS; | ||||||
| 245 | |||||||
| 246 | explicit Allocator(LinkerInitialized) | ||||||
| 247 | : Quarantine(LINKER_INITIALIZED) {} | ||||||
| 248 | |||||||
| 249 | NOINLINE void performSanityChecks(); | ||||||
| 250 | |||||||
| 251 | void init() { | ||||||
| 252 | SanitizerToolName = "Scudo"; | ||||||
| 253 | PrimaryAllocatorName = "ScudoPrimary"; | ||||||
| 254 | SecondaryAllocatorName = "ScudoSecondary"; | ||||||
| 255 | |||||||
| 256 | initFlags(); | ||||||
| 257 | |||||||
| 258 | performSanityChecks(); | ||||||
| 259 | |||||||
| 260 | // Check if hardware CRC32 is supported in the binary and by the platform, | ||||||
| 261 | // if so, opt for the CRC32 hardware version of the checksum. | ||||||
| 262 | if (&computeHardwareCRC32 && hasHardwareCRC32()) | ||||||
| 263 | atomic_store_relaxed(&HashAlgorithm, CRC32Hardware); | ||||||
| 264 | |||||||
| 265 | SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); | ||||||
| 266 | Backend.init(common_flags()->allocator_release_to_os_interval_ms); | ||||||
| 267 | HardRssLimitMb = common_flags()->hard_rss_limit_mb; | ||||||
| 268 | SoftRssLimitMb = common_flags()->soft_rss_limit_mb; | ||||||
| 269 | Quarantine.Init( | ||||||
| 270 | static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10, | ||||||
| 271 | static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10); | ||||||
| 272 | QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 : | ||||||
| 273 | getFlags()->QuarantineChunksUpToSize; | ||||||
| 274 | DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch; | ||||||
| 275 | DeleteSizeMismatch = getFlags()->DeleteSizeMismatch; | ||||||
| 276 | ZeroContents = getFlags()->ZeroContents; | ||||||
| 277 | |||||||
| 278 | if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie), | ||||||
| 279 | /*blocking=*/false))) { | ||||||
| 280 | Cookie = static_cast<u32>((NanoTime() >> 12) ^ | ||||||
| 281 | (reinterpret_cast<uptr>(this) >> 4)); | ||||||
| 282 | } | ||||||
| 283 | |||||||
| 284 | CheckRssLimit = HardRssLimitMb || SoftRssLimitMb; | ||||||
| 285 | if (CheckRssLimit) | ||||||
| 286 | atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime()); | ||||||
| 287 | } | ||||||
| 288 | |||||||
| 289 | // Helper function that checks for a valid Scudo chunk. nullptr isn't. | ||||||
| 290 | bool isValidPointer(const void *Ptr) { | ||||||
| 291 | initThreadMaybe(); | ||||||
| 292 | if (UNLIKELY(!Ptr)) | ||||||
| 293 | return false; | ||||||
| 294 | if (!Chunk::isAligned(Ptr)) | ||||||
| 295 | return false; | ||||||
| 296 | return Chunk::isValid(Ptr); | ||||||
| 297 | } | ||||||
| 298 | |||||||
| 299 | NOINLINE bool isRssLimitExceeded(); | ||||||
| 300 | |||||||
| 301 | // Allocates a chunk. | ||||||
| 302 | void *allocate(uptr Size, uptr Alignment, AllocType Type, | ||||||
| 303 | bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS { | ||||||
| 304 | initThreadMaybe(); | ||||||
| 305 | |||||||
| 306 | if (UNLIKELY(Alignment > MaxAlignment)) { | ||||||
| 307 | if (AllocatorMayReturnNull()) | ||||||
| 308 | return nullptr; | ||||||
| 309 | reportAllocationAlignmentTooBig(Alignment, MaxAlignment); | ||||||
| 310 | } | ||||||
| 311 | if (UNLIKELY(Alignment < MinAlignment)) | ||||||
| 312 | Alignment = MinAlignment; | ||||||
| 313 | |||||||
| 314 | #ifdef GWP_ASAN_HOOKS | ||||||
| 315 | if (UNLIKELY(GuardedAlloc.shouldSample())) { | ||||||
| 316 | if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) { | ||||||
| 317 | if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook) | ||||||
| 318 | __sanitizer_malloc_hook(Ptr, Size); | ||||||
| 319 | return Ptr; | ||||||
| 320 | } | ||||||
| 321 | } | ||||||
| 322 | #endif // GWP_ASAN_HOOKS | ||||||
| 323 | |||||||
| 324 | const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) + | ||||||
| 325 | Chunk::getHeaderSize(); | ||||||
| 326 | const uptr AlignedSize = (Alignment > MinAlignment) ? | ||||||
| 327 | NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize; | ||||||
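// The (Alignment - getHeaderSize()) slack added above is what later lets the
// user pointer be rounded up to Alignment while the chunk still has room for
// Size bytes preceded by its header.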
| 328 | if (UNLIKELY(Size >= MaxAllowedMallocSize) || | ||||||
| 329 | UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) { | ||||||
| 330 | if (AllocatorMayReturnNull()) | ||||||
| 331 | return nullptr; | ||||||
| 332 | reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize); | ||||||
| 333 | } | ||||||
| 334 | |||||||
| 335 | if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) { | ||||||
| 336 | if (AllocatorMayReturnNull()) | ||||||
| 337 | return nullptr; | ||||||
| 338 | reportRssLimitExceeded(); | ||||||
| 339 | } | ||||||
| 340 | |||||||
| 341 | // Primary and Secondary backed allocations have a different treatment. We | ||||||
| 342 | // deal with alignment requirements of Primary serviced allocations here, | ||||||
| 343 | // but the Secondary will take care of its own alignment needs. | ||||||
| 344 | void *BackendPtr; | ||||||
| 345 | uptr BackendSize; | ||||||
| 346 | u8 ClassId; | ||||||
| 347 | if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) { | ||||||
| 348 | BackendSize = AlignedSize; | ||||||
| 349 | ClassId = SizeClassMap::ClassID(BackendSize); | ||||||
| 350 | bool UnlockRequired; | ||||||
| 351 | ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); | ||||||
| 352 | BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId); | ||||||
| 353 | if (UnlockRequired) | ||||||
| 354 | TSD->unlock(); | ||||||
| 355 | } else { | ||||||
| 356 | BackendSize = NeededSize; | ||||||
| 357 | ClassId = 0; | ||||||
| 358 | BackendPtr = Backend.allocateSecondary(BackendSize, Alignment); | ||||||
| 359 | } | ||||||
| 360 | if (UNLIKELY(!BackendPtr)) { | ||||||
| 361 | SetAllocatorOutOfMemory(); | ||||||
| 362 | if (AllocatorMayReturnNull()) | ||||||
| 363 | return nullptr; | ||||||
| 364 | reportOutOfMemory(Size); | ||||||
| 365 | } | ||||||
| 366 | |||||||
| 367 | // If requested, we will zero out the entire contents of the returned chunk. | ||||||
| 368 | if ((ForceZeroContents || ZeroContents) && ClassId) | ||||||
| 369 | memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId)); | ||||||
| 370 | |||||||
| 371 | UnpackedHeader Header = {}; | ||||||
| 372 | uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize(); | ||||||
| 373 | if (UNLIKELY(!IsAligned(UserPtr, Alignment))) { | ||||||
| 374 | // Since the Secondary takes care of alignment, a non-aligned pointer | ||||||
| 375 | // means it is from the Primary. It is also the only case where the offset | ||||||
| 376 | // field of the header would be non-zero. | ||||||
| 377 | DCHECK(ClassId); | ||||||
| 378 | const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment); | ||||||
| 379 | Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog; | ||||||
| 380 | UserPtr = AlignedUserPtr; | ||||||
| 381 | } | ||||||
| 382 | DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize); | ||||||
| 383 | Header.State = ChunkAllocated; | ||||||
| 384 | Header.AllocType = Type; | ||||||
| 385 | if (ClassId) { | ||||||
| 386 | Header.ClassId = ClassId; | ||||||
| 387 | Header.SizeOrUnusedBytes = Size; | ||||||
| 388 | } else { | ||||||
| 389 | // The secondary fits the allocations to a page, so the amount of unused | ||||||
| 390 | // bytes is the difference between the end of the user allocation and the | ||||||
| 391 | // next page boundary. | ||||||
| 392 | const uptr PageSize = GetPageSizeCached(); | ||||||
| 393 | const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1); | ||||||
| 394 | if (TrailingBytes) | ||||||
| 395 | Header.SizeOrUnusedBytes = PageSize - TrailingBytes; | ||||||
| 396 | } | ||||||
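// Worked example: with a 4096-byte page, if UserPtr + Size ends 100 bytes
// into its last page, TrailingBytes is 100 and SizeOrUnusedBytes records
// 4096 - 100 = 3996; Chunk::getSize() later subtracts this (and the header)
// from the backend's allocated size to recover the requested size.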
| 397 | void *Ptr = reinterpret_cast<void *>(UserPtr); | ||||||
| 398 | Chunk::storeHeader(Ptr, &Header); | ||||||
| 399 | if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook) | ||||||
| 400 | __sanitizer_malloc_hook(Ptr, Size); | ||||||
| 401 | return Ptr; | ||||||
| 402 | } | ||||||
| 403 | |||||||
| 404 | // Place a chunk in the quarantine or directly deallocate it in the event of | ||||||
| 405 | // a zero-sized quarantine, or if the size of the chunk is greater than the | ||||||
| 406 | // quarantine chunk size threshold. | ||||||
| 407 | void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, | ||||||
| 408 | uptr Size) NO_THREAD_SAFETY_ANALYSIS { | ||||||
| 409 | const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize); | ||||||
| 410 | if (BypassQuarantine) { | ||||||
| 411 | UnpackedHeader NewHeader = *Header; | ||||||
| 412 | NewHeader.State = ChunkAvailable; | ||||||
| 413 | Chunk::compareExchangeHeader(Ptr, &NewHeader, Header); | ||||||
| 414 | void *BackendPtr = Chunk::getBackendPtr(Ptr, Header); | ||||||
| 415 | if (Header->ClassId) { | ||||||
| 416 | bool UnlockRequired; | ||||||
| 417 | ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); | ||||||
| 418 | getBackend().deallocatePrimary(&TSD->Cache, BackendPtr, | ||||||
| 419 | Header->ClassId); | ||||||
| 420 | if (UnlockRequired) | ||||||
| 421 | TSD->unlock(); | ||||||
| 422 | } else { | ||||||
| 423 | getBackend().deallocateSecondary(BackendPtr); | ||||||
| 424 | } | ||||||
| 425 | } else { | ||||||
| 426 | // If a small memory amount was allocated with a larger alignment, we want | ||||||
| 427 | // to take that into account. Otherwise the Quarantine would be filled | ||||||
| 428 | // with tiny chunks, taking a lot of VA memory. This is an approximation | ||||||
| 429 | // of the usable size, that allows us to not call | ||||||
| 430 | // GetActuallyAllocatedSize. | ||||||
| 431 | const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog); | ||||||
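// For instance, a 16-byte request that was served with a large alignment can
// occupy a much bigger backend block; charging the quarantine for
// Size + (Offset << MinAlignmentLog) accounts for that extra space without
// having to call GetActuallyAllocatedSize().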
| 432 | UnpackedHeader NewHeader = *Header; | ||||||
| 433 | NewHeader.State = ChunkQuarantine; | ||||||
| 434 | Chunk::compareExchangeHeader(Ptr, &NewHeader, Header); | ||||||
| 435 | bool UnlockRequired; | ||||||
| 436 | ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); | ||||||
| 437 | Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache), | ||||||
| 438 | Ptr, EstimatedSize); | ||||||
| 439 | if (UnlockRequired) | ||||||
| 440 | TSD->unlock(); | ||||||
| 441 | } | ||||||
| 442 | } | ||||||
| 443 | |||||||
| 444 | // Deallocates a Chunk, which means either adding it to the quarantine or | ||||||
| 445 | // directly returning it to the backend if criteria are met. | ||||||
| 446 | void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment, | ||||||
| 447 | AllocType Type) { | ||||||
| 448 | // For a deallocation, we only ensure minimal initialization, meaning thread | ||||||
| 449 | // local data will be left uninitialized for now (when using ELF TLS). The | ||||||
| 450 | // fallback cache will be used instead. This is a workaround for a situation | ||||||
| 451 | // where the only heap operation performed in a thread would be a free past | ||||||
| 452 | // the TLS destructors, ending up in initialized thread specific data never | ||||||
| 453 | // being destroyed properly. Any other heap operation will do a full init. | ||||||
| 454 | initThreadMaybe(/*MinimalInit=*/true); | ||||||
| 455 | if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook) | ||||||
| 456 | __sanitizer_free_hook(Ptr); | ||||||
| 457 | if (UNLIKELY(!Ptr)) | ||||||
| 458 | return; | ||||||
| 459 | |||||||
| 460 | #ifdef GWP_ASAN_HOOKS | ||||||
| 461 | if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) { | ||||||
| 462 | GuardedAlloc.deallocate(Ptr); | ||||||
| 463 | return; | ||||||
| 464 | } | ||||||
| 465 | #endif // GWP_ASAN_HOOKS | ||||||
| 466 | |||||||
| 467 | if (UNLIKELY(!Chunk::isAligned(Ptr))) | ||||||
| 468 | dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr); | ||||||
| 469 | UnpackedHeader Header; | ||||||
| 470 | Chunk::loadHeader(Ptr, &Header); | ||||||
| 471 | if (UNLIKELY(Header.State != ChunkAllocated)) | ||||||
| 472 | dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr); | ||||||
| 473 | if (DeallocationTypeMismatch) { | ||||||
| 474 | // The deallocation type has to match the allocation one. | ||||||
| 475 | if (Header.AllocType != Type) { | ||||||
| 476 | // With the exception of memalign'd Chunks, which can still be free'd. | ||||||
| 477 | if (Header.AllocType != FromMemalign || Type != FromMalloc) | ||||||
| 478 | dieWithMessage("allocation type mismatch when deallocating address " | ||||||
| 479 | "%p\n", Ptr); | ||||||
| 480 | } | ||||||
| 481 | } | ||||||
| 482 | const uptr Size = Chunk::getSize(Ptr, &Header); | ||||||
| 483 | if (DeleteSizeMismatch) { | ||||||
| 484 | if (DeleteSize && DeleteSize != Size) | ||||||
| 485 | dieWithMessage("invalid sized delete when deallocating address %p\n", | ||||||
| 486 | Ptr); | ||||||
| 487 | } | ||||||
| 488 | (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches. | ||||||
| 489 | quarantineOrDeallocateChunk(Ptr, &Header, Size); | ||||||
| 490 | } | ||||||
| 491 | |||||||
| 492 | // Reallocates a chunk. We can save on a new allocation if the new requested | ||||||
| 493 | // size still fits in the chunk. | ||||||
| 494 | void *reallocate(void *OldPtr, uptr NewSize) { | ||||||
| 495 | initThreadMaybe(); | ||||||
| 496 | |||||||
| 497 | #ifdef GWP_ASAN_HOOKS | ||||||
| 498 | if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) { | ||||||
| 499 | size_t OldSize = GuardedAlloc.getSize(OldPtr); | ||||||
| 500 | void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); | ||||||
| 501 | if (NewPtr) | ||||||
| 502 | memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize); | ||||||
| 503 | GuardedAlloc.deallocate(OldPtr); | ||||||
| 504 | return NewPtr; | ||||||
| 505 | } | ||||||
| 506 | #endif // GWP_ASAN_HOOKS | ||||||
| 507 | |||||||
| 508 | if (UNLIKELY(!Chunk::isAligned(OldPtr))) | ||||||
| 509 | dieWithMessage("misaligned address when reallocating address %p\n", | ||||||
| 510 | OldPtr); | ||||||
| 511 | UnpackedHeader OldHeader; | ||||||
| 512 | Chunk::loadHeader(OldPtr, &OldHeader); | ||||||
| 513 | if (UNLIKELY(OldHeader.State != ChunkAllocated)) | ||||||
| 514 | dieWithMessage("invalid chunk state when reallocating address %p\n", | ||||||
| 515 | OldPtr); | ||||||
| 516 | if (DeallocationTypeMismatch) { | ||||||
| 517 | if (UNLIKELY(OldHeader.AllocType != FromMalloc)) | ||||||
| 518 | dieWithMessage("allocation type mismatch when reallocating address " | ||||||
| 519 | "%p\n", OldPtr); | ||||||
| 520 | } | ||||||
| 521 | const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader); | ||||||
| 522 | // The new size still fits in the current chunk, and the size difference | ||||||
| 523 | // is reasonable. | ||||||
| 524 | if (NewSize <= UsableSize && | ||||||
| 525 | (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) { | ||||||
| 526 | UnpackedHeader NewHeader = OldHeader; | ||||||
| 527 | NewHeader.SizeOrUnusedBytes = | ||||||
| 528 | OldHeader.ClassId ? NewSize : UsableSize - NewSize; | ||||||
| 529 | Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader); | ||||||
| 530 | return OldPtr; | ||||||
| 531 | } | ||||||
| 532 | // Otherwise, we have to allocate a new chunk and copy the contents of the | ||||||
| 533 | // old one. | ||||||
| 534 | void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); | ||||||
| 535 | if (NewPtr) { | ||||||
| 536 | const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes : | ||||||
| 537 | UsableSize - OldHeader.SizeOrUnusedBytes; | ||||||
| 538 | memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize)); | ||||||
| 539 | quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize); | ||||||
| 540 | } | ||||||
| 541 | return NewPtr; | ||||||
| 542 | } | ||||||
| 543 | |||||||
| 544 | // Helper function that returns the actual usable size of a chunk. | ||||||
| 545 | uptr getUsableSize(const void *Ptr) { | ||||||
| 546 | initThreadMaybe(); | ||||||
| 547 | if (UNLIKELY(!Ptr)) | ||||||
| 548 | return 0; | ||||||
| 549 | |||||||
| 550 | #ifdef GWP_ASAN_HOOKS | ||||||
| 551 | if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) | ||||||
| 552 | return GuardedAlloc.getSize(Ptr); | ||||||
| 553 | #endif // GWP_ASAN_HOOKS | ||||||
| 554 | |||||||
| 555 | UnpackedHeader Header; | ||||||
| 556 | Chunk::loadHeader(Ptr, &Header); | ||||||
| 557 | // Getting the usable size of a chunk only makes sense if it's allocated. | ||||||
| 558 | if (UNLIKELY(Header.State != ChunkAllocated)) | ||||||
| 559 | dieWithMessage("invalid chunk state when sizing address %p\n", Ptr); | ||||||
| 560 | return Chunk::getUsableSize(Ptr, &Header); | ||||||
| 561 | } | ||||||
| 562 | |||||||
| 563 | void *calloc(uptr NMemB, uptr Size) { | ||||||
| 564 | initThreadMaybe(); | ||||||
| 565 | if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) { | ||||||
| 566 | if (AllocatorMayReturnNull()) | ||||||
| 567 | return nullptr; | ||||||
| 568 | reportCallocOverflow(NMemB, Size); | ||||||
| 569 | } | ||||||
| 570 | return allocate(NMemB * Size, MinAlignment, FromMalloc, true); | ||||||
| 571 | } | ||||||
| 572 | |||||||
| 573 | void commitBack(ScudoTSD *TSD) { | ||||||
| 574 | Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache)); | ||||||
| 575 | Backend.destroyCache(&TSD->Cache); | ||||||
| 576 | } | ||||||
| 577 | |||||||
| 578 | uptr getStats(AllocatorStat StatType) { | ||||||
| 579 | initThreadMaybe(); | ||||||
| 580 | uptr stats[AllocatorStatCount]; | ||||||
| 581 | Backend.getStats(stats); | ||||||
| 582 | return stats[StatType]; | ||||||
| 583 | } | ||||||
| 584 | |||||||
| 585 | bool canReturnNull() { | ||||||
| 586 | initThreadMaybe(); | ||||||
| 587 | return AllocatorMayReturnNull(); | ||||||
| 588 | } | ||||||
| 589 | |||||||
| 590 | void setRssLimit(uptr LimitMb, bool HardLimit) { | ||||||
| 591 | if (HardLimit) | ||||||
| 592 | HardRssLimitMb = LimitMb; | ||||||
| 593 | else | ||||||
| 594 | SoftRssLimitMb = LimitMb; | ||||||
| 595 | CheckRssLimit = HardRssLimitMb || SoftRssLimitMb; | ||||||
| 596 | } | ||||||
| 597 | |||||||
| 598 | void printStats() { | ||||||
| 599 | initThreadMaybe(); | ||||||
| 600 | Backend.printStats(); | ||||||
| 601 | } | ||||||
| 602 | }; | ||||||
| 603 | |||||||
| 604 | NOINLINE void Allocator::performSanityChecks() { | ||||||
| 605 | // Verify that the header offset field can hold the maximum offset. In the | ||||||
| 606 | // case of the Secondary allocator, it takes care of alignment and the | ||||||
| 607 | // offset will always be 0. In the case of the Primary, the worst case | ||||||
| 608 | // scenario happens in the last size class, when the backend allocation | ||||||
| 609 | // would already be aligned on the requested alignment, which would happen | ||||||
| 610 | // to be the maximum alignment that would fit in that size class. As a | ||||||
| 611 | // result, the maximum offset will be at most the maximum alignment for the | ||||||
| 612 | // last size class minus the header size, in multiples of MinAlignment. | ||||||
| 613 | UnpackedHeader Header = {}; | ||||||
| 614 | const uptr MaxPrimaryAlignment = | ||||||
| 615 | 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment); | ||||||
| 616 | const uptr MaxOffset = | ||||||
| 617 | (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog; | ||||||
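// Illustrative numbers: with a 64 KiB largest size class, a 16-byte header
// and MinAlignmentLog == 4, MaxPrimaryAlignment is 32768 and MaxOffset is
// (32768 - 16) >> 4 = 2047, which must fit in the header's Offset bit-field.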
| 618 | Header.Offset = MaxOffset; | ||||||
| 619 | if (Header.Offset != MaxOffset) | ||||||
| 620 | dieWithMessage("maximum possible offset doesn't fit in header\n"); | ||||||
| 621 | // Verify that we can fit the maximum size or amount of unused bytes in the | ||||||
| 622 | // header. Given that the Secondary fits the allocation to a page, the worst | ||||||
| 623 | // case scenario happens in the Primary. It will depend on the second to | ||||||
| 624 | // last and last class sizes, as well as the dynamic base for the Primary. | ||||||
| 625 | // The following is an over-approximation that works for our needs. | ||||||
| 626 | const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1; | ||||||
| 627 | Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes; | ||||||
| 628 | if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) | ||||||
| 629 | dieWithMessage("maximum possible unused bytes doesn't fit in header\n"); | ||||||
| 630 | |||||||
| 631 | const uptr LargestClassId = SizeClassMap::kLargestClassID; | ||||||
| 632 | Header.ClassId = LargestClassId; | ||||||
| 633 | if (Header.ClassId != LargestClassId) | ||||||
| 634 | dieWithMessage("largest class ID doesn't fit in header\n"); | ||||||
| 635 | } | ||||||
| 636 | |||||||
| 637 | // Opportunistic RSS limit check. This will update the RSS limit status, if | ||||||
| 638 | // it can, every 250ms, otherwise it will just return the current one. | ||||||
| 639 | NOINLINE bool Allocator::isRssLimitExceeded() { | ||||||
| 640 | u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS); | ||||||
| 641 | const u64 CurrentCheck = MonotonicNanoTime(); | ||||||
| 642 | if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL))) | ||||||
| 643 | return atomic_load_relaxed(&RssLimitExceeded); | ||||||
| 644 | if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck, | ||||||
| 645 | CurrentCheck, memory_order_relaxed)) | ||||||
| 646 | return atomic_load_relaxed(&RssLimitExceeded); | ||||||
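// Only the thread that wins the compare-exchange above performs the RSS
// refresh; every other caller within the same 250ms window just returns the
// cached RssLimitExceeded value, keeping this check cheap on the fast path.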
| 647 | // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the | ||||||
| 648 | // RSS from /proc/self/statm by default. We might want to | ||||||
| 649 | // call getrusage directly, even if it's less accurate. | ||||||
| 650 | const uptr CurrentRssMb = GetRSS() >> 20; | ||||||
| 651 | if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb)) | ||||||
| 652 | dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n", | ||||||
| 653 | HardRssLimitMb, CurrentRssMb); | ||||||
| 654 | if (SoftRssLimitMb) { | ||||||
| 655 | if (atomic_load_relaxed(&RssLimitExceeded)) { | ||||||
| 656 | if (CurrentRssMb <= SoftRssLimitMb) | ||||||
| 657 | atomic_store_relaxed(&RssLimitExceeded, false); | ||||||
| 658 | } else { | ||||||
| 659 | if (CurrentRssMb > SoftRssLimitMb) { | ||||||
| 660 | atomic_store_relaxed(&RssLimitExceeded, true); | ||||||
| 661 | Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n", | ||||||
| 662 | SoftRssLimitMb, CurrentRssMb); | ||||||
| 663 | } | ||||||
| 664 | } | ||||||
| 665 | } | ||||||
| 666 | return atomic_load_relaxed(&RssLimitExceeded); | ||||||
| 667 | } | ||||||
| 668 | |||||||
| 669 | static Allocator Instance(LINKER_INITIALIZED); | ||||||
| 670 | |||||||
| 671 | static BackendT &getBackend() { | ||||||
| 672 | return Instance.Backend; | ||||||
| 673 | } | ||||||
| 674 | |||||||
| 675 | void initScudo() { | ||||||
| 676 | Instance.init(); | ||||||
| 677 | #ifdef GWP_ASAN_HOOKS | ||||||
| 678 | gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"), | ||||||
| 679 | Printf); | ||||||
| 680 | gwp_asan::options::Options &Opts = gwp_asan::options::getOptions(); | ||||||
| 681 | Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction(); | ||||||
| 682 | GuardedAlloc.init(Opts); | ||||||
| 683 | |||||||
| 684 | if (Opts.InstallSignalHandlers) | ||||||
| 685 | gwp_asan::segv_handler::installSignalHandlers( | ||||||
| 686 | &GuardedAlloc, __sanitizer::Printf, | ||||||
| 687 | gwp_asan::backtrace::getPrintBacktraceFunction(), | ||||||
| 688 | gwp_asan::backtrace::getSegvBacktraceFunction()); | ||||||
| 689 | #endif // GWP_ASAN_HOOKS | ||||||
| 690 | } | ||||||
| 691 | |||||||
| 692 | void ScudoTSD::init() { | ||||||
| 693 | getBackend().initCache(&Cache); | ||||||
| 694 | memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); | ||||||
| 695 | } | ||||||
| 696 | |||||||
| 697 | void ScudoTSD::commitBack() { | ||||||
| 698 | Instance.commitBack(this); | ||||||
| 699 | } | ||||||
| 700 | |||||||
| 701 | void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) { | ||||||
| 702 | if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) { | ||||||
| 703 | errno = EINVAL; | ||||||
| 704 | if (Instance.canReturnNull()) | ||||||
| 705 | return nullptr; | ||||||
| 706 | reportAllocationAlignmentNotPowerOfTwo(Alignment); | ||||||
| 707 | } | ||||||
| 708 | return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type)); | ||||||
| 709 | } | ||||||
| 710 | |||||||
| 711 | void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) { | ||||||
| 712 | Instance.deallocate(Ptr, Size, Alignment, Type); | ||||||
| 713 | } | ||||||
| 714 | |||||||
| 715 | void *scudoRealloc(void *Ptr, uptr Size) { | ||||||
| 716 | if (!Ptr) | ||||||
| 717 | return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc)); | ||||||
| 718 | if (Size == 0) { | ||||||
| 719 | Instance.deallocate(Ptr, 0, 0, FromMalloc); | ||||||
| 720 | return nullptr; | ||||||
| 721 | } | ||||||
| 722 | return SetErrnoOnNull(Instance.reallocate(Ptr, Size)); | ||||||
| 723 | } | ||||||
| 724 | |||||||
| 725 | void *scudoCalloc(uptr NMemB, uptr Size) { | ||||||
| 726 | return SetErrnoOnNull(Instance.calloc(NMemB, Size)); | ||||||
| 727 | } | ||||||
| 728 | |||||||
| 729 | void *scudoValloc(uptr Size) { | ||||||
| 730 | return SetErrnoOnNull( | ||||||
| 731 | Instance.allocate(Size, GetPageSizeCached(), FromMemalign)); | ||||||
| 732 | } | ||||||
| 733 | |||||||
| 734 | void *scudoPvalloc(uptr Size) { | ||||||
| 735 | const uptr PageSize = GetPageSizeCached(); | ||||||
| 736 | if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) { | ||||||
| 737 | errno = ENOMEM; | ||||||
| 738 | if (Instance.canReturnNull()) | ||||||
| 739 | return nullptr; | ||||||
| 740 | reportPvallocOverflow(Size); | ||||||
| 741 | } | ||||||
| 742 | // pvalloc(0) should allocate one page. | ||||||
| 743 | Size = Size ? RoundUpTo(Size, PageSize) : PageSize; | ||||||
| 744 | return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign)); | ||||||
| 745 | } | ||||||
| 746 | |||||||
| 747 | int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { | ||||||
| 748 | if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) { | ||||||
| 749 | if (!Instance.canReturnNull()) | ||||||
| 750 | reportInvalidPosixMemalignAlignment(Alignment); | ||||||
| 751 | return EINVAL; | ||||||
| 752 | } | ||||||
| 753 | void *Ptr = Instance.allocate(Size, Alignment, FromMemalign); | ||||||
| 754 | if (UNLIKELY(!Ptr)) | ||||||
| 755 | return ENOMEM; | ||||||
| 756 | *MemPtr = Ptr; | ||||||
| 757 | return 0; | ||||||
| 758 | } | ||||||
| 759 | |||||||
| 760 | void *scudoAlignedAlloc(uptr Alignment, uptr Size) { | ||||||
| 761 | if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) { | ||||||
| 762 | errno = EINVAL; | ||||||
| 763 | if (Instance.canReturnNull()) | ||||||
| 764 | return nullptr; | ||||||
| 765 | reportInvalidAlignedAllocAlignment(Size, Alignment); | ||||||
| 766 | } | ||||||
| 767 | return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc)); | ||||||
| 768 | } | ||||||
| 769 | |||||||
| 770 | uptr scudoMallocUsableSize(void *Ptr) { | ||||||
| 771 | return Instance.getUsableSize(Ptr); | ||||||
| 772 | } | ||||||
| 773 | |||||||
| 774 | } // namespace __scudo | ||||||
| 775 | |||||||
| 776 | using namespace __scudo; | ||||||
| 777 | |||||||
| 778 | // MallocExtension helper functions | ||||||
| 779 | |||||||
| 780 | uptr __sanitizer_get_current_allocated_bytes() { | ||||||
| 781 | return Instance.getStats(AllocatorStatAllocated); | ||||||
| 782 | } | ||||||
| 783 | |||||||
| 784 | uptr __sanitizer_get_heap_size() { | ||||||
| 785 | return Instance.getStats(AllocatorStatMapped); | ||||||
| 786 | } | ||||||
| 787 | |||||||
| 788 | uptr __sanitizer_get_free_bytes() { | ||||||
| 789 | return 1; | ||||||
| 790 | } | ||||||
| 791 | |||||||
| 792 | uptr __sanitizer_get_unmapped_bytes() { | ||||||
| 793 | return 1; | ||||||
| 794 | } | ||||||
| 795 | |||||||
| 796 | uptr __sanitizer_get_estimated_allocated_size(uptr Size) { | ||||||
| 797 | return Size; | ||||||
| 798 | } | ||||||
| 799 | |||||||
| 800 | int __sanitizer_get_ownership(const void *Ptr) { | ||||||
| 801 | return Instance.isValidPointer(Ptr); | ||||||
| 802 | } | ||||||
| 803 | |||||||
| 804 | uptr __sanitizer_get_allocated_size(const void *Ptr) { | ||||||
| 805 | return Instance.getUsableSize(Ptr); | ||||||
| 806 | } | ||||||
| 807 | |||||||
| 808 | #if !SANITIZER_SUPPORTS_WEAK_HOOKS | ||||||
| 809 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, | ||||||
| 810 | void *Ptr, uptr Size) { | ||||||
| 811 | (void)Ptr; | ||||||
| 812 | (void)Size; | ||||||
| 813 | } | ||||||
| 814 | |||||||
| 815 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) { | ||||||
| 816 | (void)Ptr; | ||||||
| 817 | } | ||||||
| 818 | #endif | ||||||
| 819 | |||||||
| 820 | // Interface functions | ||||||
| 821 | |||||||
| 822 | void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) { | ||||||
| 823 | if (!SCUDO_CAN_USE_PUBLIC_INTERFACE) | ||||||
| 824 | return; | ||||||
| 825 | Instance.setRssLimit(LimitMb, !!HardLimit); | ||||||
| 826 | } | ||||||
| 827 | |||||||
| 828 | void __scudo_print_stats() { | ||||||
| 829 | Instance.printStats(); | ||||||
| 830 | } |
| 1 | //===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // Part of the Sanitizer Allocator. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | #ifndef SANITIZER_ALLOCATOR_H |
| 13 | #error This file must be included inside sanitizer_allocator.h |
| 14 | #endif |
| 15 | |
| 16 | template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache; |
| 17 | |
| 18 | // SizeClassAllocator32 -- allocator for 32-bit address space. |
| 19 | // This allocator can theoretically be used on 64-bit arch, but there it is less |
| 20 | // efficient than SizeClassAllocator64. |
| 21 | // |
| 22 | // [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can |
| 23 | // be returned by MmapOrDie(). |
| 24 | // |
| 25 | // Region: |
| 26 | // a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize, |
| 27 | // kRegionSize). |
| 28 | // Since the regions are aligned by kRegionSize, there are exactly |
| 29 | // kNumPossibleRegions possible regions in the address space and so we keep |
| 30 | // a ByteMap possible_regions to store the size classes of each Region. |
| 31 | // 0 size class means the region is not used by the allocator. |
| 32 | // |
| 33 | // One Region is used to allocate chunks of a single size class. |
| 34 | // A Region looks like this: |
| 35 | // UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1 |
| 36 | // |
| 37 | // In order to avoid false sharing the objects of this class should be |
| 38 | // cache-line aligned.
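// Illustrative sizing (not tied to any particular platform's defaults): with
// kSpaceSize == 2^32 and kRegionSizeLog == 20, kRegionSize is 1 MiB,
// kNumPossibleRegions is 2^32 / 2^20 = 4096, and possible_regions needs one
// byte-sized entry per region to record its size class (0 meaning unused).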
| 39 | |
| 40 | struct SizeClassAllocator32FlagMasks { // Bit masks. |
| 41 | enum { |
| 42 | kRandomShuffleChunks = 1, |
| 43 | kUseSeparateSizeClassForBatch = 2, |
| 44 | }; |
| 45 | }; |
| 46 | |
| 47 | template <class Params> |
| 48 | class SizeClassAllocator32 { |
| 49 | private: |
| 50 | static const u64 kTwoLevelByteMapSize1 = |
| 51 | (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12; |
| 52 | static const u64 kMinFirstMapSizeTwoLevelByteMap = 4; |
| 53 | |
| 54 | public: |
| 55 | using AddressSpaceView = typename Params::AddressSpaceView; |
| 56 | static const uptr kSpaceBeg = Params::kSpaceBeg; |
| 57 | static const u64 kSpaceSize = Params::kSpaceSize; |
| 58 | static const uptr kMetadataSize = Params::kMetadataSize; |
| 59 | typedef typename Params::SizeClassMap SizeClassMap; |
| 60 | static const uptr kRegionSizeLog = Params::kRegionSizeLog; |
| 61 | typedef typename Params::MapUnmapCallback MapUnmapCallback; |
| 62 | using ByteMap = typename conditional< |
| 63 | (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap), |
| 64 | FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog), |
| 65 | AddressSpaceView>, |
| 66 | TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type; |
| 67 | |
| 68 | COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
| 69 | (kSpaceSize & (kSpaceSize - 1)) == 0);
| 70 | |
| 71 | static const bool kRandomShuffleChunks = Params::kFlags & |
| 72 | SizeClassAllocator32FlagMasks::kRandomShuffleChunks; |
| 73 | static const bool kUseSeparateSizeClassForBatch = Params::kFlags & |
| 74 | SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch; |
| 75 | |
| 76 | struct TransferBatch { |
| 77 | static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2; |
| 78 | void SetFromArray(void *batch[], uptr count) { |
| 79 | DCHECK_LE(count, kMaxNumCached); |
| 80 | count_ = count; |
| 81 | for (uptr i = 0; i < count; i++) |
| 82 | batch_[i] = batch[i]; |
| 83 | } |
| 84 | uptr Count() const { return count_; } |
| 85 | void Clear() { count_ = 0; } |
| 86 | void Add(void *ptr) { |
| 87 | batch_[count_++] = ptr; |
| 88 | DCHECK_LE(count_, kMaxNumCached); |
| 89 | } |
| 90 | void CopyToArray(void *to_batch[]) const { |
| 91 | for (uptr i = 0, n = Count(); i < n; i++) |
| 92 | to_batch[i] = batch_[i]; |
| 93 | } |
| 94 | |
| 95 | // How much memory do we need for a batch containing n elements. |
| 96 | static uptr AllocationSizeRequiredForNElements(uptr n) { |
| 97 | return sizeof(uptr) * 2 + sizeof(void *) * n; |
| 98 | } |
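// Example: on a 64-bit target (sizeof(uptr) == sizeof(void *) == 8), a batch
// holding 64 pointers needs 2 * 8 + 64 * 8 = 528 bytes.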
| 99 | static uptr MaxCached(uptr size) { |
| 100 | return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size)); |
| 101 | } |
| 102 | |
| 103 | TransferBatch *next; |
| 104 | |
| 105 | private: |
| 106 | uptr count_; |
| 107 | void *batch_[kMaxNumCached]; |
| 108 | }; |
| 109 | |
| 110 | static const uptr kBatchSize = sizeof(TransferBatch); |
| 111 | COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
| 112 | COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
| 113 | |
| 114 | static uptr ClassIdToSize(uptr class_id) { |
| 115 | return (class_id == SizeClassMap::kBatchClassID) ? |
| 116 | kBatchSize : SizeClassMap::Size(class_id); |
| 117 | } |
| 118 | |
| 119 | typedef SizeClassAllocator32<Params> ThisT; |
| 120 | typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache; |
| 121 | |
| 122 | void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) { |
| 123 | CHECK(!heap_start);
| 124 | possible_regions.Init(); |
| 125 | internal_memset(size_class_info_array, 0, sizeof(size_class_info_array)); |
| 126 | } |
| 127 | |
| 128 | s32 ReleaseToOSIntervalMs() const { |
| 129 | return kReleaseToOSIntervalNever; |
| 130 | } |
| 131 | |
| 132 | void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) { |
| 133 | // This is empty here. Currently only implemented in 64-bit allocator. |
| 134 | } |
| 135 | |
| 136 | void ForceReleaseToOS() { |
| 137 | // Currently implemented in 64-bit allocator only. |
| 138 | } |
| 139 | |
| 140 | void *MapWithCallback(uptr size) { |
| 141 | void *res = MmapOrDie(size, PrimaryAllocatorName); |
| 142 | MapUnmapCallback().OnMap((uptr)res, size); |
| 143 | return res; |
| 144 | } |
| 145 | |
| 146 | void UnmapWithCallback(uptr beg, uptr size) { |
| 147 | MapUnmapCallback().OnUnmap(beg, size); |
| 148 | UnmapOrDie(reinterpret_cast<void *>(beg), size); |
| 149 | } |
| 150 | |
| 151 | static bool CanAllocate(uptr size, uptr alignment) { |
| 152 | return size <= SizeClassMap::kMaxSize && |
| 153 | alignment <= SizeClassMap::kMaxSize; |
| 154 | } |
| 155 | |
| 156 | void *GetMetaData(const void *p) { |
| 157 | CHECK(kMetadataSize);
| 158 | CHECK(PointerIsMine(p));
| 159 | uptr mem = reinterpret_cast<uptr>(p); |
| 160 | uptr beg = ComputeRegionBeg(mem); |
| 161 | uptr size = ClassIdToSize(GetSizeClass(p)); |
| 162 | u32 offset = mem - beg; |
| 163 | uptr n = offset / (u32)size; // 32-bit division |
| 164 | uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize; |
| 165 | return reinterpret_cast<void*>(meta); |
| 166 | } |
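// In other words, metadata records grow backwards from the end of the region:
// chunk n's metadata starts (n + 1) * kMetadataSize bytes before
// beg + kRegionSize, matching the "MetaChunkN .. MetaChunk1" layout described
// at the top of this file.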
| 167 | |
| 168 | NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
| 169 | uptr class_id) { |
| 170 | DCHECK_LT(class_id, kNumClasses); |
| 171 | SizeClassInfo *sci = GetSizeClassInfo(class_id); |
| 172 | SpinMutexLock l(&sci->mutex); |
| 173 | if (sci->free_list.empty()) { |
| 174 | if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
| 175 | return nullptr; |
| 176 | DCHECK(!sci->free_list.empty()); |
| 177 | } |
| 178 | TransferBatch *b = sci->free_list.front(); |
| 179 | sci->free_list.pop_front(); |
| 180 | return b; |
| 181 | } |
| 182 | |
| 183 | NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
| 184 | TransferBatch *b) { |
| 185 | DCHECK_LT(class_id, kNumClasses); |
| 186 | CHECK_GT(b->Count(), 0);
| 187 | SizeClassInfo *sci = GetSizeClassInfo(class_id); |
| 188 | SpinMutexLock l(&sci->mutex); |
| 189 | sci->free_list.push_front(b); |
| 190 | } |
| 191 | |
| 192 | bool PointerIsMine(const void *p) { |
| 193 | uptr mem = reinterpret_cast<uptr>(p); |
| 194 | if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
| 195 | mem &= (kSpaceSize - 1); |
| 196 | if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize) |
| 197 | return false; |
| 198 | return GetSizeClass(p) != 0; |
| 199 | } |
| 200 | |
| 201 | uptr GetSizeClass(const void *p) { |
| 202 | return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))]; |
| 203 | } |
| 204 | |
| 205 | void *GetBlockBegin(const void *p) { |
| 206 | CHECK(PointerIsMine(p));
| 207 | uptr mem = reinterpret_cast<uptr>(p); |
| 208 | uptr beg = ComputeRegionBeg(mem); |
| 209 | uptr size = ClassIdToSize(GetSizeClass(p)); |
| 210 | u32 offset = mem - beg; |
| 211 | u32 n = offset / (u32)size; // 32-bit division |
| 212 | uptr res = beg + (n * (u32)size); |
| 213 | return reinterpret_cast<void*>(res); |
| 214 | } |
| 215 | |
| 216 | uptr GetActuallyAllocatedSize(void *p) { |
| 217 | CHECK(PointerIsMine(p));
| 218 | return ClassIdToSize(GetSizeClass(p)); |
| 219 | } |
| 220 | |
| 221 | static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } |
| 222 | |
| 223 | uptr TotalMemoryUsed() { |
| 224 | // No need to lock here. |
| 225 | uptr res = 0; |
| 226 | for (uptr i = 0; i < kNumPossibleRegions; i++) |
| 227 | if (possible_regions[i]) |
| 228 | res += kRegionSize; |
| 229 | return res; |
| 230 | } |
| 231 | |
| 232 | void TestOnlyUnmap() { |
| 233 | for (uptr i = 0; i < kNumPossibleRegions; i++) |
| 234 | if (possible_regions[i]) |
| 235 | UnmapWithCallback((i * kRegionSize), kRegionSize); |
| 236 | } |
| 237 | |
| 238 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone |
| 239 | // introspection API. |
| 240 | void ForceLock() NO_THREAD_SAFETY_ANALYSIS { |
| 241 | for (uptr i = 0; i < kNumClasses; i++) { |
| 242 | GetSizeClassInfo(i)->mutex.Lock(); |
| 243 | } |
| 244 | } |
| 245 | |
| 246 | void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS { |
| 247 | for (int i = kNumClasses - 1; i >= 0; i--) { |
| 248 | GetSizeClassInfo(i)->mutex.Unlock(); |
| 249 | } |
| 250 | } |
| 251 | |
| 252 | // Iterate over all existing chunks. |
| 253 | // The allocator must be locked when calling this function. |
| 254 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { |
| 255 | for (uptr region = 0; region < kNumPossibleRegions; region++) |
| 256 | if (possible_regions[region]) { |
| 257 | uptr chunk_size = ClassIdToSize(possible_regions[region]); |
| 258 | uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize); |
| 259 | uptr region_beg = region * kRegionSize; |
| 260 | for (uptr chunk = region_beg; |
| 261 | chunk < region_beg + max_chunks_in_region * chunk_size; |
| 262 | chunk += chunk_size) { |
| 263 | // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); |
| 264 | callback(chunk, arg); |
| 265 | } |
| 266 | } |
| 267 | } |
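A minimal usage sketch of the iteration contract stated above (the free function and the 'Primary' typedef are illustrative names, not part of this header); the point is simply that ForEachChunk is bracketed by ForceLock()/ForceUnlock():

// Sketch only: count every chunk start address the primary allocator reports.
// 'Primary' stands for some concrete SizeClassAllocator32 instantiation.
static void CountChunk(uptr chunk, void *arg) {
  ++*static_cast<uptr *>(arg);  // 'chunk' is a block begin address
}

static uptr CountPrimaryChunks(Primary &primary) {
  uptr count = 0;
  primary.ForceLock();                       // the allocator must be locked
  primary.ForEachChunk(CountChunk, &count);  // walks every mapped region
  primary.ForceUnlock();
  return count;
}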
| 268 | |
| 269 | void PrintStats() {} |
| 270 | |
| 271 | static uptr AdditionalSize() { return 0; } |
| 272 | |
| 273 | typedef SizeClassMap SizeClassMapT; |
| 274 | static const uptr kNumClasses = SizeClassMap::kNumClasses; |
| 275 | |
| 276 | private: |
| 277 | static const uptr kRegionSize = 1 << kRegionSizeLog; |
| 278 | static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize; |
| 279 | |
| 280 | struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo { |
| 281 | StaticSpinMutex mutex; |
| 282 | IntrusiveList<TransferBatch> free_list; |
| 283 | u32 rand_state; |
| 284 | }; |
| 285 | COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0); |
| 286 | |
| 287 | uptr ComputeRegionId(uptr mem) const { |
| 288 | if (SANITIZER_SIGN_EXTENDED_ADDRESSES) |
| 289 | mem &= (kSpaceSize - 1); |
| 290 | const uptr res = mem >> kRegionSizeLog; |
| 291 | CHECK_LT(res, kNumPossibleRegions); |
| 292 | return res; |
| 293 | } |
| 294 | |
| 295 | uptr ComputeRegionBeg(uptr mem) { |
| 296 | return mem & ~(kRegionSize - 1); |
| 297 | } |
| 298 | |
| 299 | uptr AllocateRegion(AllocatorStats *stat, uptr class_id) { |
| 300 | DCHECK_LT(class_id, kNumClasses); |
| 301 | const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError( |
| 302 | kRegionSize, kRegionSize, PrimaryAllocatorName)); |
| 303 | if (UNLIKELY(!res)) |
| 304 | return 0; |
| 305 | MapUnmapCallback().OnMap(res, kRegionSize); |
| 306 | stat->Add(AllocatorStatMapped, kRegionSize); |
| 307 | CHECK(IsAligned(res, kRegionSize)); |
| 308 | possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id)); |
| 309 | return res; |
| 310 | } |
| 311 | |
| 312 | SizeClassInfo *GetSizeClassInfo(uptr class_id) { |
| 313 | DCHECK_LT(class_id, kNumClasses); |
| 314 | return &size_class_info_array[class_id]; |
| 315 | } |
| 316 | |
| 317 | bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id, |
| 318 | TransferBatch **current_batch, uptr max_count, |
| 319 | uptr *pointers_array, uptr count) { |
| 320 | // If using a separate class for batches, we do not need to shuffle it. |
| 321 | if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch || |
| 322 | class_id != SizeClassMap::kBatchClassID)) |
| 323 | RandomShuffle(pointers_array, count, &sci->rand_state); |
| 324 | TransferBatch *b = *current_batch; |
| 325 | for (uptr i = 0; i < count; i++) { |
| 326 | if (!b) { |
| 327 | b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]); |
| 328 | if (UNLIKELY(!b)) |
| 329 | return false; |
| 330 | b->Clear(); |
| 331 | } |
| 332 | b->Add((void*)pointers_array[i]); |
| 333 | if (b->Count() == max_count) { |
| 334 | sci->free_list.push_back(b); |
| 335 | b = nullptr; |
| 336 | } |
| 337 | } |
| 338 | *current_batch = b; |
| 339 | return true; |
| 340 | } |
| 341 | |
| 342 | bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c, |
| 343 | SizeClassInfo *sci, uptr class_id) { |
| 344 | const uptr region = AllocateRegion(stat, class_id); |
| 345 | if (UNLIKELY(!region)) |
| 346 | return false; |
| 347 | if (kRandomShuffleChunks) |
| 348 | if (UNLIKELY(sci->rand_state == 0)) |
| 349 | // The random state is initialized from ASLR (PIE) and time. |
| 350 | sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime(); |
| 351 | const uptr size = ClassIdToSize(class_id); |
| 352 | const uptr n_chunks = kRegionSize / (size + kMetadataSize); |
| 353 | const uptr max_count = TransferBatch::MaxCached(size); |
| 354 | DCHECK_GT(max_count, 0); |
| 355 | TransferBatch *b = nullptr; |
| 356 | constexpr uptr kShuffleArraySize = 48; |
| 357 | uptr shuffle_array[kShuffleArraySize]; |
| 358 | uptr count = 0; |
| 359 | for (uptr i = region; i < region + n_chunks * size; i += size) { |
| 360 | shuffle_array[count++] = i; |
| 361 | if (count == kShuffleArraySize) { |
| 362 | if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count, |
| 363 | shuffle_array, count))) |
| 364 | return false; |
| 365 | count = 0; |
| 366 | } |
| 367 | } |
| 368 | if (count) { |
| 369 | if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count, |
| 370 | shuffle_array, count))) |
| 371 | return false; |
| 372 | } |
| 373 | if (b) { |
| 374 | CHECK_GT(b->Count(), 0); |
| 375 | sci->free_list.push_back(b); |
| 376 | } |
| 377 | return true; |
| 378 | } |
| 379 | |
| 380 | ByteMap possible_regions; |
| 381 | SizeClassInfo size_class_info_array[kNumClasses]; |
| 382 | }; |
| 1 | //===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | // | |||
| 9 | // Part of the Sanitizer Allocator. | |||
| 10 | // | |||
| 11 | //===----------------------------------------------------------------------===// | |||
| 12 | #ifndef SANITIZER_ALLOCATOR_H | |||
| 13 | #error This file must be included inside sanitizer_allocator.h | |||
| 14 | #endif | |||
| 15 | ||||
| 16 | // SizeClassMap maps allocation sizes into size classes and back. | |||
| 17 | // Class 0 always corresponds to size 0. | |||
| 18 | // The other sizes are controlled by the template parameters: | |||
| 19 | // kMinSizeLog: defines the class 1 as 2^kMinSizeLog. | |||
| 20 | // kMaxSizeLog: defines the last class as 2^kMaxSizeLog. | |||
| 21 | // kMidSizeLog: the classes starting from 1 increase with step | |||
| 22 | // 2^kMinSizeLog until 2^kMidSizeLog. | |||
| 23 | // kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog. | |||
| 24 | // E.g. with kNumBits==3 all size classes after 2^kMidSizeLog | |||
| 25 | // look like 0b1xx0..0, where x is either 0 or 1. | |||
| 26 | // | |||
| 27 | // Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17: | |||
| 28 | // | |||
| 29 | // Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16). | |||
| 30 | // Next 4 classes: 256 + i * 64 (i = 1 to 4). | |||
| 31 | // Next 4 classes: 512 + i * 128 (i = 1 to 4). | |||
| 32 | // ... | |||
| 33 | // Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4). | |||
| 34 | // Last class corresponds to kMaxSize = 1 << kMaxSizeLog. | |||
| 35 | // | |||
| 36 | // This structure of the size class map gives us: | |||
| 37 | // - Efficient table-free class-to-size and size-to-class functions. | |||
| 38 | // - The difference between two consecutive size classes is between 14% and 25%. |
| 39 | // | |||
| 40 | // This class also gives a hint to a thread-caching allocator about the amount | |||
| 41 | // of chunks that need to be cached per-thread: | |||
| 42 | // - kMaxNumCachedHint is a hint for the maximal number of chunks per size class. |
| 43 | // The actual number is computed in TransferBatch. | |||
| 44 | // - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class. | |||
| 45 | // | |||
| 46 | // Part of output of SizeClassMap::Print(): | |||
| 47 | // c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0 | |||
| 48 | // c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1 | |||
| 49 | // c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2 | |||
| 50 | // c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3 | |||
| 51 | // c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4 | |||
| 52 | // c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5 | |||
| 53 | // c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6 | |||
| 54 | // c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7 | |||
| 55 | // | |||
| 56 | // c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8 | |||
| 57 | // c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9 | |||
| 58 | // c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10 | |||
| 59 | // c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11 | |||
| 60 | // c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12 | |||
| 61 | // c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13 | |||
| 62 | // c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14 | |||
| 63 | // c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15 | |||
| 64 | // | |||
| 65 | // c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16 | |||
| 66 | // c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17 | |||
| 67 | // c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18 | |||
| 68 | // c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19 | |||
| 69 | // | |||
| 70 | // c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20 | |||
| 71 | // c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21 | |||
| 72 | // c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22 | |||
| 73 | // c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23 | |||
| 74 | // | |||
| 75 | // c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24 | |||
| 76 | // c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25 | |||
| 77 | // c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26 | |||
| 78 | // c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27 | |||
| 79 | // | |||
| 80 | // ... | |||
| 81 | // | |||
| 82 | // c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48 | |||
| 83 | // c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49 | |||
| 84 | // c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50 | |||
| 85 | // c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51 | |||
| 86 | // | |||
| 87 | // c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52 | |||
| 88 | // | |||
| 89 | // | |||
| 90 | // Another example (kNumBits=2): | |||
| 91 | // c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0 | |||
| 92 | // c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1 | |||
| 93 | // c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2 | |||
| 94 | // c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3 | |||
| 95 | // c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4 | |||
| 96 | // c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5 | |||
| 97 | // c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6 | |||
| 98 | // c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7 | |||
| 99 | // c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8 | |||
| 100 | // c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9 | |||
| 101 | // c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10 | |||
| 102 | // c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11 | |||
| 103 | // c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12 | |||
| 104 | // c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13 | |||
| 105 | // c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14 | |||
| 106 | // c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15 | |||
| 107 | // c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16 | |||
| 108 | // c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17 | |||
| 109 | // c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18 | |||
| 110 | // c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19 | |||
| 111 | // c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20 | |||
| 112 | // c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21 | |||
| 113 | // c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22 | |||
| 114 | // c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23 | |||
| 115 | // c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24 | |||
| 116 | // c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25 | |||
| 117 | // c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26 | |||
| 118 | ||||
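To make the mapping described above concrete, here is a small standalone re-derivation of the ClassID()/Size() formulas (they appear further down in this file) for the first example's parameters, kNumBits=3, kMinSizeLog=4, kMidSizeLog=8. It is a sketch for illustration only, written against plain <cstdint> types rather than the sanitizer's uptr; it checks that a 300-byte request lands in class 17, whose chunk size is the 320 bytes listed for c17 in the table above.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kMinSizeLog = 4, kMidSizeLog = 8;
  const uint64_t kMinSize = 1ULL << kMinSizeLog;   // 16
  const uint64_t kMidSize = 1ULL << kMidSizeLog;   // 256
  const uint64_t kMidClass = kMidSize / kMinSize;  // 16
  const uint64_t S = 3 - 1, M = (1ULL << S) - 1;   // kNumBits=3 -> S=2, M=3

  // Same formula as SizeClassMap::ClassID() below.
  auto class_id = [&](uint64_t size) -> uint64_t {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    const uint64_t l = 63 - __builtin_clzll(size);  // most significant set bit
    const uint64_t hbits = (size >> (l - S)) & M;
    const uint64_t lbits = size & ((1ULL << (l - S)) - 1);
    return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
  };

  // Same formula as SizeClassMap::Size() below (batch class left out).
  auto class_size = [&](uint64_t id) -> uint64_t {
    if (id <= kMidClass)
      return kMinSize * id;
    id -= kMidClass;
    const uint64_t t = kMidSize << (id >> S);
    return t + (t >> S) * (id & M);
  };

  assert(class_id(300) == 17);    // 300 bytes rounds up into class c17 ...
  assert(class_size(17) == 320);  // ... which serves 320-byte chunks.
  return 0;
}

(The round trip matches the "c17 => s: 320" row of the Print() excerpt above.)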
| 119 | template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog, | |||
| 120 | uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog> | |||
| 121 | class SizeClassMap { | |||
| 122 | static const uptr kMinSize = 1 << kMinSizeLog; | |||
| 123 | static const uptr kMidSize = 1 << kMidSizeLog; | |||
| 124 | static const uptr kMidClass = kMidSize / kMinSize; | |||
| 125 | static const uptr S = kNumBits - 1; | |||
| 126 | static const uptr M = (1 << S) - 1; | |||
| 127 | ||||
| 128 | public: | |||
| 129 | // kMaxNumCachedHintT is a power of two. It serves as a hint |
| 130 | // for the size of TransferBatch; the actual size could be a bit smaller. |
| 131 | static const uptr kMaxNumCachedHint = kMaxNumCachedHintT; | |||
| 132 | COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0); |
| 133 | ||||
| 134 | static const uptr kMaxSize = 1UL << kMaxSizeLog; | |||
| 135 | static const uptr kNumClasses = | |||
| 136 | kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1; | |||
| 137 | static const uptr kLargestClassID = kNumClasses - 2; | |||
| 138 | static const uptr kBatchClassID = kNumClasses - 1; | |||
| 139 | COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256); |
| 140 | static const uptr kNumClassesRounded = | |||
| 141 | kNumClasses <= 32 ? 32 : | |||
| 142 | kNumClasses <= 64 ? 64 : | |||
| 143 | kNumClasses <= 128 ? 128 : 256; | |||
| 144 | ||||
| 145 | static uptr Size(uptr class_id) { | |||
| 146 | // Estimate the result for kBatchClassID because this class does not know | |||
| 147 | // the exact size of TransferBatch. It's OK since we are using the actual | |||
| 148 | // sizeof(TransferBatch) where it matters. | |||
| 149 | if (UNLIKELY(class_id == kBatchClassID)) |
| 150 | return kMaxNumCachedHint * sizeof(uptr); | |||
| 151 | if (class_id <= kMidClass) | |||
| 152 | return kMinSize * class_id; | |||
| 153 | class_id -= kMidClass; | |||
| 154 | uptr t = kMidSize << (class_id >> S); | |||
| 155 | return t + (t >> S) * (class_id & M); | |||
| 156 | } | |||
| 157 | ||||
| 158 | static uptr ClassID(uptr size) { | |||
| 159 | if (UNLIKELY(size > kMaxSize)) |
| 160 | return 0; | |||
| 161 | if (size <= kMidSize) | |||
| 162 | return (size + kMinSize - 1) >> kMinSizeLog; | |||
| 163 | const uptr l = MostSignificantSetBitIndex(size); | |||
| 164 | const uptr hbits = (size >> (l - S)) & M; | |||
| 165 | const uptr lbits = size & ((1U << (l - S)) - 1); | |||
| 166 | const uptr l1 = l - kMidSizeLog; | |||
| 167 | return kMidClass + (l1 << S) + hbits + (lbits > 0); | |||
| 168 | } | |||
| 169 | ||||
| 170 | static uptr MaxCachedHint(uptr size) { | |||
| 171 | DCHECK_LE(size, kMaxSize); | |||
| 172 | if (UNLIKELY(size == 0)) |
| 173 | return 0; | |||
| 174 | uptr n; | |||
| 175 | // Force a 32-bit division if the template parameters allow for it. | |||
| 176 | if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31) | |||
| 177 | n = (1UL << kMaxBytesCachedLog) / size; | |||
| 178 | else | |||
| 179 | n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size); | |||
| 180 | return Max<uptr>(1U, Min(kMaxNumCachedHint, n)); | |||
| 181 | } | |||
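Plugging in the parameters of the DefaultSizeClassMap typedef further down (kMaxBytesCachedLog = 16, kMaxNumCachedHint = 128) gives a feel for the hint: MaxCachedHint(16) = Min(128, 65536 / 16) = 128, MaxCachedHint(65536) = Min(128, 1) = 1, and for the largest class MaxCachedHint(131072) the division yields 0, which the Max(1, ...) floor turns back into 1.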
| 182 | ||||
| 183 | static void Print() { | |||
| 184 | uptr prev_s = 0; | |||
| 185 | uptr total_cached = 0; | |||
| 186 | for (uptr i = 0; i < kNumClasses; i++) { | |||
| 187 | uptr s = Size(i); | |||
| 188 | if (s >= kMidSize / 2 && (s & (s - 1)) == 0) | |||
| 189 | Printf("\n"); | |||
| 190 | uptr d = s - prev_s; | |||
| 191 | uptr p = prev_s ? (d * 100 / prev_s) : 0; | |||
| 192 | uptr l = s ? MostSignificantSetBitIndex(s) : 0; | |||
| 193 | uptr cached = MaxCachedHint(s) * s; | |||
| 194 | if (i == kBatchClassID) | |||
| 195 | d = p = l = 0; | |||
| 196 | Printf( | |||
| 197 | "c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\n", | |||
| 198 | i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s)); | |||
| 199 | total_cached += cached; | |||
| 200 | prev_s = s; | |||
| 201 | } | |||
| 202 | Printf("Total cached: %zu\n", total_cached); | |||
| 203 | } | |||
| 204 | ||||
| 205 | static void Validate() { | |||
| 206 | for (uptr c = 1; c < kNumClasses; c++) { | |||
| 207 | // Printf("Validate: c%zd\n", c); | |||
| 208 | uptr s = Size(c); | |||
| 209 | CHECK_NE(s, 0U); |
| 210 | if (c == kBatchClassID) | |||
| 211 | continue; | |||
| 212 | CHECK_EQ(ClassID(s), c); |
| 213 | if (c < kLargestClassID) | |||
| 214 | CHECK_EQ(ClassID(s + 1), c + 1); |
| 215 | CHECK_EQ(ClassID(s - 1), c); |
| 216 | CHECK_GT(Size(c), Size(c - 1)); |
| 217 | } | |||
| 218 | CHECK_EQ(ClassID(kMaxSize + 1), 0); |
| 219 | ||||
| 220 | for (uptr s = 1; s <= kMaxSize; s++) { | |||
| 221 | uptr c = ClassID(s); | |||
| 222 | // Printf("s%zd => c%zd\n", s, c); | |||
| 223 | CHECK_LT(c, kNumClasses); |
| 224 | CHECK_GE(Size(c), s); |
| 225 | if (c > 0) | |||
| 226 | CHECK_LT(Size(c - 1), s); |
| 227 | } | |||
| 228 | } | |||
| 229 | }; | |||
| 230 | ||||
| 231 | typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap; | |||
| 232 | typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap; | |||
| 233 | typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap; | |||
| 234 | ||||
| 235 | // The following SizeClassMap only holds a very small number of cached entries, |
| 236 | // allowing for denser per-class arrays, a smaller memory footprint, and usually |
| 237 | // better performance in threaded environments. |
| 238 | typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap; | |||
| 239 | // Similar to the VeryCompact map above, this one has a small number of different |
| 240 | // size classes, and also reduced thread-local caches. |
| 241 | typedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap; |
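A quick way to exercise any of these maps is through the Validate()/Print() members shown above. A sketch (this would have to be built inside sanitizer_common, since Printf and the CHECK machinery used by Validate() live there; the function name is illustrative):

// Sketch: run the self-checks for one of the predefined maps.
void SizeClassMapSmokeTest() {
  VeryDenseSizeClassMap::Validate();  // CHECKs the ClassID()/Size() round trip
  VeryDenseSizeClassMap::Print();     // dumps the per-class table in the format above
}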