Bug Summary

File: compiler-rt/lib/scudo/standalone/combined.h
Warning: line 238, column 59
The left operand of '-' is a garbage value
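The path the analyzer reports, annotated in combined.h below, is: 'BlockEnd' is declared without an initial value (step 8), the Primary branch of Allocator::allocate() is taken (step 9) so the Secondary branch that would have written 'BlockEnd' is skipped, and 'ClassId' is later assumed to be 0 (step 21), which selects the "BlockEnd - (UserPtr + Size)" arm of the conditional on line 238. The code documents that 'ClassId' cannot be 0 on that path only with DCHECK_NE(ClassId, 0U), which evidently does not constrain the analysis. The following is a minimal standalone sketch of the flagged pattern, using hypothetical names rather than the scudo code:

// Sketch of the reported pattern (hypothetical names, not scudo code): a
// variable initialized on only one branch is later read under a condition the
// analyzer cannot tie to the branch that was actually taken.
#include <cstdint>

// Hypothetical size-to-class mapping; returns 0 for sizes below 1 MiB.
static uint64_t classIdFor(uint64_t Size) { return Size >> 20; }

uint64_t headerSizeField(uint64_t Size, bool UsePrimary) {
  uint64_t ClassId;
  uint64_t BlockEnd; // declared without an initial value
  if (UsePrimary) {
    ClassId = classIdFor(Size); // may be 0; the real code only DCHECKs against this
  } else {
    ClassId = 0;
    BlockEnd = Size + 4096; // only initialized on this branch
  }
  // If ClassId is 0 while UsePrimary was true, the false arm of the conditional
  // reads the uninitialized BlockEnd -- the "garbage value" being reported.
  return ClassId ? Size : BlockEnd - Size;
}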

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name wrappers_cpp.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -ffreestanding -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/projects/compiler-rt/lib/scudo/standalone -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/scudo/standalone -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/scudo/standalone/../.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/projects/compiler-rt/lib/scudo/standalone -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fcxx-exceptions -fexceptions -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-11-181444-25759-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp

1//===-- wrappers_cpp.cpp ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "platform.h"
10
11// Skip this compilation unit if compiled as part of Bionic.
12#if !SCUDO_ANDROID || !_BIONIC
13
14#include "allocator_config.h"
15
16#include <stdint.h>
17
18extern scudo::Allocator<scudo::Config> *AllocatorPtr;
19
20namespace std {
21struct nothrow_t {};
22enum class align_val_t : size_t {};
23} // namespace std
24
25INTERFACE WEAK void *operator new(size_t size) {
26 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
27}
28INTERFACE WEAK void *operator new[](size_t size) {
29 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
30}
31INTERFACE WEAK void *operator new(size_t size,
32 std::nothrow_t const &) NOEXCEPT {
33 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
34}
35INTERFACE WEAK void *operator new[](size_t size,
36 std::nothrow_t const &) NOEXCEPT {
37 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
1
Calling 'Allocator::allocate'
38}
39INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
40 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
41 static_cast<scudo::uptr>(align));
42}
43INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
44 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
45 static_cast<scudo::uptr>(align));
46}
47INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
48 std::nothrow_t const &) NOEXCEPT {
49 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
50 static_cast<scudo::uptr>(align));
51}
52INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
53 std::nothrow_t const &) NOEXCEPT {
54 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
55 static_cast<scudo::uptr>(align));
56}
57
58INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
59 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
60}
61INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
62 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
63}
64INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &) NOEXCEPT {
65 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
66}
67INTERFACE WEAK void operator delete[](void *ptr,
68 std::nothrow_t const &) NOEXCEPT {
69 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
70}
71INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
72 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
73}
74INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
75 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
76}
77INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
78 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
79 static_cast<scudo::uptr>(align));
80}
81INTERFACE WEAK void operator delete[](void *ptr,
82 std::align_val_t align) NOEXCEPT {
83 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
84 static_cast<scudo::uptr>(align));
85}
86INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
87 std::nothrow_t const &) NOEXCEPT {
88 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
89 static_cast<scudo::uptr>(align));
90}
91INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
92 std::nothrow_t const &) NOEXCEPT {
93 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
94 static_cast<scudo::uptr>(align));
95}
96INTERFACE WEAK void operator delete(void *ptr, size_t size,
97 std::align_val_t align) NOEXCEPT {
98 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
99 static_cast<scudo::uptr>(align));
100}
101INTERFACE WEAK void operator delete[](void *ptr, size_t size,
102 std::align_val_t align) NOEXCEPT {
103 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
104 static_cast<scudo::uptr>(align));
105}
106
107#endif // !SCUDO_ANDROID || !_BIONIC

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/scudo/standalone/combined.h

1//===-- combined.h ----------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_COMBINED_H_
10#define SCUDO_COMBINED_H_
11
12#include "chunk.h"
13#include "common.h"
14#include "flags.h"
15#include "flags_parser.h"
16#include "interface.h"
17#include "local_cache.h"
18#include "quarantine.h"
19#include "report.h"
20#include "secondary.h"
21#include "tsd.h"
22
23namespace scudo {
24
25template <class Params> class Allocator {
26public:
27 using PrimaryT = typename Params::Primary;
28 using CacheT = typename PrimaryT::CacheT;
29 typedef Allocator<Params> ThisT;
30 typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
31
32 struct QuarantineCallback {
33 explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
34 : Allocator(Instance), Cache(LocalCache) {}
35
36 // Chunk recycling function, returns a quarantined chunk to the backend,
37 // first making sure it hasn't been tampered with.
38 void recycle(void *Ptr) {
39 Chunk::UnpackedHeader Header;
40 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
41 if (UNLIKELY(Header.State != Chunk::State::Quarantined))
42 reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
43
44 Chunk::UnpackedHeader NewHeader = Header;
45 NewHeader.State = Chunk::State::Available;
46 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
47
48 void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
49 const uptr ClassId = NewHeader.ClassId;
50 if (LIKELY(ClassId))
51 Cache.deallocate(ClassId, BlockBegin);
52 else
53 Allocator.Secondary.deallocate(BlockBegin);
54 }
55
56 // We take a shortcut when allocating a quarantine batch by working with the
57 // appropriate class ID instead of using Size. The compiler should optimize
58 // the class ID computation and work with the associated cache directly.
59 void *allocate(UNUSED uptr Size) {
60 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
61 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
62 void *Ptr = Cache.allocate(QuarantineClassId);
63 // Quarantine batch allocation failure is fatal.
64 if (UNLIKELY(!Ptr))
65 reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
66
67 Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
68 Chunk::getHeaderSize());
69 Chunk::UnpackedHeader Header = {};
70 Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
71 Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
72 Header.State = Chunk::State::Allocated;
73 Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
74
75 return Ptr;
76 }
77
78 void deallocate(void *Ptr) {
79 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
80 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
81 Chunk::UnpackedHeader Header;
82 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
83
84 if (UNLIKELY(Header.State != Chunk::State::Allocated))
85 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
86 DCHECK_EQ(Header.ClassId, QuarantineClassId);
87 DCHECK_EQ(Header.Offset, 0);
88 DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
89
90 Chunk::UnpackedHeader NewHeader = Header;
91 NewHeader.State = Chunk::State::Available;
92 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
93 Cache.deallocate(QuarantineClassId,
94 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
95 Chunk::getHeaderSize()));
96 }
97
98 private:
99 ThisT &Allocator;
100 CacheT &Cache;
101 };
102
103 typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
104 typedef typename QuarantineT::CacheT QuarantineCacheT;
105
106 void initLinkerInitialized() {
107 performSanityChecks();
108
109 // Check if hardware CRC32 is supported in the binary and by the platform,
110 // if so, opt for the CRC32 hardware version of the checksum.
111 if (&computeHardwareCRC32 && hasHardwareCRC32())
112 HashAlgorithm = Checksum::HardwareCRC32;
113
114 if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
115 Cookie = static_cast<u32>(getMonotonicTime() ^
116 (reinterpret_cast<uptr>(this) >> 4));
117
118 initFlags();
119 reportUnrecognizedFlags();
120
121 // Store some flags locally.
122 Options.MayReturnNull = getFlags()->may_return_null;
123 Options.ZeroContents = getFlags()->zero_contents;
124 Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
125 Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
126 Options.QuarantineMaxChunkSize =
127 static_cast<u32>(getFlags()->quarantine_max_chunk_size);
128
129 Stats.initLinkerInitialized();
130 Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
131 Secondary.initLinkerInitialized(&Stats);
132
133 Quarantine.init(
134 static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
135 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
136 }
137
138 void reset() { memset(this, 0, sizeof(*this)); }
139
140 void unmapTestOnly() {
141 TSDRegistry.unmapTestOnly();
142 Primary.unmapTestOnly();
143 }
144
145 TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
146
147 void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
148
149 // Release the resources used by a TSD, which involves:
150 // - draining the local quarantine cache to the global quarantine;
151 // - releasing the cached pointers back to the Primary;
152 // - unlinking the local stats from the global ones (destroying the cache does
153 // the last two items).
154 void commitBack(TSD<ThisT> *TSD) {
155 Quarantine.drain(&TSD->QuarantineCache,
156 QuarantineCallback(*this, TSD->Cache));
157 TSD->Cache.destroy(&Stats);
158 }
159
160 NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
161 uptr Alignment = MinAlignment,
162 bool ZeroContents = false) {
163 initThreadMaybe();
164 ZeroContents = ZeroContents || Options.ZeroContents;
1.1
'ZeroContents' is false
165
166 if (UNLIKELY(Alignment > MaxAlignment)) {
2
Taking false branch
167 if (Options.MayReturnNull)
168 return nullptr;
169 reportAlignmentTooBig(Alignment, MaxAlignment);
170 }
171 if (Alignment < MinAlignment)
2.1
'Alignment' is >= 'MinAlignment'
3
Taking false branch
172 Alignment = MinAlignment;
173
174 // If the requested size happens to be 0 (more common than you might think),
175 // allocate MinAlignment bytes on top of the header. Then add the extra
176 // bytes required to fulfill the alignment requirements: we allocate enough
177 // to be sure that there will be an address in the block that will satisfy
178 // the alignment.
179 const uptr NeededSize =
180 roundUpTo(Size, MinAlignment) +
181 ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
3.1
'Alignment' is <= 'MinAlignment'
4
'?' condition is false
182
183 // Takes care of extravagantly large sizes as well as integer overflows.
184 if (UNLIKELY(Size >= MaxAllowedMallocSize ||
185 NeededSize >= MaxAllowedMallocSize)) {
5
Assuming 'Size' is < 'MaxAllowedMallocSize'
6
Assuming 'NeededSize' is < 'MaxAllowedMallocSize'
7
Taking false branch
186 if (Options.MayReturnNull)
187 return nullptr;
188 reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
189 }
190
191 void *Block;
192 uptr ClassId;
193 uptr BlockEnd;
8
'BlockEnd' declared without an initial value
194 if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
9
Taking true branch
195 ClassId = SizeClassMap::getClassIdBySize(NeededSize);
10
Calling 'SizeClassMap::getClassIdBySize'
14
Returning from 'SizeClassMap::getClassIdBySize'
196 DCHECK_NE(ClassId, 0U);
197 bool UnlockRequired;
198 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
199 Block = TSD->Cache.allocate(ClassId);
200 if (UnlockRequired)
14.1
'UnlockRequired' is false
15
Taking false branch
201 TSD->unlock();
202 } else {
203 ClassId = 0;
204 Block =
205 Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents);
206 }
207
208 if (UNLIKELY(!Block)) {
16
Assuming 'Block' is non-null
17
Taking false branch
209 if (Options.MayReturnNull)
210 return nullptr;
211 reportOutOfMemory(NeededSize);
212 }
213
214 // We only need to zero the contents for Primary backed allocations. This
215 // condition is not necessarily unlikely, but since memset is costly, we
216 // might as well mark it as such.
217 if (UNLIKELY(ZeroContents && ClassId))
18
Assuming 'ZeroContents' is false
19
Taking false branch
218 memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
219
220 Chunk::UnpackedHeader Header = {};
221 uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
222 if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
20
Taking false branch
223 const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
224 const uptr Offset = AlignedUserPtr - UserPtr;
225 DCHECK_GT(Offset, 2 * sizeof(u32));
226 // The BlockMarker has no security purpose, but is specifically meant for
227 // the chunk iteration function that can be used in debugging situations.
228 // It is the only situation where we have to locate the start of a chunk
229 // based on its block address.
230 reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
231 reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
232 UserPtr = AlignedUserPtr;
233 Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
234 }
235 Header.ClassId = ClassId & Chunk::ClassIdMask;
236 Header.State = Chunk::State::Allocated;
237 Header.Origin = Origin & Chunk::OriginMask;
238 Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
21
Assuming 'ClassId' is 0
22
'?' condition is false
23
The left operand of '-' is a garbage value
239 Chunk::SizeOrUnusedBytesMask;
240 void *Ptr = reinterpret_cast<void *>(UserPtr);
241 Chunk::storeHeader(Cookie, Ptr, &Header);
242
243 if (&__scudo_allocate_hook)
244 __scudo_allocate_hook(Ptr, Size);
245
246 return Ptr;
247 }
248
249 NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
250 UNUSED uptr Alignment = MinAlignment) {
251 // For a deallocation, we only ensure minimal initialization, meaning thread
252 // local data will be left uninitialized for now (when using ELF TLS). The
253 // fallback cache will be used instead. This is a workaround for a situation
254 // where the only heap operation performed in a thread would be a free past
255 // the TLS destructors, ending up in initialized thread specific data never
256 // being destroyed properly. Any other heap operation will do a full init.
257 initThreadMaybe(/*MinimalInit=*/true);
258
259 if (&__scudo_deallocate_hook)
260 __scudo_deallocate_hook(Ptr);
261
262 if (UNLIKELY(!Ptr))
263 return;
264 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
265 reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
266
267 Chunk::UnpackedHeader Header;
268 Chunk::loadHeader(Cookie, Ptr, &Header);
269
270 if (UNLIKELY(Header.State != Chunk::State::Allocated))
271 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
272 if (Options.DeallocTypeMismatch) {
273 if (Header.Origin != Origin) {
274 // With the exception of memalign'd chunks, that can be still be free'd.
275 if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
276 Origin != Chunk::Origin::Malloc))
277 reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
278 Header.Origin, Origin);
279 }
280 }
281
282 const uptr Size = getSize(Ptr, &Header);
283 if (DeleteSize && Options.DeleteSizeMismatch) {
284 if (UNLIKELY(DeleteSize != Size))
285 reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
286 }
287
288 quarantineOrDeallocateChunk(Ptr, &Header, Size);
289 }
290
291 void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
292 initThreadMaybe();
293
294 // The following cases are handled by the C wrappers.
295 DCHECK_NE(OldPtr, nullptr);
296 DCHECK_NE(NewSize, 0);
297
298 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
299 reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
300
301 Chunk::UnpackedHeader OldHeader;
302 Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
303
304 if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
305 reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
306
307 // Pointer has to be allocated with a malloc-type function. Some
308 // applications think that it is OK to realloc a memalign'ed pointer, which
309 // will trigger this check. It really isn't.
310 if (Options.DeallocTypeMismatch) {
311 if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
312 reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
313 OldHeader.Origin, Chunk::Origin::Malloc);
314 }
315
316 void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
317 uptr BlockEnd;
318 uptr OldSize;
319 const uptr ClassId = OldHeader.ClassId;
320 if (LIKELY(ClassId)) {
321 BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
322 SizeClassMap::getSizeByClassId(ClassId);
323 OldSize = OldHeader.SizeOrUnusedBytes;
324 } else {
325 BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
326 OldSize = BlockEnd -
327 (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
328 }
329 // If the new chunk still fits in the previously allocated block (with a
330 // reasonable delta), we just keep the old block, and update the chunk
331 // header to reflect the size change.
332 if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
333 const uptr Delta =
334 OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
335 if (Delta <= SizeClassMap::MaxSize / 2) {
336 Chunk::UnpackedHeader NewHeader = OldHeader;
337 NewHeader.SizeOrUnusedBytes =
338 (ClassId ? NewSize
339 : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
340 Chunk::SizeOrUnusedBytesMask;
341 Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
342 return OldPtr;
343 }
344 }
345
346 // Otherwise we allocate a new one, and deallocate the old one. Some
347 // allocators will allocate an even larger chunk (by a fixed factor) to
348 // allow for potential further in-place realloc. The gains of such a trick
349 // are currently unclear.
350 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
351 if (NewPtr) {
352 const uptr OldSize = getSize(OldPtr, &OldHeader);
353 memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
354 quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
355 }
356 return NewPtr;
357 }
358
359 // TODO(kostyak): while this locks the Primary & Secondary, it still allows
360 // pointers to be fetched from the TSD. We ultimately want to
361 // lock the registry as well. For now, it's good enough.
362 void disable() {
363 initThreadMaybe();
364 Primary.disable();
365 Secondary.disable();
366 }
367
368 void enable() {
369 initThreadMaybe();
370 Secondary.enable();
371 Primary.enable();
372 }
373
374 // The function returns the amount of bytes required to store the statistics,
375 // which might be larger than the amount of bytes provided. Note that the
376 // statistics buffer is not necessarily constant between calls to this
377 // function. This can be called with a null buffer or zero size for buffer
378 // sizing purposes.
379 uptr getStats(char *Buffer, uptr Size) {
380 ScopedString Str(1024);
381 disable();
382 const uptr Length = getStats(&Str) + 1;
383 enable();
384 if (Length < Size)
385 Size = Length;
386 if (Buffer && Size) {
387 memcpy(Buffer, Str.data(), Size);
388 Buffer[Size - 1] = '\0';
389 }
390 return Length;
391 }
392
393 void printStats() {
394 ScopedString Str(1024);
395 disable();
396 getStats(&Str);
397 enable();
398 Str.output();
399 }
400
401 void releaseToOS() { Primary.releaseToOS(); }
402
403 // Iterate over all chunks and call a callback for all busy chunks located
404 // within the provided memory range. Said callback must not use this allocator
405 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
406 void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
407 void *Arg) {
408 initThreadMaybe();
409 const uptr From = Base;
410 const uptr To = Base + Size;
411 auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
412 if (Block < From || Block >= To)
413 return;
414 uptr ChunkSize;
415 const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
416 if (ChunkBase != InvalidChunk)
417 Callback(ChunkBase, ChunkSize, Arg);
418 };
419 Primary.iterateOverBlocks(Lambda);
420 Secondary.iterateOverBlocks(Lambda);
421 }
422
423 bool canReturnNull() {
424 initThreadMaybe();
425 return Options.MayReturnNull;
426 }
427
428 // TODO(kostyak): implement this as a "backend" to mallopt.
429 bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
430
431 // Return the usable size for a given chunk. Technically we lie, as we just
432 // report the actual size of a chunk. This is done to counteract code actively
433 // writing past the end of a chunk (like sqlite3) when the usable size allows
434 // for it, which then forces realloc to copy the usable size of a chunk as
435 // opposed to its actual size.
436 uptr getUsableSize(const void *Ptr) {
437 initThreadMaybe();
438 if (UNLIKELY(!Ptr))
439 return 0;
440 Chunk::UnpackedHeader Header;
441 Chunk::loadHeader(Cookie, Ptr, &Header);
442 // Getting the usable size of a chunk only makes sense if it's allocated.
443 if (UNLIKELY(Header.State != Chunk::State::Allocated))
444 reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
445 return getSize(Ptr, &Header);
446 }
447
448 void getStats(StatCounters S) {
449 initThreadMaybe();
450 Stats.get(S);
451 }
452
453private:
454 using SecondaryT = typename Params::Secondary;
455 typedef typename PrimaryT::SizeClassMap SizeClassMap;
456
457 static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
458 static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
459 static const uptr MinAlignment = 1UL << MinAlignmentLog;
460 static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
461 static const uptr MaxAllowedMallocSize =
462 FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
463
464 // Constants used by the chunk iteration mechanism.
465 static const u32 BlockMarker = 0x44554353U;
466 static const uptr InvalidChunk = ~static_cast<uptr>(0);
467
468 GlobalStats Stats;
469 TSDRegistryT TSDRegistry;
470 PrimaryT Primary;
471 SecondaryT Secondary;
472 QuarantineT Quarantine;
473
474 u32 Cookie;
475
476 struct {
477 u8 MayReturnNull : 1; // may_return_null
478 u8 ZeroContents : 1; // zero_contents
479 u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
480 u8 DeleteSizeMismatch : 1; // delete_size_mismatch
481 u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
482 } Options;
483
484 // The following might get optimized out by the compiler.
485 NOINLINE void performSanityChecks() {
486 // Verify that the header offset field can hold the maximum offset. In the
487 // case of the Secondary allocator, it takes care of alignment and the
488 // offset will always be small. In the case of the Primary, the worst case
489 // scenario happens in the last size class, when the backend allocation
490 // would already be aligned on the requested alignment, which would happen
491 // to be the maximum alignment that would fit in that size class. As a
492 // result, the maximum offset will be at most the maximum alignment for the
493 // last size class minus the header size, in multiples of MinAlignment.
494 Chunk::UnpackedHeader Header = {};
495 const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
496 SizeClassMap::MaxSize - MinAlignment);
497 const uptr MaxOffset =
498 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
499 Header.Offset = MaxOffset & Chunk::OffsetMask;
500 if (UNLIKELY(Header.Offset != MaxOffset))
501 reportSanityCheckError("offset");
502
503 // Verify that we can fit the maximum size or amount of unused bytes in the
504 // header. Given that the Secondary fits the allocation to a page, the worst
505 // case scenario happens in the Primary. It will depend on the second to
506 // last and last class sizes, as well as the dynamic base for the Primary.
507 // The following is an over-approximation that works for our needs.
508 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
509 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
510 if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
511 reportSanityCheckError("size (or unused bytes)");
512
513 const uptr LargestClassId = SizeClassMap::LargestClassId;
514 Header.ClassId = LargestClassId;
515 if (UNLIKELY(Header.ClassId != LargestClassId))
516 reportSanityCheckError("class ID");
517 }
518
519 static INLINE void *getBlockBegin(const void *Ptr,
520 Chunk::UnpackedHeader *Header) {
521 return reinterpret_cast<void *>(
522 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
523 (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
524 }
525
526 // Return the size of a chunk as requested during its allocation.
527 INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
528 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
529 if (LIKELY(Header->ClassId))
530 return SizeOrUnusedBytes;
531 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
532 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
533 }
534
535 ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
536 TSDRegistry.initThreadMaybe(this, MinimalInit);
537 }
538
539 void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
540 uptr Size) {
541 Chunk::UnpackedHeader NewHeader = *Header;
542 // If the quarantine is disabled, the actual size of a chunk is 0 or larger
543 // than the maximum allowed, we return a chunk directly to the backend.
544 const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
545 (Size > Options.QuarantineMaxChunkSize);
546 if (BypassQuarantine) {
547 NewHeader.State = Chunk::State::Available;
548 Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
549 void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
550 const uptr ClassId = NewHeader.ClassId;
551 if (LIKELY(ClassId)) {
552 bool UnlockRequired;
553 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
554 TSD->Cache.deallocate(ClassId, BlockBegin);
555 if (UnlockRequired)
556 TSD->unlock();
557 } else {
558 Secondary.deallocate(BlockBegin);
559 }
560 } else {
561 NewHeader.State = Chunk::State::Quarantined;
562 Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
563 bool UnlockRequired;
564 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
565 Quarantine.put(&TSD->QuarantineCache,
566 QuarantineCallback(*this, TSD->Cache), Ptr, Size);
567 if (UnlockRequired)
568 TSD->unlock();
569 }
570 }
571
572 // This only cares about valid busy chunks. This might change in the future.
573 uptr getChunkFromBlock(uptr Block, uptr *Size) {
574 u32 Offset = 0;
575 if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
576 Offset = reinterpret_cast<u32 *>(Block)[1];
577 const uptr P = Block + Offset + Chunk::getHeaderSize();
578 const void *Ptr = reinterpret_cast<const void *>(P);
579 Chunk::UnpackedHeader Header;
580 if (!Chunk::isValid(Cookie, Ptr, &Header) ||
581 Header.State != Chunk::State::Allocated)
582 return InvalidChunk;
583 if (Size)
584 *Size = getSize(Ptr, &Header);
585 return P;
586 }
587
588 uptr getStats(ScopedString *Str) {
589 Primary.getStats(Str);
590 Secondary.getStats(Str);
591 Quarantine.getStats(Str);
592 return Str->length();
593 }
594};
595
596} // namespace scudo
597
598#endif // SCUDO_COMBINED_H_
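For context on the finding above: the uninitialized read is only possible if ClassId can be 0 after the Primary branch of allocate() was taken, which the code rules out only via DCHECK_NE(ClassId, 0U), a check the reported path shows does not constrain the analysis. A defensive pattern that would remove the warning regardless of that invariant (a sketch of one option, not necessarily the change adopted upstream) is to give the block-end variable a defined value on every path, shown here on the same hypothetical example used after the bug summary:

#include <cstdint>

// Hypothetical size-to-class mapping, as in the earlier sketch.
static uint64_t classIdFor(uint64_t Size) { return Size >> 20; }

uint64_t headerSizeFieldDefensive(uint64_t Size, bool UsePrimary) {
  uint64_t ClassId;
  uint64_t BlockEnd = 0; // defined on every path; the Secondary branch overwrites it
  if (UsePrimary) {
    ClassId = classIdFor(Size);
  } else {
    ClassId = 0;
    BlockEnd = Size + 4096;
  }
  // Even if classIdFor() returns 0, BlockEnd now holds a defined (if meaningless)
  // value, so the subtraction no longer reads uninitialized memory.
  return ClassId ? Size : BlockEnd - Size;
}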

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/compiler-rt/lib/scudo/standalone/size_class_map.h

1//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_SIZE_CLASS_MAP_H_
10#define SCUDO_SIZE_CLASS_MAP_H_
11
12#include "common.h"
13#include "string_utils.h"
14
15namespace scudo {
16
17// SizeClassMap maps allocation sizes into size classes and back, in an
18// efficient table-free manner.
19//
20// Class 0 is a special class that doesn't abide by the same rules as other
21// classes. The allocator uses it to hold batches.
22//
23// The other sizes are controlled by the template parameters:
24// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
25// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
26// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
27// 2^MidSizeLog bytes.
28// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
29// eg. with NumBits==3 all size classes after 2^MidSizeLog look like
30// 0b1xx0..0 (where x is either 0 or 1).
31//
32// This class also gives a hint to a thread-caching allocator about the amount
33// of chunks that can be cached per-thread:
34// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
35// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
36
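To make the mapping described above concrete, here is a small standalone rendition of getClassIdBySize() specialized to the DefaultSizeClassMap parameters defined later in this file (NumBits=3, MinSizeLog=5, MidSizeLog=8, MaxSizeLog=17, hence MinSize=32, MidSize=256, MidClass=8, S=2, M=3). It is an illustration only, not part of the analyzed source:

#include <cassert>
#include <cstdint>

// Size-to-class mapping with the DefaultSizeClassMap parameters.
static uint64_t classIdBySize(uint64_t Size) {
  const uint64_t MinSizeLog = 5, MidSizeLog = 8, S = 2, M = 3;
  const uint64_t MinSize = 1ULL << MinSizeLog, MidSize = 1ULL << MidSizeLog;
  const uint64_t MidClass = MidSize / MinSize;
  if (Size <= MidSize) // linear region: classes step by 32 bytes
    return (Size + MinSize - 1) >> MinSizeLog;
  const uint64_t L = 63 - __builtin_clzll(Size); // most significant set bit
  const uint64_t HBits = (Size >> (L - S)) & M;
  const uint64_t LBits = Size & ((1ULL << (L - S)) - 1);
  return MidClass + ((L - MidSizeLog) << S) + HBits + (LBits > 0);
}

int main() {
  assert(classIdBySize(100) == 4);   // 100 rounds up to 128 = 4 * 32
  assert(classIdBySize(1000) == 16); // 1000 rounds up to the 1024-byte class
  return 0;
}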
37template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
38 u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
39class SizeClassMap {
40 static const uptr MinSize = 1UL << MinSizeLog;
41 static const uptr MidSize = 1UL << MidSizeLog;
42 static const uptr MidClass = MidSize / MinSize;
43 static const u8 S = NumBits - 1;
44 static const uptr M = (1UL << S) - 1;
45
46public:
47 static const u32 MaxNumCachedHint = MaxNumCachedHintT;
48
49 static const uptr MaxSize = 1UL << MaxSizeLog;
50 static const uptr NumClasses =
51 MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
52 COMPILER_CHECK(NumClasses <= 256);
53 static const uptr LargestClassId = NumClasses - 1;
54 static const uptr BatchClassId = 0;
55
56 static uptr getSizeByClassId(uptr ClassId) {
57 DCHECK_NE(ClassId, BatchClassId);
58 if (ClassId <= MidClass)
59 return ClassId << MinSizeLog;
60 ClassId -= MidClass;
61 const uptr T = MidSize << (ClassId >> S);
62 return T + (T >> S) * (ClassId & M);
63 }
64
65 static uptr getClassIdBySize(uptr Size) {
66 DCHECK_LE(Size, MaxSize);
67 if (Size <= MidSize)
11
Assuming 'Size' is <= 'MidSize'
12
Taking true branch
68 return (Size + MinSize - 1) >> MinSizeLog;
13
Returning value, which participates in a condition later
69 const uptr L = getMostSignificantSetBitIndex(Size);
70 const uptr HBits = (Size >> (L - S)) & M;
71 const uptr LBits = Size & ((1UL << (L - S)) - 1);
72 const uptr L1 = L - MidSizeLog;
73 return MidClass + (L1 << S) + HBits + (LBits > 0);
74 }
75
76 static u32 getMaxCachedHint(uptr Size) {
77 DCHECK_LE(Size, MaxSize);
78 DCHECK_NE(Size, 0);
79 u32 N;
80 // Force a 32-bit division if the template parameters allow for it.
81 if (MaxBytesCachedLog > 31 || MaxSizeLog > 31)
82 N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size);
83 else
84 N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size);
85 return Max(1U, Min(MaxNumCachedHint, N));
86 }
87
88 static void print() {
89 ScopedString Buffer(1024);
90 uptr PrevS = 0;
91 uptr TotalCached = 0;
92 for (uptr I = 0; I < NumClasses; I++) {
93 if (I == BatchClassId)
94 continue;
95 const uptr S = getSizeByClassId(I);
96 if (S >= MidSize / 2 && (S & (S - 1)) == 0)
97 Buffer.append("\n");
98 const uptr D = S - PrevS;
99 const uptr P = PrevS ? (D * 100 / PrevS) : 0;
100 const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
101 const uptr Cached = getMaxCachedHint(S) * S;
102 Buffer.append(
103 "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
104 I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
105 getClassIdBySize(S));
106 TotalCached += Cached;
107 PrevS = S;
108 }
109 Buffer.append("Total Cached: %zu\n", TotalCached);
110 Buffer.output();
111 }
112
113 static void validate() {
114 for (uptr C = 0; C < NumClasses; C++) {
115 if (C == BatchClassId)
116 continue;
117 const uptr S = getSizeByClassId(C);
118 CHECK_NE(S, 0U);
119 CHECK_EQ(getClassIdBySize(S), C);
120 if (C < LargestClassId)
121 CHECK_EQ(getClassIdBySize(S + 1), C + 1);
122 CHECK_EQ(getClassIdBySize(S - 1), C);
123 CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
124 }
125 // Do not perform the loop if the maximum size is too large.
126 if (MaxSizeLog > 19)
127 return;
128 for (uptr S = 1; S <= MaxSize; S++) {
129 const uptr C = getClassIdBySize(S);
130 CHECK_LT(C, NumClasses);
131 CHECK_GE(getSizeByClassId(C), S);
132 if (C > 0)
133 CHECK_LT(getSizeByClassId(C - 1), S);
134 }
135 }
136};
137
138typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
139
140// TODO(kostyak): further tune class maps for Android & Fuchsia.
141#if SCUDO_WORDSIZE == 64U
142typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
143typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
144#else
145typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
146typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
147#endif
148
149} // namespace scudo
150
151#endif // SCUDO_SIZE_CLASS_MAP_H_