Bug Summary

File: projects/compiler-rt/lib/scudo/standalone/combined.h
Warning: line 236, column 59
The left operand of '-' is a garbage value
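What the report boils down to: in Allocator::allocate (combined.h), BlockEnd is declared without an initial value and is only assigned on the Secondary path (ClassId == 0); the DCHECK_NE(ClassId, 0U) that would exclude that case on the Primary path is a debug-only check, so the analyzer keeps a path on which "BlockEnd - (UserPtr + Size)" reads an uninitialized variable. Below is a minimal sketch of the same shape, for illustration only (the names sizeOrUnusedBytes, primaryFits and classIdFromSize are invented for the sketch, this is not the Scudo code):

// Reduced illustration of the flagged pattern: `end` is written on only one
// branch, but the other branch's ternary arm can still name it.
#include <cstdint>
#include <cstdio>

uint64_t sizeOrUnusedBytes(uint64_t size, uint64_t userPtr, bool primaryFits,
                           uint64_t classIdFromSize) {
  uint64_t classId;
  uint64_t end; // declared without an initial value, like BlockEnd
  if (primaryFits) {
    classId = classIdFromSize; // nothing here tells the analyzer this is non-zero
  } else {
    classId = 0;
    end = userPtr + 2 * size; // only this branch initializes `end`
  }
  // On a path where primaryFits is true but classId is assumed to be 0,
  // `end - (userPtr + size)` is a read of a garbage value.
  return classId ? size : end - (userPtr + size);
}

int main() {
  std::printf("%llu\n",
              (unsigned long long)sizeOrUnusedBytes(32, 0x1000, true, 3));
  return 0;
}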

Annotated Source Code


clang -cc1 -triple i386-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name wrappers_cpp.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -ffreestanding -fuse-init-array -target-cpu i686 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/projects/compiler-rt/lib/scudo/standalone -I /build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/scudo/standalone -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn374877/include -I /build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/scudo/standalone/../.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0/32 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/i386-pc-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/projects/compiler-rt/lib/scudo/standalone -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn374877=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fcxx-exceptions -fexceptions -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-10-15-233810-7101-1 -x c++ /build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp

/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp

1//===-- wrappers_cpp.cpp ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "platform.h"
10
11// Skip this compilation unit if compiled as part of Bionic.
12 #if !SCUDO_ANDROID || !_BIONIC
13
14#include "allocator_config.h"
15
16#include <stdint.h>
17
18extern scudo::Allocator<scudo::Config> *AllocatorPtr;
19
20namespace std {
21struct nothrow_t {};
22enum class align_val_t : size_t {};
23} // namespace std
24
25 INTERFACE WEAK void *operator new(size_t size) {
26 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
27 }
28 INTERFACE WEAK void *operator new[](size_t size) {
29 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
30 }
31 INTERFACE WEAK void *operator new(size_t size,
32 std::nothrow_t const &) NOEXCEPT {
33 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
34 }
35 INTERFACE WEAK void *operator new[](size_t size,
36 std::nothrow_t const &) NOEXCEPT {
37 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
  [1] Calling 'Allocator::allocate'
38 }
39 INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
40 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
41 static_cast<scudo::uptr>(align));
42 }
43 INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
44 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
45 static_cast<scudo::uptr>(align));
46 }
47 INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
48 std::nothrow_t const &) NOEXCEPT {
49 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
50 static_cast<scudo::uptr>(align));
51 }
52 INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
53 std::nothrow_t const &) NOEXCEPT {
54 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
55 static_cast<scudo::uptr>(align));
56 }
57
58 INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
59 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
60 }
61 INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
62 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
63 }
64 INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &) NOEXCEPT {
65 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
66 }
67 INTERFACE WEAK void operator delete[](void *ptr,
68 std::nothrow_t const &) NOEXCEPT {
69 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
70 }
71 INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
72 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
73 }
74 INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
75 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
76 }
77 INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
78 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
79 static_cast<scudo::uptr>(align));
80 }
81 INTERFACE WEAK void operator delete[](void *ptr,
82 std::align_val_t align) NOEXCEPT {
83 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
84 static_cast<scudo::uptr>(align));
85 }
86 INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
87 std::nothrow_t const &) NOEXCEPT {
88 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
89 static_cast<scudo::uptr>(align));
90 }
91 INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
92 std::nothrow_t const &) NOEXCEPT {
93 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
94 static_cast<scudo::uptr>(align));
95 }
96 INTERFACE WEAK void operator delete(void *ptr, size_t size,
97 std::align_val_t align) NOEXCEPT {
98 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
99 static_cast<scudo::uptr>(align));
100 }
101 INTERFACE WEAK void operator delete[](void *ptr, size_t size,
102 std::align_val_t align) NOEXCEPT {
103 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
104 static_cast<scudo::uptr>(align));
105 }
106
107#endif // !SCUDO_ANDROID || !_BIONIC
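
The analyzer's entry point into the allocator (event [1] above) is the nothrow array form at lines 35-38. Reaching it needs nothing Scudo-specific; a hedged usage sketch, assuming the program is linked against these weak overrides:

#include <cstdio>
#include <new>

int main() {
  // Resolves to the WEAK operator new[](size_t, std::nothrow_t const &)
  // above when these overrides are linked in; otherwise to the default one.
  int *p = new (std::nothrow) int[16];
  if (!p)
    return 1;
  p[0] = 42;
  std::printf("%d\n", p[0]);
  delete[] p; // resolves to the matching operator delete[]
  return 0;
}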

/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/scudo/standalone/combined.h

1//===-- combined.h ----------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_COMBINED_H_
10#define SCUDO_COMBINED_H_
11
12#include "chunk.h"
13#include "common.h"
14#include "flags.h"
15#include "flags_parser.h"
16#include "interface.h"
17#include "local_cache.h"
18#include "quarantine.h"
19#include "report.h"
20#include "secondary.h"
21#include "tsd.h"
22
23namespace scudo {
24
25template <class Params> class Allocator {
26public:
27 using PrimaryT = typename Params::Primary;
28 using CacheT = typename PrimaryT::CacheT;
29 typedef Allocator<Params> ThisT;
30 typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
31
32 struct QuarantineCallback {
33 explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
34 : Allocator(Instance), Cache(LocalCache) {}
35
36 // Chunk recycling function, returns a quarantined chunk to the backend,
37 // first making sure it hasn't been tampered with.
38 void recycle(void *Ptr) {
39 Chunk::UnpackedHeader Header;
40 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
41 if (UNLIKELY(Header.State != Chunk::State::Quarantined))
42 reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
43
44 Chunk::UnpackedHeader NewHeader = Header;
45 NewHeader.State = Chunk::State::Available;
46 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
47
48 void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
49 const uptr ClassId = NewHeader.ClassId;
50 if (LIKELY(ClassId))
51 Cache.deallocate(ClassId, BlockBegin);
52 else
53 Allocator.Secondary.deallocate(BlockBegin);
54 }
55
56 // We take a shortcut when allocating a quarantine batch by working with the
57 // appropriate class ID instead of using Size. The compiler should optimize
58 // the class ID computation and work with the associated cache directly.
59 void *allocate(UNUSED uptr Size) {
60 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
61 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
62 void *Ptr = Cache.allocate(QuarantineClassId);
63 // Quarantine batch allocation failure is fatal.
64 if (UNLIKELY(!Ptr))
65 reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
66
67 Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
68 Chunk::getHeaderSize());
69 Chunk::UnpackedHeader Header = {};
70 Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
71 Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
72 Header.State = Chunk::State::Allocated;
73 Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
74
75 return Ptr;
76 }
77
78 void deallocate(void *Ptr) {
79 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
80 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
81 Chunk::UnpackedHeader Header;
82 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
83
84 if (UNLIKELY(Header.State != Chunk::State::Allocated))
85 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
86 DCHECK_EQ(Header.ClassId, QuarantineClassId);
87 DCHECK_EQ(Header.Offset, 0);
88 DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
89
90 Chunk::UnpackedHeader NewHeader = Header;
91 NewHeader.State = Chunk::State::Available;
92 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
93 Cache.deallocate(QuarantineClassId,
94 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
95 Chunk::getHeaderSize()));
96 }
97
98 private:
99 ThisT &Allocator;
100 CacheT &Cache;
101 };
102
103 typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
104 typedef typename QuarantineT::CacheT QuarantineCacheT;
105
106 void initLinkerInitialized() {
107 performSanityChecks();
108
109 // Check if hardware CRC32 is supported in the binary and by the platform,
110 // if so, opt for the CRC32 hardware version of the checksum.
111 if (&computeHardwareCRC32 && hasHardwareCRC32())
112 HashAlgorithm = Checksum::HardwareCRC32;
113
114 if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
115 Cookie = static_cast<u32>(getMonotonicTime() ^
116 (reinterpret_cast<uptr>(this) >> 4));
117
118 initFlags();
119 reportUnrecognizedFlags();
120
121 // Store some flags locally.
122 Options.MayReturnNull = getFlags()->may_return_null;
123 Options.ZeroContents = getFlags()->zero_contents;
124 Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
125 Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
126 Options.QuarantineMaxChunkSize =
127 static_cast<u32>(getFlags()->quarantine_max_chunk_size);
128
129 Stats.initLinkerInitialized();
130 Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
131 Secondary.initLinkerInitialized(&Stats);
132
133 Quarantine.init(
134 static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
135 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
136 }
137
138 void reset() { memset(this, 0, sizeof(*this)); }
139
140 void unmapTestOnly() {
141 TSDRegistry.unmapTestOnly();
142 Primary.unmapTestOnly();
143 }
144
145 TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
146
147 void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
148
149 // Release the resources used by a TSD, which involves:
150 // - draining the local quarantine cache to the global quarantine;
151 // - releasing the cached pointers back to the Primary;
152 // - unlinking the local stats from the global ones (destroying the cache does
153 // the last two items).
154 void commitBack(TSD<ThisT> *TSD) {
155 Quarantine.drain(&TSD->QuarantineCache,
156 QuarantineCallback(*this, TSD->Cache));
157 TSD->Cache.destroy(&Stats);
158 }
159
160 NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
161 uptr Alignment = MinAlignment,
162 bool ZeroContents = false) {
163 initThreadMaybe();
164
165 if (UNLIKELY(Alignment > MaxAlignment)) {
  [2] Taking false branch
166 if (Options.MayReturnNull)
167 return nullptr;
168 reportAlignmentTooBig(Alignment, MaxAlignment);
169 }
170 if (Alignment < MinAlignment)
  [2.1] 'Alignment' is >= 'MinAlignment'
  [3] Taking false branch
171 Alignment = MinAlignment;
172
173 // If the requested size happens to be 0 (more common than you might think),
174 // allocate MinAlignment bytes on top of the header. Then add the extra
175 // bytes required to fulfill the alignment requirements: we allocate enough
176 // to be sure that there will be an address in the block that will satisfy
177 // the alignment.
178 const uptr NeededSize =
179 roundUpTo(Size, MinAlignment) +
180 ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
  [3.1] 'Alignment' is <= 'MinAlignment'
  [4] '?' condition is false
181
182 // Takes care of extravagantly large sizes as well as integer overflows.
183 if (UNLIKELY(Size >= MaxAllowedMallocSize ||
184 NeededSize >= MaxAllowedMallocSize)) {
  [5] Assuming 'Size' is < 'MaxAllowedMallocSize'
  [6] Assuming 'NeededSize' is < 'MaxAllowedMallocSize'
  [7] Taking false branch
185 if (Options.MayReturnNull)
186 return nullptr;
187 reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
188 }
189
190 void *Block;
191 uptr ClassId;
192 uptr BlockEnd;
  [8] 'BlockEnd' declared without an initial value
193 if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
  [9] Taking true branch
194 ClassId = SizeClassMap::getClassIdBySize(NeededSize);
  [10] Calling 'SizeClassMap::getClassIdBySize'
  [14] Returning from 'SizeClassMap::getClassIdBySize'
195 DCHECK_NE(ClassId, 0U);
196 bool UnlockRequired;
197 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
198 Block = TSD->Cache.allocate(ClassId);
199 if (UnlockRequired)
  [14.1] 'UnlockRequired' is false
  [15] Taking false branch
200 TSD->unlock();
201 } else {
202 ClassId = 0;
203 Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
204 }
205
206 if (UNLIKELY(!Block)) {
  [16] Assuming 'Block' is non-null
  [17] Taking false branch
207 if (Options.MayReturnNull)
208 return nullptr;
209 reportOutOfMemory(NeededSize);
210 }
211
212 // We only need to zero the contents for Primary backed allocations. This
213 // condition is not necessarily unlikely, but since memset is costly, we
214 // might as well mark it as such.
215 if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
  [17.1] 'ZeroContents' is false
  [18] Assuming field 'ZeroContents' is 0
  [19] Taking false branch
216 memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
217
218 Chunk::UnpackedHeader Header = {};
219 uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
220 if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
  [20] Taking false branch
221 const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
222 const uptr Offset = AlignedUserPtr - UserPtr;
223 DCHECK_GT(Offset, 2 * sizeof(u32));
224 // The BlockMarker has no security purpose, but is specifically meant for
225 // the chunk iteration function that can be used in debugging situations.
226 // It is the only situation where we have to locate the start of a chunk
227 // based on its block address.
228 reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
229 reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
230 UserPtr = AlignedUserPtr;
231 Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
232 }
233 Header.ClassId = ClassId & Chunk::ClassIdMask;
234 Header.State = Chunk::State::Allocated;
235 Header.Origin = Origin & Chunk::OriginMask;
236 Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
  [21] Assuming 'ClassId' is 0
  [22] '?' condition is false
  [23] The left operand of '-' is a garbage value
237 Chunk::SizeOrUnusedBytesMask;
238 void *Ptr = reinterpret_cast<void *>(UserPtr);
239 Chunk::storeHeader(Cookie, Ptr, &Header);
240
241 if (&__scudo_allocate_hook)
242 __scudo_allocate_hook(Ptr, Size);
243
244 return Ptr;
245 }
246
247 NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
248 UNUSED uptr Alignment = MinAlignment) {
249 // For a deallocation, we only ensure minimal initialization, meaning thread
250 // local data will be left uninitialized for now (when using ELF TLS). The
251 // fallback cache will be used instead. This is a workaround for a situation
252 // where the only heap operation performed in a thread would be a free past
253 // the TLS destructors, ending up in initialized thread specific data never
254 // being destroyed properly. Any other heap operation will do a full init.
255 initThreadMaybe(/*MinimalInit=*/true);
256
257 if (&__scudo_deallocate_hook)
258 __scudo_deallocate_hook(Ptr);
259
260 if (UNLIKELY(!Ptr))
261 return;
262 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
263 reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
264
265 Chunk::UnpackedHeader Header;
266 Chunk::loadHeader(Cookie, Ptr, &Header);
267
268 if (UNLIKELY(Header.State != Chunk::State::Allocated))
269 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
270 if (Options.DeallocTypeMismatch) {
271 if (Header.Origin != Origin) {
272 // With the exception of memalign'd chunks, that can be still be free'd.
273 if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
274 Origin != Chunk::Origin::Malloc))
275 reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
276 Header.Origin, Origin);
277 }
278 }
279
280 const uptr Size = getSize(Ptr, &Header);
281 if (DeleteSize && Options.DeleteSizeMismatch) {
282 if (UNLIKELY(DeleteSize != Size))
283 reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
284 }
285
286 quarantineOrDeallocateChunk(Ptr, &Header, Size);
287 }
288
289 void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
290 initThreadMaybe();
291
292 // The following cases are handled by the C wrappers.
293 DCHECK_NE(OldPtr, nullptr);
294 DCHECK_NE(NewSize, 0);
295
296 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
297 reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
298
299 Chunk::UnpackedHeader OldHeader;
300 Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
301
302 if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
303 reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
304
305 // Pointer has to be allocated with a malloc-type function. Some
306 // applications think that it is OK to realloc a memalign'ed pointer, which
307 // will trigger this check. It really isn't.
308 if (Options.DeallocTypeMismatch) {
309 if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
310 reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
311 OldHeader.Origin, Chunk::Origin::Malloc);
312 }
313
314 void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
315 uptr BlockEnd;
316 uptr OldSize;
317 const uptr ClassId = OldHeader.ClassId;
318 if (LIKELY(ClassId)) {
319 BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
320 SizeClassMap::getSizeByClassId(ClassId);
321 OldSize = OldHeader.SizeOrUnusedBytes;
322 } else {
323 BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
324 OldSize = BlockEnd -
325 (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
326 }
327 // If the new chunk still fits in the previously allocated block (with a
328 // reasonable delta), we just keep the old block, and update the chunk
329 // header to reflect the size change.
330 if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
331 const uptr Delta =
332 OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
333 if (Delta <= SizeClassMap::MaxSize / 2) {
334 Chunk::UnpackedHeader NewHeader = OldHeader;
335 NewHeader.SizeOrUnusedBytes =
336 (ClassId ? NewSize
337 : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
338 Chunk::SizeOrUnusedBytesMask;
339 Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
340 return OldPtr;
341 }
342 }
343
344 // Otherwise we allocate a new one, and deallocate the old one. Some
345 // allocators will allocate an even larger chunk (by a fixed factor) to
346 // allow for potential further in-place realloc. The gains of such a trick
347 // are currently unclear.
348 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
349 if (NewPtr) {
350 const uptr OldSize = getSize(OldPtr, &OldHeader);
351 memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
352 quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
353 }
354 return NewPtr;
355 }
356
357 // TODO(kostyak): while this locks the Primary & Secondary, it still allows
358 // pointers to be fetched from the TSD. We ultimately want to
359 // lock the registry as well. For now, it's good enough.
360 void disable() {
361 initThreadMaybe();
362 Primary.disable();
363 Secondary.disable();
364 }
365
366 void enable() {
367 initThreadMaybe();
368 Secondary.enable();
369 Primary.enable();
370 }
371
372 // The function returns the amount of bytes required to store the statistics,
373 // which might be larger than the amount of bytes provided. Note that the
374 // statistics buffer is not necessarily constant between calls to this
375 // function. This can be called with a null buffer or zero size for buffer
376 // sizing purposes.
377 uptr getStats(char *Buffer, uptr Size) {
378 ScopedString Str(1024);
379 disable();
380 const uptr Length = getStats(&Str) + 1;
381 enable();
382 if (Length < Size)
383 Size = Length;
384 if (Buffer && Size) {
385 memcpy(Buffer, Str.data(), Size);
386 Buffer[Size - 1] = '\0';
387 }
388 return Length;
389 }
390
391 void printStats() {
392 ScopedString Str(1024);
393 disable();
394 getStats(&Str);
395 enable();
396 Str.output();
397 }
398
399 void releaseToOS() { Primary.releaseToOS(); }
400
401 // Iterate over all chunks and call a callback for all busy chunks located
402 // within the provided memory range. Said callback must not use this allocator
403 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
404 void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
405 void *Arg) {
406 initThreadMaybe();
407 const uptr From = Base;
408 const uptr To = Base + Size;
409 auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
410 if (Block < From || Block >= To)
411 return;
412 uptr ChunkSize;
413 const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
414 if (ChunkBase != InvalidChunk)
415 Callback(ChunkBase, ChunkSize, Arg);
416 };
417 Primary.iterateOverBlocks(Lambda);
418 Secondary.iterateOverBlocks(Lambda);
419 }
420
421 bool canReturnNull() {
422 initThreadMaybe();
423 return Options.MayReturnNull;
424 }
425
426 // TODO(kostyak): implement this as a "backend" to mallopt.
427 bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
428
429 // Return the usable size for a given chunk. Technically we lie, as we just
430 // report the actual size of a chunk. This is done to counteract code actively
431 // writing past the end of a chunk (like sqlite3) when the usable size allows
432 // for it, which then forces realloc to copy the usable size of a chunk as
433 // opposed to its actual size.
434 uptr getUsableSize(const void *Ptr) {
435 initThreadMaybe();
436 if (UNLIKELY(!Ptr))
437 return 0;
438 Chunk::UnpackedHeader Header;
439 Chunk::loadHeader(Cookie, Ptr, &Header);
440 // Getting the usable size of a chunk only makes sense if it's allocated.
441 if (UNLIKELY(Header.State != Chunk::State::Allocated))
442 reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
443 return getSize(Ptr, &Header);
444 }
445
446 void getStats(StatCounters S) {
447 initThreadMaybe();
448 Stats.get(S);
449 }
450
451private:
452 typedef MapAllocator SecondaryT;
453 typedef typename PrimaryT::SizeClassMap SizeClassMap;
454
455 static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
456 static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
457 static const uptr MinAlignment = 1UL << MinAlignmentLog;
458 static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
459 static const uptr MaxAllowedMallocSize =
460 FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
461
462 // Constants used by the chunk iteration mechanism.
463 static const u32 BlockMarker = 0x44554353U;
464 static const uptr InvalidChunk = ~static_cast<uptr>(0);
465
466 GlobalStats Stats;
467 TSDRegistryT TSDRegistry;
468 PrimaryT Primary;
469 SecondaryT Secondary;
470 QuarantineT Quarantine;
471
472 u32 Cookie;
473
474 struct {
475 u8 MayReturnNull : 1; // may_return_null
476 u8 ZeroContents : 1; // zero_contents
477 u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
478 u8 DeleteSizeMismatch : 1; // delete_size_mismatch
479 u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
480 } Options;
481
482 // The following might get optimized out by the compiler.
483 NOINLINE void performSanityChecks() {
484 // Verify that the header offset field can hold the maximum offset. In the
485 // case of the Secondary allocator, it takes care of alignment and the
486 // offset will always be small. In the case of the Primary, the worst case
487 // scenario happens in the last size class, when the backend allocation
488 // would already be aligned on the requested alignment, which would happen
489 // to be the maximum alignment that would fit in that size class. As a
490 // result, the maximum offset will be at most the maximum alignment for the
491 // last size class minus the header size, in multiples of MinAlignment.
492 Chunk::UnpackedHeader Header = {};
493 const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
494 SizeClassMap::MaxSize - MinAlignment);
495 const uptr MaxOffset =
496 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
497 Header.Offset = MaxOffset & Chunk::OffsetMask;
498 if (UNLIKELY(Header.Offset != MaxOffset))
499 reportSanityCheckError("offset");
500
501 // Verify that we can fit the maximum size or amount of unused bytes in the
502 // header. Given that the Secondary fits the allocation to a page, the worst
503 // case scenario happens in the Primary. It will depend on the second to
504 // last and last class sizes, as well as the dynamic base for the Primary.
505 // The following is an over-approximation that works for our needs.
506 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
507 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
508 if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
509 reportSanityCheckError("size (or unused bytes)");
510
511 const uptr LargestClassId = SizeClassMap::LargestClassId;
512 Header.ClassId = LargestClassId;
513 if (UNLIKELY(Header.ClassId != LargestClassId))
514 reportSanityCheckError("class ID");
515 }
516
517 static INLINE void *getBlockBegin(const void *Ptr,
518 Chunk::UnpackedHeader *Header) {
519 return reinterpret_cast<void *>(
520 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
521 (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
522 }
523
524 // Return the size of a chunk as requested during its allocation.
525 INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
526 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
527 if (LIKELY(Header->ClassId))
528 return SizeOrUnusedBytes;
529 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
530 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
531 }
532
533 ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
534 TSDRegistry.initThreadMaybe(this, MinimalInit);
535 }
536
537 void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
538 uptr Size) {
539 Chunk::UnpackedHeader NewHeader = *Header;
540 // If the quarantine is disabled, the actual size of a chunk is 0 or larger
541 // than the maximum allowed, we return a chunk directly to the backend.
542 const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
543 (Size > Options.QuarantineMaxChunkSize);
544 if (BypassQuarantine) {
545 NewHeader.State = Chunk::State::Available;
546 Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
547 void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
548 const uptr ClassId = NewHeader.ClassId;
549 if (LIKELY(ClassId)) {
550 bool UnlockRequired;
551 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
552 TSD->Cache.deallocate(ClassId, BlockBegin);
553 if (UnlockRequired)
554 TSD->unlock();
555 } else {
556 Secondary.deallocate(BlockBegin);
557 }
558 } else {
559 NewHeader.State = Chunk::State::Quarantined;
560 Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
561 bool UnlockRequired;
562 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
563 Quarantine.put(&TSD->QuarantineCache,
564 QuarantineCallback(*this, TSD->Cache), Ptr, Size);
565 if (UnlockRequired)
566 TSD->unlock();
567 }
568 }
569
570 // This only cares about valid busy chunks. This might change in the future.
571 uptr getChunkFromBlock(uptr Block, uptr *Size) {
572 u32 Offset = 0;
573 if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
574 Offset = reinterpret_cast<u32 *>(Block)[1];
575 const uptr P = Block + Offset + Chunk::getHeaderSize();
576 const void *Ptr = reinterpret_cast<const void *>(P);
577 Chunk::UnpackedHeader Header;
578 if (!Chunk::isValid(Cookie, Ptr, &Header) ||
579 Header.State != Chunk::State::Allocated)
580 return InvalidChunk;
581 if (Size)
582 *Size = getSize(Ptr, &Header);
583 return P;
584 }
585
586 uptr getStats(ScopedString *Str) {
587 Primary.getStats(Str);
588 Secondary.getStats(Str);
589 Quarantine.getStats(Str);
590 return Str->length();
591 }
592};
593
594} // namespace scudo
595
596#endif // SCUDO_COMBINED_H_
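
Why the DCHECK_NE(ClassId, 0U) at line 195 does not stop the report: in a non-debug configuration a DCHECK-style macro typically expands to nothing, so the "ClassId != 0" constraint never reaches the analyzer, which is then free to assume ClassId == 0 at line 236 on the Primary path, where BlockEnd was never written. A small sketch of that effect, using an invented MY_DCHECK_NE macro rather than scudo's own:

// Hedged sketch: a compiled-out debug check adds no constraint to the analysis.
#include <cassert>
#include <cstdio>

#ifdef MY_DEBUG_BUILD
#define MY_DCHECK_NE(A, B) assert((A) != (B))
#else
#define MY_DCHECK_NE(A, B) ((void)0) // compiled out, the analyzer learns nothing
#endif

int classify(unsigned neededSize) {
  unsigned classId = (neededSize + 31) / 32; // simplified stand-in for getClassIdBySize
  MY_DCHECK_NE(classId, 0u); // has no effect unless MY_DEBUG_BUILD is defined
  return classId ? 1 : 0;
}

int main() {
  std::printf("%d\n", classify(64));
  return 0;
}

Since getClassIdBySize cannot return 0 for a non-zero size and NeededSize always includes the header, this path looks unreachable in practice, so the report is likely a false positive; giving BlockEnd a value at its declaration would be one way to silence it, though that is an assumption here, not a statement of the upstream fix.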

/build/llvm-toolchain-snapshot-10~svn374877/projects/compiler-rt/lib/scudo/standalone/size_class_map.h

1//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_SIZE_CLASS_MAP_H_
10#define SCUDO_SIZE_CLASS_MAP_H_
11
12#include "common.h"
13#include "string_utils.h"
14
15namespace scudo {
16
17// SizeClassMap maps allocation sizes into size classes and back, in an
18// efficient table-free manner.
19//
20// Class 0 is a special class that doesn't abide by the same rules as other
21// classes. The allocator uses it to hold batches.
22//
23// The other sizes are controlled by the template parameters:
24// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
25// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
26// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
27// 2^MidSizeLog bytes.
28// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
29// eg. with NumBits==3 all size classes after 2^MidSizeLog look like
30// 0b1xx0..0 (where x is either 0 or 1).
31//
32// This class also gives a hint to a thread-caching allocator about the amount
33// of chunks that can be cached per-thread:
34// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
35// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
36
37template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
38 u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
39class SizeClassMap {
40 static const uptr MinSize = 1UL << MinSizeLog;
41 static const uptr MidSize = 1UL << MidSizeLog;
42 static const uptr MidClass = MidSize / MinSize;
43 static const u8 S = NumBits - 1;
44 static const uptr M = (1UL << S) - 1;
45
46public:
47 static const u32 MaxNumCachedHint = MaxNumCachedHintT;
48
49 static const uptr MaxSize = 1UL << MaxSizeLog;
50 static const uptr NumClasses =
51 MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
52 COMPILER_CHECK(NumClasses <= 256);
53 static const uptr LargestClassId = NumClasses - 1;
54 static const uptr BatchClassId = 0;
55
56 static uptr getSizeByClassId(uptr ClassId) {
57 DCHECK_NE(ClassId, BatchClassId);
58 if (ClassId <= MidClass)
59 return ClassId << MinSizeLog;
60 ClassId -= MidClass;
61 const uptr T = MidSize << (ClassId >> S);
62 return T + (T >> S) * (ClassId & M);
63 }
64
65 static uptr getClassIdBySize(uptr Size) {
66 DCHECK_LE(Size, MaxSize);
67 if (Size <= MidSize)
  [11] Assuming 'Size' is <= 'MidSize'
  [12] Taking true branch
68 return (Size + MinSize - 1) >> MinSizeLog;
  [13] Returning value, which participates in a condition later
69 const uptr L = getMostSignificantSetBitIndex(Size);
70 const uptr HBits = (Size >> (L - S)) & M;
71 const uptr LBits = Size & ((1UL << (L - S)) - 1);
72 const uptr L1 = L - MidSizeLog;
73 return MidClass + (L1 << S) + HBits + (LBits > 0);
74 }
75
76 static u32 getMaxCachedHint(uptr Size) {
77 DCHECK_LE(Size, MaxSize);
78 DCHECK_NE(Size, 0);
79 u32 N;
80 // Force a 32-bit division if the template parameters allow for it.
81 if (MaxBytesCachedLog > 31 || MaxSizeLog > 31)
82 N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size);
83 else
84 N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size);
85 return Max(1U, Min(MaxNumCachedHint, N));
86 }
87
88 static void print() {
89 ScopedString Buffer(1024);
90 uptr PrevS = 0;
91 uptr TotalCached = 0;
92 for (uptr I = 0; I < NumClasses; I++) {
93 if (I == BatchClassId)
94 continue;
95 const uptr S = getSizeByClassId(I);
96 if (S >= MidSize / 2 && (S & (S - 1)) == 0)
97 Buffer.append("\n");
98 const uptr D = S - PrevS;
99 const uptr P = PrevS ? (D * 100 / PrevS) : 0;
100 const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
101 const uptr Cached = getMaxCachedHint(S) * S;
102 Buffer.append(
103 "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
104 I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
105 getClassIdBySize(S));
106 TotalCached += Cached;
107 PrevS = S;
108 }
109 Buffer.append("Total Cached: %zu\n", TotalCached);
110 Buffer.output();
111 }
112
113 static void validate() {
114 for (uptr C = 0; C < NumClasses; C++) {
115 if (C == BatchClassId)
116 continue;
117 const uptr S = getSizeByClassId(C);
118 CHECK_NE(S, 0U);
119 CHECK_EQ(getClassIdBySize(S), C);
120 if (C < LargestClassId)
121 CHECK_EQ(getClassIdBySize(S + 1), C + 1);
122 CHECK_EQ(getClassIdBySize(S - 1), C);
123 CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
124 }
125 // Do not perform the loop if the maximum size is too large.
126 if (MaxSizeLog > 19)
127 return;
128 for (uptr S = 1; S <= MaxSize; S++) {
129 const uptr C = getClassIdBySize(S);
130 CHECK_LT(C, NumClasses);
131 CHECK_GE(getSizeByClassId(C), S);
132 if (C > 0)
133 CHECK_LT(getSizeByClassId(C - 1), S);
134 }
135 }
136};
137
138typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
139
140// TODO(kostyak): further tune class maps for Android & Fuchsia.
141 #if SCUDO_WORDSIZE == 64U
142typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
143typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
144#else
145typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
146typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
147#endif
148
149} // namespace scudo
150
151#endif // SCUDO_SIZE_CLASS_MAP_H_
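
For reference, a worked example of the size-class mapping above, instantiated with the DefaultSizeClassMap parameters (NumBits=3, MinSizeLog=5, MidSizeLog=8, MaxSizeLog=17): sizes up to MidSize=256 map linearly in steps of MinSize=32, and larger sizes map to classes with NumBits-1 fractional bits. This is a standalone re-implementation for illustration only (the demo namespace and msb helper are not part of scudo), reproducing the same arithmetic:

#include <cstdint>
#include <cstdio>

namespace demo {
constexpr uint64_t MinSizeLog = 5, MidSizeLog = 8, NumBits = 3;
constexpr uint64_t MinSize = 1ULL << MinSizeLog; // 32
constexpr uint64_t MidSize = 1ULL << MidSizeLog; // 256
constexpr uint64_t MidClass = MidSize / MinSize; // 8
constexpr uint64_t S = NumBits - 1;              // 2
constexpr uint64_t M = (1ULL << S) - 1;          // 3

inline uint64_t msb(uint64_t X) { return 63 - __builtin_clzll(X); }

inline uint64_t getClassIdBySize(uint64_t Size) {
  if (Size <= MidSize)
    return (Size + MinSize - 1) >> MinSizeLog;
  const uint64_t L = msb(Size);
  const uint64_t HBits = (Size >> (L - S)) & M;
  const uint64_t LBits = Size & ((1ULL << (L - S)) - 1);
  return MidClass + ((L - MidSizeLog) << S) + HBits + (LBits > 0);
}

inline uint64_t getSizeByClassId(uint64_t ClassId) {
  if (ClassId <= MidClass)
    return ClassId << MinSizeLog;
  ClassId -= MidClass;
  const uint64_t T = MidSize << (ClassId >> S);
  return T + (T >> S) * (ClassId & M);
}
} // namespace demo

int main() {
  // 48 -> class 2 (64 bytes), 256 -> class 8 (256 bytes),
  // 1000 -> class 16 (1024 bytes).
  const uint64_t Sizes[] = {48, 256, 1000};
  for (uint64_t Size : Sizes) {
    const uint64_t C = demo::getClassIdBySize(Size);
    std::printf("size %4llu -> class %2llu -> %4llu bytes\n",
                (unsigned long long)Size, (unsigned long long)C,
                (unsigned long long)demo::getSizeByClassId(C));
  }
  return 0;
}

So, for example, 48 bytes lands in class 2 (64 bytes), 256 bytes exactly fills class 8, and 1000 bytes rounds up to class 16 (1024 bytes).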