Bug Summary

File: compiler-rt/lib/scudo/standalone/combined.h
Warning: line 278, column 59
The left operand of '-' is a garbage value
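
In isolation, the diagnosed pattern reduces to the following standalone sketch (all names are hypothetical; this is not Scudo code): a local is written only on one branch, and later read as the left operand of '-' under a condition the analyzer cannot correlate with that branch.

// Illustrative reduction of the diagnosed pattern; names are hypothetical.
extern bool fitsInPrimary(unsigned long Size);                // opaque to the analyzer
extern unsigned long classify(unsigned long Size);
extern void allocateSecondary(unsigned long Size, unsigned long *BlockEnd);

unsigned long reduced(unsigned long Size, unsigned long UserPtr) {
  unsigned long ClassId;
  unsigned long BlockEnd;                    // "declared without an initial value"
  if (fitsInPrimary(Size)) {
    ClassId = classify(Size);                // expected to be non-zero here
  } else {
    ClassId = 0;
    allocateSecondary(Size, &BlockEnd);      // the only write to BlockEnd
  }
  // BlockEnd is read under a different condition (ClassId == 0) than the one it
  // was written under (!fitsInPrimary(Size)). The analyzer cannot prove that
  // classify() never returns 0, so it assumes ClassId == 0 on the primary path,
  // where BlockEnd is still uninitialized: the left operand of '-' is garbage.
  return ClassId ? Size : BlockEnd - (UserPtr + Size);
}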

Annotated Source Code


clang -cc1 -triple i386-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name wrappers_cpp.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -ffreestanding -target-cpu i686 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/projects/compiler-rt/lib/scudo/standalone -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/compiler-rt/lib/scudo/standalone -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/include -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/compiler-rt/lib/scudo/standalone/../.. -U NDEBUG -D GWP_ASAN_HOOKS -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0/32 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/i386-pc-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/projects/compiler-rt/lib/scudo/standalone -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fcxx-exceptions -fexceptions -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-01-13-084841-49055-1 -x c++ /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp

1//===-- wrappers_cpp.cpp ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "platform.h"
10
11// Skip this compilation unit if compiled as part of Bionic.
12#if !SCUDO_ANDROID || !_BIONIC
13
14#include "allocator_config.h"
15
16#include <stdint.h>
17
18extern scudo::Allocator<scudo::Config> *AllocatorPtr;
19
20namespace std {
21struct nothrow_t {};
22enum class align_val_t : size_t {};
23} // namespace std
24
25INTERFACE WEAK void *operator new(size_t size) {
26 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
27}
28INTERFACE WEAK void *operator new[](size_t size) {
29 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
30}
31INTERFACE WEAK void *operator new(size_t size,
32 std::nothrow_t const &) NOEXCEPT {
33 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
34}
35INTERFACE WEAK void *operator new[](size_t size,
36 std::nothrow_t const &) NOEXCEPT {
37 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
  1. Calling 'Allocator::allocate'
38}
39INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
40 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
41 static_cast<scudo::uptr>(align));
42}
43INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
44 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
45 static_cast<scudo::uptr>(align));
46}
47INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
48 std::nothrow_t const &) NOEXCEPT {
49 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
50 static_cast<scudo::uptr>(align));
51}
52INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
53 std::nothrow_t const &) NOEXCEPT {
54 return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
55 static_cast<scudo::uptr>(align));
56}
57
58INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
59 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
60}
61INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
62 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
63}
64INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &) NOEXCEPT {
65 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
66}
67INTERFACE WEAK void operator delete[](void *ptr,
68 std::nothrow_t const &) NOEXCEPT {
69 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
70}
71INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
72 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
73}
74INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
75 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
76}
77INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
78 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
79 static_cast<scudo::uptr>(align));
80}
81INTERFACE WEAK void operator delete[](void *ptr,
82 std::align_val_t align) NOEXCEPT {
83 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
84 static_cast<scudo::uptr>(align));
85}
86INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
87 std::nothrow_t const &) NOEXCEPT {
88 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
89 static_cast<scudo::uptr>(align));
90}
91INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
92 std::nothrow_t const &) NOEXCEPT {
93 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
94 static_cast<scudo::uptr>(align));
95}
96INTERFACE WEAK void operator delete(void *ptr, size_t size,
97 std::align_val_t align) NOEXCEPT {
98 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
99 static_cast<scudo::uptr>(align));
100}
101INTERFACE WEAK void operator delete[](void *ptr, size_t size,
102 std::align_val_t align) NOEXCEPT {
103 AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
104 static_cast<scudo::uptr>(align));
105}
106
107#endif // !SCUDO_ANDROID || !_BIONIC
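
For context, the analyzer's path starts in the nothrow operator new[] overload above (line 37). A trivial driver that would reach that entry point at run time, assuming the program is linked against the Scudo standalone library that provides these weak definitions and AllocatorPtr, could look like this:

// driver.cpp: exercises the nothrow array new/delete overrides (illustrative only).
#include <new>  // std::nothrow

int main() {
  int *P = new (std::nothrow) int[16];  // enters operator new[](size_t, std::nothrow_t const &)
  if (P)
    delete[] P;                         // enters operator delete[](void *)
  return 0;
}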

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/compiler-rt/lib/scudo/standalone/combined.h

1//===-- combined.h ----------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_COMBINED_H_
10#define SCUDO_COMBINED_H_
11
12#include "chunk.h"
13#include "common.h"
14#include "flags.h"
15#include "flags_parser.h"
16#include "interface.h"
17#include "local_cache.h"
18#include "quarantine.h"
19#include "report.h"
20#include "secondary.h"
21#include "string_utils.h"
22#include "tsd.h"
23
24#ifdef GWP_ASAN_HOOKS
25#include "gwp_asan/guarded_pool_allocator.h"
26// GWP-ASan is declared here in order to avoid indirect call overhead. It's also
27// instantiated outside of the Allocator class, as the allocator is only
28// zero-initialised. GWP-ASan requires constant initialisation, and the Scudo
29// allocator doesn't have a constexpr constructor (see discussion here:
30// https://reviews.llvm.org/D69265#inline-624315).
31static gwp_asan::GuardedPoolAllocator GuardedAlloc;
32#endif // GWP_ASAN_HOOKS
33
34namespace scudo {
35
36template <class Params> class Allocator {
37public:
38 using PrimaryT = typename Params::Primary;
39 using CacheT = typename PrimaryT::CacheT;
40 typedef Allocator<Params> ThisT;
41 typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
42
43 struct QuarantineCallback {
44 explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
45 : Allocator(Instance), Cache(LocalCache) {}
46
47 // Chunk recycling function, returns a quarantined chunk to the backend,
48 // first making sure it hasn't been tampered with.
49 void recycle(void *Ptr) {
50 Chunk::UnpackedHeader Header;
51 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
52 if (UNLIKELY(Header.State != Chunk::State::Quarantined))
53 reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
54
55 Chunk::UnpackedHeader NewHeader = Header;
56 NewHeader.State = Chunk::State::Available;
57 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
58
59 void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
60 const uptr ClassId = NewHeader.ClassId;
61 if (LIKELY(ClassId))
62 Cache.deallocate(ClassId, BlockBegin);
63 else
64 Allocator.Secondary.deallocate(BlockBegin);
65 }
66
67 // We take a shortcut when allocating a quarantine batch by working with the
68 // appropriate class ID instead of using Size. The compiler should optimize
69 // the class ID computation and work with the associated cache directly.
70 void *allocate(UNUSED uptr Size) {
71 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
72 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
73 void *Ptr = Cache.allocate(QuarantineClassId);
74 // Quarantine batch allocation failure is fatal.
75 if (UNLIKELY(!Ptr))
76 reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
77
78 Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
79 Chunk::getHeaderSize());
80 Chunk::UnpackedHeader Header = {};
81 Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
82 Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
83 Header.State = Chunk::State::Allocated;
84 Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
85
86 return Ptr;
87 }
88
89 void deallocate(void *Ptr) {
90 const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
91 sizeof(QuarantineBatch) + Chunk::getHeaderSize());
92 Chunk::UnpackedHeader Header;
93 Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
94
95 if (UNLIKELY(Header.State != Chunk::State::Allocated))
96 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
97 DCHECK_EQ(Header.ClassId, QuarantineClassId);
98 DCHECK_EQ(Header.Offset, 0);
99 DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
100
101 Chunk::UnpackedHeader NewHeader = Header;
102 NewHeader.State = Chunk::State::Available;
103 Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
104 Cache.deallocate(QuarantineClassId,
105 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
106 Chunk::getHeaderSize()));
107 }
108
109 private:
110 ThisT &Allocator;
111 CacheT &Cache;
112 };
113
114 typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
115 typedef typename QuarantineT::CacheT QuarantineCacheT;
116
117 void initLinkerInitialized() {
118 performSanityChecks();
119
120 // Check if hardware CRC32 is supported in the binary and by the platform,
121 // if so, opt for the CRC32 hardware version of the checksum.
122 if (&computeHardwareCRC32 && hasHardwareCRC32())
123 HashAlgorithm = Checksum::HardwareCRC32;
124
125 if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
126 Cookie = static_cast<u32>(getMonotonicTime() ^
127 (reinterpret_cast<uptr>(this) >> 4));
128
129 initFlags();
130 reportUnrecognizedFlags();
131
132 // Store some flags locally.
133 Options.MayReturnNull = getFlags()->may_return_null;
134 Options.ZeroContents = getFlags()->zero_contents;
135 Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
136 Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
137 Options.QuarantineMaxChunkSize =
138 static_cast<u32>(getFlags()->quarantine_max_chunk_size);
139
140 Stats.initLinkerInitialized();
141 Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
142 Secondary.initLinkerInitialized(&Stats);
143
144 Quarantine.init(
145 static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
146 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
147
148#ifdef GWP_ASAN_HOOKS
149 gwp_asan::options::Options Opt;
150 Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
151 // Bear in mind - Scudo has its own alignment guarantees that are strictly
152 // enforced. Scudo exposes the same allocation function for everything from
153 // malloc() to posix_memalign, so in general this flag goes unused, as Scudo
154 // will always ask GWP-ASan for an aligned amount of bytes.
155 Opt.PerfectlyRightAlign = getFlags()->GWP_ASAN_PerfectlyRightAlign;
156 Opt.MaxSimultaneousAllocations =
157 getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
158 Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
159 Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
160 Opt.Printf = Printf;
161 GuardedAlloc.init(Opt);
162#endif // GWP_ASAN_HOOKS
163 }
164
165 void reset() { memset(this, 0, sizeof(*this)); }
166
167 void unmapTestOnly() {
168 TSDRegistry.unmapTestOnly();
169 Primary.unmapTestOnly();
170 }
171
172 TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
173
174 // The Cache must be provided zero-initialized.
175 void initCache(CacheT *Cache) {
176 Cache->initLinkerInitialized(&Stats, &Primary);
177 }
178
179 // Release the resources used by a TSD, which involves:
180 // - draining the local quarantine cache to the global quarantine;
181 // - releasing the cached pointers back to the Primary;
182 // - unlinking the local stats from the global ones (destroying the cache does
183 // the last two items).
184 void commitBack(TSD<ThisT> *TSD) {
185 Quarantine.drain(&TSD->QuarantineCache,
186 QuarantineCallback(*this, TSD->Cache));
187 TSD->Cache.destroy(&Stats);
188 }
189
190 NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
191 uptr Alignment = MinAlignment,
192 bool ZeroContents = false) {
193 initThreadMaybe();
194
195#ifdef GWP_ASAN_HOOKS
196 if (UNLIKELY(GuardedAlloc.shouldSample())) {
  2. Taking false branch
197 if (void *Ptr = GuardedAlloc.allocate(roundUpTo(Size, Alignment)))
198 return Ptr;
199 }
200#endif // GWP_ASAN_HOOKS
201
202 ZeroContents |= static_cast<bool>(Options.ZeroContents);
203
204 if (UNLIKELY(Alignment > MaxAlignment)) {
  3. Taking false branch
205 if (Options.MayReturnNull)
206 return nullptr;
207 reportAlignmentTooBig(Alignment, MaxAlignment);
208 }
209 if (Alignment < MinAlignment)
  3.1. 'Alignment' is >= 'MinAlignment'
  4. Taking false branch
210 Alignment = MinAlignment;
211
212 // If the requested size happens to be 0 (more common than you might think),
213 // allocate MinAlignment bytes on top of the header. Then add the extra
214 // bytes required to fulfill the alignment requirements: we allocate enough
215 // to be sure that there will be an address in the block that will satisfy
216 // the alignment.
217 const uptr NeededSize =
218 roundUpTo(Size, MinAlignment) +
219 ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
  4.1. 'Alignment' is <= 'MinAlignment'
  5. '?' condition is false
220
221 // Takes care of extravagantly large sizes as well as integer overflows.
222 static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
223 if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
  6. Assuming 'Size' is < 'MaxAllowedMallocSize'
  7. Taking false branch
224 if (Options.MayReturnNull)
225 return nullptr;
226 reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
227 }
228 DCHECK_LE(Size, NeededSize);
229
230 void *Block;
231 uptr ClassId;
232 uptr BlockEnd;
  8. 'BlockEnd' declared without an initial value
233 if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
  9. Taking true branch
234 ClassId = SizeClassMap::getClassIdBySize(NeededSize);
  10. Calling 'SizeClassMap::getClassIdBySize'
  14. Returning from 'SizeClassMap::getClassIdBySize'
235 DCHECK_NE(ClassId, 0U);
236 bool UnlockRequired;
237 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
238 Block = TSD->Cache.allocate(ClassId);
239 if (UnlockRequired)
  14.1. 'UnlockRequired' is false
  15. Taking false branch
240 TSD->unlock();
241 } else {
242 ClassId = 0;
243 Block =
244 Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents);
245 }
246
247 if (UNLIKELY(!Block)) {
  16. Assuming 'Block' is non-null
  17. Taking false branch
248 if (Options.MayReturnNull)
249 return nullptr;
250 reportOutOfMemory(NeededSize);
251 }
252
253 // We only need to zero the contents for Primary backed allocations. This
254 // condition is not necessarily unlikely, but since memset is costly, we
255 // might as well mark it as such.
256 if (UNLIKELY(ZeroContents && ClassId))
  18. Assuming 'ZeroContents' is false
  19. Taking false branch
257 memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
258
259 const uptr UnalignedUserPtr =
260 reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
261 const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
262
263 Chunk::UnpackedHeader Header = {};
264 if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
  20. Assuming 'UnalignedUserPtr' is equal to 'UserPtr'
  21. Taking false branch
265 const uptr Offset = UserPtr - UnalignedUserPtr;
266 DCHECK_GE(Offset, 2 * sizeof(u32));
267 // The BlockMarker has no security purpose, but is specifically meant for
268 // the chunk iteration function that can be used in debugging situations.
269 // It is the only situation where we have to locate the start of a chunk
270 // based on its block address.
271 reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
272 reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
273 Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
274 }
275 Header.ClassId = ClassId & Chunk::ClassIdMask;
276 Header.State = Chunk::State::Allocated;
277 Header.Origin = Origin & Chunk::OriginMask;
278 Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
  22. Assuming 'ClassId' is 0
  23. '?' condition is false
  24. The left operand of '-' is a garbage value
279 Chunk::SizeOrUnusedBytesMask;
280 void *Ptr = reinterpret_cast<void *>(UserPtr);
281 Chunk::storeHeader(Cookie, Ptr, &Header);
282
283 if (&__scudo_allocate_hook)
284 __scudo_allocate_hook(Ptr, Size);
285
286 return Ptr;
287 }
288
289 NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
290 UNUSED uptr Alignment = MinAlignment) {
291 // For a deallocation, we only ensure minimal initialization, meaning thread
292 // local data will be left uninitialized for now (when using ELF TLS). The
293 // fallback cache will be used instead. This is a workaround for a situation
294 // where the only heap operation performed in a thread would be a free past
295 // the TLS destructors, ending up in initialized thread specific data never
296 // being destroyed properly. Any other heap operation will do a full init.
297 initThreadMaybe(/*MinimalInit=*/true);
298
299#ifdef GWP_ASAN_HOOKS
300 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
301 GuardedAlloc.deallocate(Ptr);
302 return;
303 }
304#endif // GWP_ASAN_HOOKS
305
306 if (&__scudo_deallocate_hook)
307 __scudo_deallocate_hook(Ptr);
308
309 if (UNLIKELY(!Ptr))
310 return;
311 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
312 reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
313
314 Chunk::UnpackedHeader Header;
315 Chunk::loadHeader(Cookie, Ptr, &Header);
316
317 if (UNLIKELY(Header.State != Chunk::State::Allocated))
318 reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
319 if (Options.DeallocTypeMismatch) {
320 if (Header.Origin != Origin) {
321 // With the exception of memalign'd chunks, which can still be free'd.
322 if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
323 Origin != Chunk::Origin::Malloc))
324 reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
325 Header.Origin, Origin);
326 }
327 }
328
329 const uptr Size = getSize(Ptr, &Header);
330 if (DeleteSize && Options.DeleteSizeMismatch) {
331 if (UNLIKELY(DeleteSize != Size))
332 reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
333 }
334
335 quarantineOrDeallocateChunk(Ptr, &Header, Size);
336 }
337
338 void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
339 initThreadMaybe();
340
341 // The following cases are handled by the C wrappers.
342 DCHECK_NE(OldPtr, nullptr);
343 DCHECK_NE(NewSize, 0);
344
345#ifdef GWP_ASAN_HOOKS
346 if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
347 uptr OldSize = GuardedAlloc.getSize(OldPtr);
348 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
349 if (NewPtr)
350 memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
351 GuardedAlloc.deallocate(OldPtr);
352 return NewPtr;
353 }
354#endif // GWP_ASAN_HOOKS
355
356 if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
357 reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
358
359 Chunk::UnpackedHeader OldHeader;
360 Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
361
362 if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
363 reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
364
365 // Pointer has to be allocated with a malloc-type function. Some
366 // applications think that it is OK to realloc a memalign'ed pointer, which
367 // will trigger this check. It really isn't.
368 if (Options.DeallocTypeMismatch) {
369 if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
370 reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
371 OldHeader.Origin, Chunk::Origin::Malloc);
372 }
373
374 void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
375 uptr BlockEnd;
376 uptr OldSize;
377 const uptr ClassId = OldHeader.ClassId;
378 if (LIKELY(ClassId)) {
379 BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
380 SizeClassMap::getSizeByClassId(ClassId);
381 OldSize = OldHeader.SizeOrUnusedBytes;
382 } else {
383 BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
384 OldSize = BlockEnd -
385 (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
386 }
387 // If the new chunk still fits in the previously allocated block (with a
388 // reasonable delta), we just keep the old block, and update the chunk
389 // header to reflect the size change.
390 if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
391 const uptr Delta =
392 OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
393 if (Delta <= SizeClassMap::MaxSize / 2) {
394 Chunk::UnpackedHeader NewHeader = OldHeader;
395 NewHeader.SizeOrUnusedBytes =
396 (ClassId ? NewSize
397 : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
398 Chunk::SizeOrUnusedBytesMask;
399 Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
400 return OldPtr;
401 }
402 }
403
404 // Otherwise we allocate a new one, and deallocate the old one. Some
405 // allocators will allocate an even larger chunk (by a fixed factor) to
406 // allow for potential further in-place realloc. The gains of such a trick
407 // are currently unclear.
408 void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
409 if (NewPtr) {
410 const uptr OldSize = getSize(OldPtr, &OldHeader);
411 memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
412 quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
413 }
414 return NewPtr;
415 }
416
417 // TODO(kostyak): disable() is currently best-effort. There are some small
418 // windows of time when an allocation could still succeed after
419 // this function finishes. We will revisit that later.
420 void disable() {
421 initThreadMaybe();
422 TSDRegistry.disable();
423 Secondary.disable();
424 }
425
426 void enable() {
427 initThreadMaybe();
428 Secondary.enable();
429 TSDRegistry.enable();
430 }
431
432 // The function returns the amount of bytes required to store the statistics,
433 // which might be larger than the amount of bytes provided. Note that the
434 // statistics buffer is not necessarily constant between calls to this
435 // function. This can be called with a null buffer or zero size for buffer
436 // sizing purposes.
437 uptr getStats(char *Buffer, uptr Size) {
438 ScopedString Str(1024);
439 disable();
440 const uptr Length = getStats(&Str) + 1;
441 enable();
442 if (Length < Size)
443 Size = Length;
444 if (Buffer && Size) {
445 memcpy(Buffer, Str.data(), Size);
446 Buffer[Size - 1] = '\0';
447 }
448 return Length;
449 }
450
451 void printStats() {
452 ScopedString Str(1024);
453 disable();
454 getStats(&Str);
455 enable();
456 Str.output();
457 }
458
459 void releaseToOS() {
460 initThreadMaybe();
461 Primary.releaseToOS();
462 }
463
464 // Iterate over all chunks and call a callback for all busy chunks located
465 // within the provided memory range. Said callback must not use this allocator
466 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
467 void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
468 void *Arg) {
469 initThreadMaybe();
470 const uptr From = Base;
471 const uptr To = Base + Size;
472 auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
473 if (Block < From || Block >= To)
474 return;
475 uptr Chunk;
476 Chunk::UnpackedHeader Header;
477 if (getChunkFromBlock(Block, &Chunk, &Header) &&
478 Header.State == Chunk::State::Allocated)
479 Callback(Chunk, getSize(reinterpret_cast<void *>(Chunk), &Header), Arg);
480 };
481 Primary.iterateOverBlocks(Lambda);
482 Secondary.iterateOverBlocks(Lambda);
483 }
484
485 bool canReturnNull() {
486 initThreadMaybe();
487 return Options.MayReturnNull;
488 }
489
490 // TODO(kostyak): implement this as a "backend" to mallopt.
491 bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
492
493 // Return the usable size for a given chunk. Technically we lie, as we just
494 // report the actual size of a chunk. This is done to counteract code actively
495 // writing past the end of a chunk (like sqlite3) when the usable size allows
496 // for it, which then forces realloc to copy the usable size of a chunk as
497 // opposed to its actual size.
498 uptr getUsableSize(const void *Ptr) {
499 initThreadMaybe();
500 if (UNLIKELY(!Ptr))
501 return 0;
502
503#ifdef GWP_ASAN_HOOKS
504 if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
505 return GuardedAlloc.getSize(Ptr);
506#endif // GWP_ASAN_HOOKS
507
508 Chunk::UnpackedHeader Header;
509 Chunk::loadHeader(Cookie, Ptr, &Header);
510 // Getting the usable size of a chunk only makes sense if it's allocated.
511 if (UNLIKELY(Header.State != Chunk::State::Allocated))
512 reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
513 return getSize(Ptr, &Header);
514 }
515
516 void getStats(StatCounters S) {
517 initThreadMaybe();
518 Stats.get(S);
519 }
520
521 // Returns true if the pointer provided was allocated by the current
522 // allocator instance, which is compliant with tcmalloc's ownership concept.
523 // A corrupted chunk will not be reported as owned, which is WAI.
524 bool isOwned(const void *Ptr) {
525 initThreadMaybe();
526#ifdef GWP_ASAN_HOOKS
527 if (GuardedAlloc.pointerIsMine(Ptr))
528 return true;
529#endif // GWP_ASAN_HOOKS
530 if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
531 return false;
532 Chunk::UnpackedHeader Header;
533 return Chunk::isValid(Cookie, Ptr, &Header) &&
534 Header.State == Chunk::State::Allocated;
535 }
536
537private:
538 using SecondaryT = typename Params::Secondary;
539 typedef typename PrimaryT::SizeClassMap SizeClassMap;
540
541 static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
542 static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
543 static const uptr MinAlignment = 1UL << MinAlignmentLog;
544 static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
545 static const uptr MaxAllowedMallocSize =
546 FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
547
548 static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
549 "Minimal alignment must at least cover a chunk header.");
550
551 static const u32 BlockMarker = 0x44554353U;
552
553 GlobalStats Stats;
554 TSDRegistryT TSDRegistry;
555 PrimaryT Primary;
556 SecondaryT Secondary;
557 QuarantineT Quarantine;
558
559 u32 Cookie;
560
561 struct {
562 u8 MayReturnNull : 1; // may_return_null
563 u8 ZeroContents : 1; // zero_contents
564 u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
565 u8 DeleteSizeMismatch : 1; // delete_size_mismatch
566 u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
567 } Options;
568
569 // The following might get optimized out by the compiler.
570 NOINLINE void performSanityChecks() {
571 // Verify that the header offset field can hold the maximum offset. In the
572 // case of the Secondary allocator, it takes care of alignment and the
573 // offset will always be small. In the case of the Primary, the worst case
574 // scenario happens in the last size class, when the backend allocation
575 // would already be aligned on the requested alignment, which would happen
576 // to be the maximum alignment that would fit in that size class. As a
577 // result, the maximum offset will be at most the maximum alignment for the
578 // last size class minus the header size, in multiples of MinAlignment.
579 Chunk::UnpackedHeader Header = {};
580 const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
581 SizeClassMap::MaxSize - MinAlignment);
582 const uptr MaxOffset =
583 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
584 Header.Offset = MaxOffset & Chunk::OffsetMask;
585 if (UNLIKELY(Header.Offset != MaxOffset))
586 reportSanityCheckError("offset");
587
588 // Verify that we can fit the maximum size or amount of unused bytes in the
589 // header. Given that the Secondary fits the allocation to a page, the worst
590 // case scenario happens in the Primary. It will depend on the second to
591 // last and last class sizes, as well as the dynamic base for the Primary.
592 // The following is an over-approximation that works for our needs.
593 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
594 Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
595 if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
596 reportSanityCheckError("size (or unused bytes)");
597
598 const uptr LargestClassId = SizeClassMap::LargestClassId;
599 Header.ClassId = LargestClassId;
600 if (UNLIKELY(Header.ClassId != LargestClassId))
601 reportSanityCheckError("class ID");
602 }
603
604 static inline void *getBlockBegin(const void *Ptr,
605 Chunk::UnpackedHeader *Header) {
606 return reinterpret_cast<void *>(
607 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
608 (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
609 }
610
611 // Return the size of a chunk as requested during its allocation.
612 inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
613 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
614 if (LIKELY(Header->ClassId))
615 return SizeOrUnusedBytes;
616 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
617 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
618 }
619
620 ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
621 TSDRegistry.initThreadMaybe(this, MinimalInit);
622 }
623
624 void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
625 uptr Size) {
626 Chunk::UnpackedHeader NewHeader = *Header;
627 // If the quarantine is disabled, or if the actual size of a chunk is 0 or
628 // larger than the maximum allowed, we return a chunk directly to the backend.
629 // Logical Or can be short-circuited, which introduces unnecessary
630 // conditional jumps, so use bitwise Or and let the compiler be clever.
631 const bool BypassQuarantine = !Quarantine.getCacheSize() | !Size |
632 (Size > Options.QuarantineMaxChunkSize);
633 if (BypassQuarantine) {
634 NewHeader.State = Chunk::State::Available;
635 Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
636 void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
637 const uptr ClassId = NewHeader.ClassId;
638 if (LIKELY(ClassId)) {
639 bool UnlockRequired;
640 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
641 TSD->Cache.deallocate(ClassId, BlockBegin);
642 if (UnlockRequired)
643 TSD->unlock();
644 } else {
645 Secondary.deallocate(BlockBegin);
646 }
647 } else {
648 NewHeader.State = Chunk::State::Quarantined;
649 Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
650 bool UnlockRequired;
651 auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
652 Quarantine.put(&TSD->QuarantineCache,
653 QuarantineCallback(*this, TSD->Cache), Ptr, Size);
654 if (UnlockRequired)
655 TSD->unlock();
656 }
657 }
658
659 bool getChunkFromBlock(uptr Block, uptr *Chunk,
660 Chunk::UnpackedHeader *Header) {
661 u32 Offset = 0;
662 if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
663 Offset = reinterpret_cast<u32 *>(Block)[1];
664 *Chunk = Block + Offset + Chunk::getHeaderSize();
665 return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
666 }
667
668 uptr getStats(ScopedString *Str) {
669 Primary.getStats(Str);
670 Secondary.getStats(Str);
671 Quarantine.getStats(Str);
672 return Str->length();
673 }
674};
675
676} // namespace scudo
677
678#endif // SCUDO_COMBINED_H_
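
The warning above hinges on steps 8 and 22-24: BlockEnd (line 232) is only assigned on the Secondary path (line 244), while line 278 reads it whenever ClassId is 0, and the analyzer does not connect the non-zero result of getClassIdBySize (enforced only by the DCHECK_NE at line 235) with that read. One way this class of report is commonly silenced, shown purely as a sketch and not as the upstream change, is to give the variable a determinate value on every path:

// Sketch only: not the upstream patch.
void *Block;
uptr ClassId;
uptr BlockEnd = 0;  // determinate on every path; the Primary path never reads it,
                    // so behaviour is unchanged, but line 278 can no longer read
                    // an indeterminate value on any path the analyzer constructs.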

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/compiler-rt/lib/scudo/standalone/size_class_map.h

1//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_SIZE_CLASS_MAP_H_
10#define SCUDO_SIZE_CLASS_MAP_H_
11
12#include "common.h"
13#include "string_utils.h"
14
15namespace scudo {
16
17// SizeClassMap maps allocation sizes into size classes and back, in an
18// efficient table-free manner.
19//
20// Class 0 is a special class that doesn't abide by the same rules as other
21// classes. The allocator uses it to hold batches.
22//
23// The other sizes are controlled by the template parameters:
24// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
25// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
26// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
27// 2^MidSizeLog bytes.
28// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
29// eg. with NumBits==3 all size classes after 2^MidSizeLog look like
30// 0b1xx0..0 (where x is either 0 or 1).
31//
32// This class also gives a hint to a thread-caching allocator about the amount
33// of chunks that can be cached per-thread:
34// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
35// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
36
37template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
38 u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
39class SizeClassMap {
40 static const uptr MinSize = 1UL << MinSizeLog;
41 static const uptr MidSize = 1UL << MidSizeLog;
42 static const uptr MidClass = MidSize / MinSize;
43 static const u8 S = NumBits - 1;
44 static const uptr M = (1UL << S) - 1;
45
46public:
47 static const u32 MaxNumCachedHint = MaxNumCachedHintT;
48
49 static const uptr MaxSize = 1UL << MaxSizeLog;
50 static const uptr NumClasses =
51 MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
52 static_assert(NumClasses <= 256, "");
53 static const uptr LargestClassId = NumClasses - 1;
54 static const uptr BatchClassId = 0;
55
56 static uptr getSizeByClassId(uptr ClassId) {
57 DCHECK_NE(ClassId, BatchClassId);
58 if (ClassId <= MidClass)
59 return ClassId << MinSizeLog;
60 ClassId -= MidClass;
61 const uptr T = MidSize << (ClassId >> S);
62 return T + (T >> S) * (ClassId & M);
63 }
64
65 static uptr getClassIdBySize(uptr Size) {
66 DCHECK_LE(Size, MaxSize);
67 if (Size <= MidSize)
  11. Assuming 'Size' is <= 'MidSize'
  12. Taking true branch
68 return (Size + MinSize - 1) >> MinSizeLog;
  13. Returning value, which participates in a condition later
69 const uptr L = getMostSignificantSetBitIndex(Size);
70 const uptr HBits = (Size >> (L - S)) & M;
71 const uptr LBits = Size & ((1UL << (L - S)) - 1);
72 const uptr L1 = L - MidSizeLog;
73 return MidClass + (L1 << S) + HBits + (LBits > 0);
74 }
75
76 static u32 getMaxCachedHint(uptr Size) {
77 DCHECK_LE(Size, MaxSize);
78 DCHECK_NE(Size, 0);
79 u32 N;
80 // Force a 32-bit division if the template parameters allow for it.
81 if (MaxBytesCachedLog > 31 || MaxSizeLog > 31)
82 N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size);
83 else
84 N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size);
85 return Max(1U, Min(MaxNumCachedHint, N));
86 }
87
88 static void print() {
89 ScopedString Buffer(1024);
90 uptr PrevS = 0;
91 uptr TotalCached = 0;
92 for (uptr I = 0; I < NumClasses; I++) {
93 if (I == BatchClassId)
94 continue;
95 const uptr S = getSizeByClassId(I);
96 if (S >= MidSize / 2 && (S & (S - 1)) == 0)
97 Buffer.append("\n");
98 const uptr D = S - PrevS;
99 const uptr P = PrevS ? (D * 100 / PrevS) : 0;
100 const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
101 const uptr Cached = getMaxCachedHint(S) * S;
102 Buffer.append(
103 "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
104 I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
105 getClassIdBySize(S));
106 TotalCached += Cached;
107 PrevS = S;
108 }
109 Buffer.append("Total Cached: %zu\n", TotalCached);
110 Buffer.output();
111 }
112
113 static void validate() {
114 for (uptr C = 0; C < NumClasses; C++) {
115 if (C == BatchClassId)
116 continue;
117 const uptr S = getSizeByClassId(C);
118 CHECK_NE(S, 0U);
119 CHECK_EQ(getClassIdBySize(S), C);
120 if (C < LargestClassId)
121 CHECK_EQ(getClassIdBySize(S + 1), C + 1);
122 CHECK_EQ(getClassIdBySize(S - 1), C);
123 if (C - 1 != BatchClassId)
124 CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
125 }
126 // Do not perform the loop if the maximum size is too large.
127 if (MaxSizeLog > 19)
128 return;
129 for (uptr S = 1; S <= MaxSize; S++) {
130 const uptr C = getClassIdBySize(S);
131 CHECK_LT(C, NumClasses);
132 CHECK_GE(getSizeByClassId(C), S);
133 if (C - 1 != BatchClassId)
134 CHECK_LT(getSizeByClassId(C - 1), S);
135 }
136 }
137};
138
139typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
140
141// TODO(kostyak): further tune class maps for Android & Fuchsia.
142#if SCUDO_WORDSIZE == 64U
143typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
144typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
145#else
146typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
147typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
148#endif
149
150} // namespace scudo
151
152#endif // SCUDO_SIZE_CLASS_MAP_H_
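
To make the class-map arithmetic above concrete, here is a small standalone restatement of the two mapping functions with the DefaultSizeClassMap parameters (SizeClassMap<3, 5, 8, 17, 8, 10>). The constants and the bit-scan helper are spelled out locally, so this is an illustration of the formulas rather than the library code itself:

// size_class_example.cpp: worked example of the SizeClassMap formulas using the
// DefaultSizeClassMap parameters (NumBits=3, MinSizeLog=5, MidSizeLog=8, MaxSizeLog=17).
#include <cassert>
#include <cstdint>

namespace {
constexpr uint64_t MinSizeLog = 5, MidSizeLog = 8, S = 3 - 1;  // S = NumBits - 1
constexpr uint64_t MinSize = 1ULL << MinSizeLog;               // 32
constexpr uint64_t MidSize = 1ULL << MidSizeLog;               // 256
constexpr uint64_t MidClass = MidSize / MinSize;               // 8
constexpr uint64_t M = (1ULL << S) - 1;                        // 3

uint64_t mostSignificantSetBitIndex(uint64_t X) { return 63 - __builtin_clzll(X); }

uint64_t getClassIdBySize(uint64_t Size) {
  if (Size <= MidSize)                          // linear region: steps of 32 bytes
    return (Size + MinSize - 1) >> MinSizeLog;
  const uint64_t L = mostSignificantSetBitIndex(Size);
  const uint64_t HBits = (Size >> (L - S)) & M;
  const uint64_t LBits = Size & ((1ULL << (L - S)) - 1);
  return MidClass + ((L - MidSizeLog) << S) + HBits + (LBits > 0);
}

uint64_t getSizeByClassId(uint64_t ClassId) {
  if (ClassId <= MidClass)
    return ClassId << MinSizeLog;
  ClassId -= MidClass;
  const uint64_t T = MidSize << (ClassId >> S);
  return T + (T >> S) * (ClassId & M);
}
}  // namespace

int main() {
  // 100 bytes falls in the linear region: class 4, which holds up to 128 bytes.
  assert(getClassIdBySize(100) == 4 && getSizeByClassId(4) == 128);
  // 300 bytes falls in the geometric region: class 9, which holds up to 320 bytes.
  assert(getClassIdBySize(300) == 9 && getSizeByClassId(9) == 320);
  return 0;
}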