//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
/// of these conform to an LLVM "Allocator" concept which consists of an
/// Allocate method accepting a size and alignment, and a Deallocate accepting
/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
/// Allocate and Deallocate for setting size and alignment based on the final
/// type. These overloads are typically provided by a base class template \c
/// AllocatorBase.
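///
/// A minimal usage sketch (BumpPtrAllocator is defined later in this file;
/// the variable names are illustrative only):
/// \code
///   llvm::BumpPtrAllocator Alloc;
///   void *Raw = Alloc.Allocate(128, 8);  // 128 bytes, 8-byte aligned
///   int *Arr = Alloc.Allocate<int>(16);  // uninitialized storage for 16 ints
/// \endcode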
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

/// CRTP base class providing obvious overloads for the core \c
/// Allocate() methods of LLVM-style allocators.
///
/// This base class both documents the full public interface exposed by all
/// LLVM-style allocators, and redirects all of the overloads to a single core
/// set of methods which the derived class must define.
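///
/// A minimal sketch of a conforming derived allocator, mirroring the
/// MallocAllocator defined below (the name ForwardingAllocator is
/// hypothetical, not part of LLVM):
/// \code
///   class ForwardingAllocator : public AllocatorBase<ForwardingAllocator> {
///   public:
///     void *Allocate(size_t Size, size_t /*Alignment*/) {
///       return safe_malloc(Size);
///     }
///     void Deallocate(const void *Ptr, size_t /*Size*/) {
///       free(const_cast<void *>(Ptr));
///     }
///     // Pull in the type-based overloads defined by the base class.
///     using AllocatorBase<ForwardingAllocator>::Allocate;
///     using AllocatorBase<ForwardingAllocator>::Deallocate;
///   };
/// \endcode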
template <typename DerivedT> class AllocatorBase {
public:
  /// Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }

  /// Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size) {
#ifdef __clang__
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(const void *, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }

  // The rest of these methods are helpers that redirect to one of the above
  // core methods.

  /// Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }

  /// Deallocate space for a sequence of objects without constructing them.
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};

class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
  void Reset() {}

  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                size_t /*Alignment*/) {
    return safe_malloc(Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Deallocate;

  void PrintStats() const {}
};

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
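///
/// Illustrative instantiations (a sketch; the sizes are arbitrary choices,
/// not recommendations):
/// \code
///   llvm::BumpPtrAllocator Small;                     // 4096-byte slabs
///   llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator,
///                              16384> BigSlabs;       // 16 KiB slabs
/// \endcode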
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : Allocator(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized
    // slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char*)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-size
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
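  ///
  /// A usage sketch (Alloc and Ptr are assumed to exist):
  /// \code
  ///   if (llvm::Optional<int64_t> Id = Alloc.identifyObject(Ptr)) {
  ///     bool InRegularSlab = *Id >= 0; // negative means a custom-sized slab
  ///   }
  /// \endcode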
  llvm::Optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return None;
  }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  /// The allocator instance we use to get slabs of memory.
  AllocatorT Allocator;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size
    // by 2^30.
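    // With the default SlabSize of 4096, slabs 0..127 are 4 KiB each, slabs
    // 128..255 are 8 KiB, slabs 256..383 are 16 KiB, and so on.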
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
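///
/// A usage sketch (Node is a hypothetical type):
/// \code
///   llvm::SpecificBumpPtrAllocator<Node> NodeAlloc;
///   Node *N = new (NodeAlloc.Allocate()) Node();
///   // ~Node() runs for every allocated object when NodeAlloc is destroyed
///   // or DestroyAll() is called.
/// \endcode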
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, alignof(T)));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, alignof(T));
      char *End = *I == Allocator.Slabs.back()
                      ? Allocator.CurPtr
                      : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
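  // S exists only to compute an alignment bound: offsetof(S, x) is the
  // strictest fundamental alignment required by the union's members.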
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
}
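
// A usage sketch for the placement operators above (Widget is a hypothetical
// type):
//
//   llvm::BumpPtrAllocator Alloc;
//   auto *W = new (Alloc) Widget(42);
//
// The matching operator delete is invoked only if Widget's constructor
// throws; otherwise the memory is reclaimed when Alloc is destroyed or
// Reset(), and ~Widget() must be run by hand if the type is non-trivial.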

#endif // LLVM_SUPPORT_ALLOCATOR_H
|