//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
/// of these conform to an LLVM "Allocator" concept which consists of an
/// Allocate method accepting a size and alignment, and a Deallocate accepting
/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
/// Allocate and Deallocate for setting size and alignment based on the final
/// type. These overloads are typically provided by a base class template \c
/// AllocatorBase.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

/// \brief CRTP base class providing obvious overloads for the core \c
/// Allocate() methods of LLVM-style allocators.
///
/// This base class both documents the full public interface exposed by all
/// LLVM-style allocators, and redirects all of the overloads to a single core
/// set of methods which the derived class must define.
template <typename DerivedT> class AllocatorBase {
public:
  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }

  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size) {
#ifdef __clang__
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(void *) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }

  // The rest of these methods are helpers that redirect to one of the above
  // core methods.

  /// \brief Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }

  /// \brief Deallocate space for a sequence of objects without constructing them.
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};

class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
  void Reset() {}

  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                size_t /*Alignment*/) {
    return malloc(Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Deallocate;

  void PrintStats() const {}
};

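// Illustrative usage sketch (not part of the original header), assuming a
// hypothetical helper name: the typed overloads inherited from AllocatorBase
// compute size and alignment from the element type and forward to
// MallocAllocator's core Allocate/Deallocate, which wrap malloc/free.
inline void mallocAllocatorExample() {
  MallocAllocator Alloc;
  // 4 * sizeof(double) bytes, aligned to alignof(double), obtained via malloc.
  double *Buf = Alloc.Allocate<double>(4);
  Buf[0] = 1.0;
  // The typed Deallocate forwards the pointer back to free().
  Alloc.Deallocate(Buf, 4);
}
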
namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : Allocator(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// \brief Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// \brief Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

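  // Worked example (illustrative note, not from the original source): with
  // CurPtr == 0x1003, End == 0x2000, Size == 16 and Alignment == 8 (and no
  // ASan red zone), Adjustment is 5, so AlignedPtr == 0x1008 is returned and
  // CurPtr advances to 0x1018; the fast path applies because
  // Adjustment + SizeToAllocate == 21 fits in End - CurPtr == 0xFFD bytes.
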
  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  void Deallocate(const void *Ptr, size_t Size) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// \brief The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// \brief The end of the current slab.
  char *End = nullptr;

  /// \brief The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// \brief Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// \brief How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// \brief The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  /// \brief The allocator instance we use to get slabs of memory.
  AllocatorT Allocator;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size
    // by 2^30.
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }
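
  // For example (illustrative note, not in the original source), with the
  // default SlabSize of 4096 this yields 4096-byte slabs for slab indices
  // 0-127, 8192-byte slabs for 128-255, 16384-byte slabs for 256-383, and so
  // on, capping the multiplier at 2^30.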

  /// \brief Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// \brief Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }

  /// \brief Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// \brief The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

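// Illustrative usage sketch (not part of the original header), assuming a
// hypothetical helper name: a BumpPtrAllocator hands out pointers by bumping
// CurPtr within the current slab; Deallocate is a no-op (aside from ASan
// poisoning), and memory is reclaimed wholesale by Reset() or the destructor.
inline void bumpPtrAllocatorExample() {
  BumpPtrAllocator Alloc;

  // Uninitialized storage for 16 ints; the first request lazily creates a
  // 4096-byte slab and subsequent requests bump along inside it.
  int *Ints = Alloc.Allocate<int>(16);
  for (int I = 0; I != 16; ++I)
    Ints[I] = I;

  // Raw allocation through the core interface: 128 bytes, 16-byte aligned.
  void *Raw = Alloc.Allocate(128, 16);
  (void)Raw;

  // Drop all but the first slab and rewind the bump pointer.
  Alloc.Reset();
}
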
/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator is
/// destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }

  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it, freeing
  /// all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, alignof(T)));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, alignof(T));
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// \brief Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
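
// Illustrative usage sketch (not part of the original header), assuming a
// hypothetical helper name: SpecificBumpPtrAllocator remembers the element
// type so that DestroyAll() (and the destructor) can run ~T over everything it
// has handed out. int keeps the sketch self-contained; the class earns its
// keep with element types that have non-trivial destructors.
inline void specificBumpPtrAllocatorExample() {
  SpecificBumpPtrAllocator<int> Alloc;
  // Uninitialized storage for 8 ints; non-trivial types would be constructed
  // in place before use.
  int *Ints = Alloc.Allocate(8);
  for (int I = 0; I != 8; ++I)
    Ints[I] = I;
  // Runs the (trivial) destructor on each allocated element, then resets the
  // underlying BumpPtrAllocator. Also happens automatically on destruction.
  Alloc.DestroyAll();
}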

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
}
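
// Illustrative usage sketch (not part of the original header), assuming
// hypothetical names: the operator new overload above lets objects be
// constructed directly in a BumpPtrAllocator. The matching operator delete is
// a no-op (it exists only so the compiler can call it if a constructor
// throws), so such objects are never deleted individually; run the destructor
// by hand for non-trivial types and reclaim the memory by resetting or
// destroying the allocator.
namespace bump_ptr_example {

struct Symbol {
  explicit Symbol(unsigned ID) : ID(ID) {}
  unsigned ID;
};

inline Symbol *makeSymbol(llvm::BumpPtrAllocator &Alloc, unsigned ID) {
  // Allocates sizeof(Symbol) bytes from Alloc (alignment chosen by the
  // heuristic above) and constructs a Symbol in place.
  return new (Alloc) Symbol(ID);
}

} // end namespace bump_ptr_example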

#endif // LLVM_SUPPORT_ALLOCATOR_H