Allocator.h
//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
/// allocator.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <utility>

namespace llvm {

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
///
/// The GrowthDelay specifies after how many allocated slabs the allocator
/// increases the size of the slabs.
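///
/// A minimal usage sketch (illustrative only; the variable names here are not
/// part of this header):
/// \code
///   llvm::BumpPtrAllocator Alloc;                      // BumpPtrAllocatorImpl<>
///   int *I = Alloc.Allocate<int>();                    // space for one int
///   void *Buf = Alloc.Allocate(128, llvm::Align(16));  // 128 bytes, 16-aligned
///   Alloc.Reset();  // reclaims all memory; I and Buf become invalid
/// \endcode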
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private AllocatorT {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1 which already increases the "
                "slab size after each allocated slab.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocatorT(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  // This method is *not* marked noalias, because
  // SpecificBumpPtrAllocator::DestroyAll() loops over all allocations, and
  // that loop is not based on the Allocate() return value.
  //
  // Allocate(0, N) is valid; it returns a non-null pointer (which should not
  // be dereferenced).
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)
        // We can't return nullptr even for a zero-sized allocation!
        && CurPtr != nullptr) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-size
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
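  ///
  /// For example (illustrative, following the arithmetic below): a pointer at
  /// byte offset 100 into the first regular slab yields 100, while a pointer
  /// at byte offset 0 of the first custom-sized slab yields -1 and one at
  /// offset 5 yields -6.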
  llvm::Optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return None;
  }

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    Optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// about the object and produces a smaller identifier by relying on
  /// the alignment information. Note that sub-classes may have different
  /// alignment, so the most base class should be passed as template parameter
  /// in order to obtain correct results. For that reason automatic template
  /// parameter deduction is disabled.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator. This identifier is
  /// different from the ones produced by identifyObject and
  /// identifyKnownObject.
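  ///
  /// For example (illustrative): if identifyKnownObject(Ptr) returns 24 and
  /// alignof(T) is 8, this function returns 3.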
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
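    //
    // For example, with the default SlabSize = 4096 and GrowthDelay = 128
    // (illustrative arithmetic): slabs 0..127 are 4096 bytes, slabs 128..255
    // are 8192 bytes, slabs 256..383 are 16384 bytes, and so on.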
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab =
        AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
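///
/// A minimal usage sketch (illustrative; Widget is a hypothetical type):
/// \code
///   llvm::SpecificBumpPtrAllocator<Widget> Alloc;
///   Widget *W = new (Alloc.Allocate()) Widget();  // construct in place
///   Alloc.DestroyAll();  // runs ~Widget() on every element, then resets
/// \endcode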
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it, freeing
  /// all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};


} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}
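
// A minimal usage sketch of the operator new overload above (illustrative;
// Node is a hypothetical type). Objects are constructed directly into the
// allocator and are reclaimed, without individual deletes, when the allocator
// is reset or destroyed:
//
//   llvm::BumpPtrAllocator Alloc;
//   auto *N = new (Alloc) Node();
//   // No `delete N`; the matching operator delete above only participates in
//   // stack unwinding if Node's constructor throws.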

#endif // LLVM_SUPPORT_ALLOCATOR_H