17#ifndef LLVM_SUPPORT_ALLOCATOR_H
18#define LLVM_SUPPORT_ALLOCATOR_H
61template <
typename AllocatorT = MallocAllocator,
size_t SlabSize = 4096,
62 size_t SizeThreshold = SlabSize,
size_t GrowthDelay = 128>
64 :
public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
65 SizeThreshold, GrowthDelay>>,
70 static_assert(SizeThreshold <= SlabSize,
71 "The SizeThreshold must be at most the SlabSize to ensure "
72 "that objects larger than a slab go into their own memory "
74 static_assert(GrowthDelay > 0,
75 "GrowthDelay must be at least 1 which already increases the"
76 "slab size after each allocated slab.");
88 End(Old.End), Slabs(
std::
move(Old.Slabs)),
89 CustomSizedSlabs(
std::
move(Old.CustomSizedSlabs)),
90 BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
91 Old.CurPtr = Old.End =
nullptr;
92 Old.BytesAllocated = 0;
94 Old.CustomSizedSlabs.clear();
98 DeallocateSlabs(Slabs.
begin(), Slabs.
end());
99 DeallocateCustomSizedSlabs();
103 DeallocateSlabs(Slabs.
begin(), Slabs.
end());
104 DeallocateCustomSizedSlabs();
108 BytesAllocated =
RHS.BytesAllocated;
109 RedZoneSize =
RHS.RedZoneSize;
110 Slabs = std::move(
RHS.Slabs);
111 CustomSizedSlabs = std::move(
RHS.CustomSizedSlabs);
112 AllocTy::operator=(std::move(
RHS.getAllocator()));
114 RHS.CurPtr =
RHS.End =
nullptr;
115 RHS.BytesAllocated = 0;
117 RHS.CustomSizedSlabs.clear();
125 DeallocateCustomSizedSlabs();
126 CustomSizedSlabs.
clear();
133 CurPtr = (
char *)Slabs.
front();
134 End = CurPtr + SlabSize;
137 DeallocateSlabs(std::next(Slabs.
begin()), Slabs.
end());
150 BytesAllocated +=
Size;
153 assert(Adjustment +
Size >=
Size &&
"Adjustment + Size must not overflow");
155 size_t SizeToAllocate =
Size;
156#if LLVM_ADDRESS_SANITIZER_BUILD
158 SizeToAllocate += RedZoneSize;
162 if (Adjustment + SizeToAllocate <=
size_t(End - CurPtr)
164 && CurPtr !=
nullptr) {
165 char *AlignedPtr = CurPtr + Adjustment;
166 CurPtr = AlignedPtr + SizeToAllocate;
177 size_t PaddedSize = SizeToAllocate + Alignment.
value() - 1;
178 if (PaddedSize > SizeThreshold) {
180 this->
getAllocator().Allocate(PaddedSize,
alignof(std::max_align_t));
184 CustomSizedSlabs.
push_back(std::make_pair(NewSlab, PaddedSize));
186 uintptr_t AlignedAddr =
alignAddr(NewSlab, Alignment);
187 assert(AlignedAddr +
Size <= (uintptr_t)NewSlab + PaddedSize);
188 char *AlignedPtr = (
char*)AlignedAddr;
196 uintptr_t AlignedAddr =
alignAddr(CurPtr, Alignment);
197 assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
198 "Unable to allocate memory!");
199 char *AlignedPtr = (
char*)AlignedAddr;
200 CurPtr = AlignedPtr + SizeToAllocate;
208 assert(Alignment > 0 &&
"0-byte alignment is not allowed. Use 1 instead.");
233 const char *
P =
static_cast<const char *
>(
Ptr);
234 int64_t InSlabIdx = 0;
236 const char *S =
static_cast<const char *
>(Slabs[
Idx]);
237 if (
P >= S &&
P < S + computeSlabSize(
Idx))
238 return InSlabIdx +
static_cast<int64_t
>(
P - S);
239 InSlabIdx +=
static_cast<int64_t
>(computeSlabSize(
Idx));
243 int64_t InCustomSizedSlabIdx = -1;
245 const char *S =
static_cast<const char *
>(CustomSizedSlabs[
Idx].first);
246 size_t Size = CustomSizedSlabs[
Idx].second;
247 if (
P >= S &&
P < S +
Size)
248 return InCustomSizedSlabIdx -
static_cast<int64_t
>(
P - S);
249 InCustomSizedSlabIdx -=
static_cast<int64_t
>(
Size);
260 assert(Out &&
"Wrong allocator used");
274 template <
typename T>
277 assert(Out %
alignof(
T) == 0 &&
"Wrong alignment information");
278 return Out /
alignof(
T);
282 size_t TotalMemory = 0;
284 TotalMemory += computeSlabSize(std::distance(Slabs.
begin(),
I));
285 for (
const auto &PtrAndSize : CustomSizedSlabs)
286 TotalMemory += PtrAndSize.second;
293 RedZoneSize = NewSize;
305 char *CurPtr =
nullptr;
319 size_t BytesAllocated = 0;
323 size_t RedZoneSize = 1;
325 static size_t computeSlabSize(
unsigned SlabIdx) {
331 ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
336 void StartNewSlab() {
337 size_t AllocatedSlabSize = computeSlabSize(Slabs.
size());
339 void *NewSlab = this->
getAllocator().Allocate(AllocatedSlabSize,
340 alignof(std::max_align_t));
346 CurPtr = (
char *)(NewSlab);
347 End = ((
char *)NewSlab) + AllocatedSlabSize;
351 void DeallocateSlabs(SmallVectorImpl<void *>::iterator
I,
352 SmallVectorImpl<void *>::iterator
E) {
353 for (;
I !=
E; ++
I) {
354 size_t AllocatedSlabSize =
355 computeSlabSize(std::distance(Slabs.
begin(),
I));
357 alignof(std::max_align_t));
362 void DeallocateCustomSizedSlabs() {
363 for (
auto &PtrAndSize : CustomSizedSlabs) {
364 void *
Ptr = PtrAndSize.first;
365 size_t Size = PtrAndSize.second;
404 auto DestroyElements = [](
char *Begin,
char *
End) {
407 reinterpret_cast<T *
>(
Ptr)->~
T();
412 size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
414 char *Begin = (
char *)
alignAddr(*
I, Align::Of<T>());
416 : (
char *)*
I + AllocatedSlabSize;
418 DestroyElements(Begin,
End);
421 for (
auto &PtrAndSize :
Allocator.CustomSizedSlabs) {
422 void *
Ptr = PtrAndSize.first;
423 size_t Size = PtrAndSize.second;
437template <
typename AllocatorT,
size_t SlabSize,
size_t SizeThreshold,
444 alignof(std::max_align_t)));
447template <
typename AllocatorT,
size_t SlabSize,
size_t SizeThreshold,
449void operator delete(
void *,
451 SizeThreshold, GrowthDelay> &) {
This file defines MallocAllocator.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define __asan_poison_memory_region(p, size)
#define __asan_unpoison_memory_region(p, size)
#define LLVM_ATTRIBUTE_RETURNS_NONNULL
#define __msan_allocated_memory(p, size)
Returns the subtype a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
CRTP base class providing obvious overloads for the core Allocate() methods of LLVM-style allocators.
Allocate memory in an ever growing pool, as if by bump-pointer.
size_t GetNumSlabs() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, size_t Alignment)
void setRedZoneSize(size_t NewSize)
std::optional< int64_t > identifyObject(const void *Ptr)
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
BumpPtrAllocatorImpl()=default
BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
int64_t identifyKnownAlignedObject(const void *Ptr)
A wrapper around identifyKnownObject.
size_t getBytesAllocated() const
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it, freeing all memory allocated so far.
void Deallocate(const void *Ptr, size_t Size, size_t)
BumpPtrAllocatorImpl(T &&Allocator)
size_t getTotalMemory() const
BumpPtrAllocatorImpl & operator=(BumpPtrAllocatorImpl &&RHS)
int64_t identifyKnownObject(const void *Ptr)
A wrapper around identifyObject that additionally asserts that the object is indeed within the allocator.
iterator erase(const_iterator CI)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A BumpPtrAllocator that allows only elements of a specific type to be allocated.
~SpecificBumpPtrAllocator()
SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
T * Allocate(size_t num=1)
Allocate space for an array of objects without constructing them.
void DestroyAll()
Call the destructor of each allocated object and deallocate all but the current slab and reset the current pointer to the beginning of it, freeing all memory allocated so far.
SpecificBumpPtrAllocator()
SpecificBumpPtrAllocator & operator=(SpecificBumpPtrAllocator &&RHS)
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated, size_t TotalMemory)
This is an optimization pass for GlobalISel generic memory operations.
uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment)
Returns the necessary adjustment for aligning Addr to Alignment bytes, rounding up.
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
uintptr_t alignAddr(const void *Addr, Align Alignment)
Aligns Addr to Alignment bytes, rounding up.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.