#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private detail::AllocatorHolder<AllocatorT> {
  using AllocTy = detail::AllocatorHolder<AllocatorT>;

public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1 which already increases the "
                "slab size after each allocated slab.");
  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocTy(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old
  // allocator's slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocTy(std::move(Old.getAllocator())), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }
  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }
  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocTy::operator=(std::move(RHS.getAllocator()));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }
  /// Deallocate all but the current slab and reset the current pointer to the
  /// beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized
    // slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }
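  // Illustrative sketch (not part of the header): Reset() invalidates all
  // previously returned pointers but keeps the first slab for reuse:
  //   llvm::BumpPtrAllocator A;
  //   (void)A.Allocate(64, llvm::Align(8));
  //   A.Reset(); // slab 0 retained, everything else freed
  //   assert(A.getBytesAllocated() == 0);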
  /// Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    uintptr_t AlignedPtr = alignAddr(CurPtr, Alignment);

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under AddressSanitizer.
    SizeToAllocate += RedZoneSize;
#endif

    uintptr_t AllocEndPtr = AlignedPtr + SizeToAllocate;
    assert(AllocEndPtr >= uintptr_t(CurPtr) &&
           "Alignment + Size must not overflow");

    // Check if we have enough space.
    if (LLVM_LIKELY(AllocEndPtr <= uintptr_t(End)
                    // We can't return nullptr even for a zero-sized allocation!
                    && CurPtr != nullptr)) {
      CurPtr = reinterpret_cast<char *>(AllocEndPtr);
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return reinterpret_cast<char *>(AlignedPtr);
    }

    return AllocateSlow(Size, SizeToAllocate, Alignment);
  }
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_NOINLINE void *
  AllocateSlow(size_t Size, size_t SizeToAllocate, Align Alignment) {
    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          this->getAllocator().Allocate(PaddedSize, alignof(std::max_align_t));
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }
  /// \return An index uniquely and reproducibly identifying an input pointer
  /// \p Ptr in the given allocator. The returned value is negative iff the
  /// object is inside a custom-size slab. Returns an empty optional if the
  /// pointer is not found in the allocator.
  std::optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative indexes to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return std::nullopt;
  }
  /// A wrapper around identifyObject that additionally asserts that the
  /// object is indeed within the allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    std::optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }
  /// A wrapper around identifyKnownObject. Accepts type information about the
  /// object and produces a smaller identifier by relying on the alignment
  /// information.
  template <typename T> int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }
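  // Illustrative sketch (not part of the header): identifyObject yields a
  // stable byte offset relative to the allocator's slabs, so adjacent bytes
  // get adjacent identifiers:
  //   llvm::BumpPtrAllocator A;
  //   char *P = (char *)A.Allocate(16, llvm::Align(8));
  //   int64_t Id = *A.identifyObject(P);
  //   assert(*A.identifyObject(P + 1) == Id + 1);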
  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }
  void setRedZoneSize(size_t NewSize) { RedZoneSize = NewSize; }
private:
  /// The current pointer into the current slab; this points at the next free
  /// byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated; used to compute how much space is wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under a
  /// sanitizer.
  size_t RedZoneSize = 1;
  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double the allocated
    // size to reduce allocation frequency, but saturate at multiplying the
    // slab size by 2^30.
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }
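  // Worked example (not part of the header), assuming the default
  // SlabSize = 4096 and GrowthDelay = 128:
  //   computeSlabSize(0)   == 4096   // slabs 0..127
  //   computeSlabSize(128) == 8192   // slabs 128..255
  //   computeSlabSize(256) == 16384  // slabs 256..383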
  /// Allocate a new slab and move the bump pointers over into the new slab,
  /// modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = this->getAllocator().Allocate(AllocatedSlabSize,
                                                  alignof(std::max_align_t));
    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }
  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      this->getAllocator().Deallocate(*I, AllocatedSlabSize,
                                      alignof(std::max_align_t));
    }
  }
  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      this->getAllocator().Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};
/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated, so that destructors can be run in DestroyAll().
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back()
                      ? Allocator.CurPtr
                      : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
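// Illustrative sketch (not part of the header): objects are constructed with
// placement new and destroyed in bulk by DestroyAll():
//   llvm::SpecificBumpPtrAllocator<std::string> A;
//   std::string *S = new (A.Allocate()) std::string("hello");
//   A.DestroyAll(); // runs ~std::string() on every allocated object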
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H
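These overloads let clients placement-new directly into the pool. A minimal
sketch (not part of the header), assuming the operators above are visible at
the call site:

  llvm::BumpPtrAllocator A;
  int *I = new (A) int(7); // allocated from A; no matching delete needed
  // Objects with non-trivial destructors must be destroyed manually, e.g.
  // Obj->~Obj(), since BumpPtrAllocator never runs destructors.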
CRTP base class providing obvious overloads for the core Allocate() methods of LLVM-style allocators.
Allocate memory in an ever growing pool, as if by bump-pointer.
size_t GetNumSlabs() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, size_t Alignment)
void setRedZoneSize(size_t NewSize)
std::optional< int64_t > identifyObject(const void *Ptr)
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
BumpPtrAllocatorImpl()=default
BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
int64_t identifyKnownAlignedObject(const void *Ptr)
A wrapper around identifyKnownObject.
size_t getBytesAllocated() const
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it, freeing all memory allocated so far.
void Deallocate(const void *Ptr, size_t Size, size_t)
BumpPtrAllocatorImpl(T &&Allocator)
size_t getTotalMemory() const
BumpPtrAllocatorImpl & operator=(BumpPtrAllocatorImpl &&RHS)
int64_t identifyKnownObject(const void *Ptr)
A wrapper around identifyObject that additionally asserts that the object is indeed within the allocator.
LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_NOINLINE void * AllocateSlow(size_t Size, size_t SizeToAllocate, Align Alignment)
A BumpPtrAllocator that allows only elements of a specific type to be allocated.
~SpecificBumpPtrAllocator()
std::optional< int64_t > identifyObject(const void *Ptr)
SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
T * Allocate(size_t num=1)
Allocate space for an array of objects without constructing them.
void DestroyAll()
Call the destructor of each allocated object and deallocate all but the current slab and reset the current pointer to the beginning of it, freeing all memory allocated so far.
SpecificBumpPtrAllocator()
SpecificBumpPtrAllocator & operator=(SpecificBumpPtrAllocator &&RHS)
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated, size_t TotalMemory)
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
uintptr_t alignAddr(const void *Addr, Align Alignment)
Aligns Addr to Alignment bytes, rounding up.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.