#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

namespace llvm {
/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a
/// certain size into their own slab.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private detail::AllocatorHolder<AllocatorT> {
  using AllocTy = detail::AllocatorHolder<AllocatorT>;

public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1, which already increases the "
                "slab size after each allocated slab.");
  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocTy(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old
  // allocator's slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocTy(std::move(Old.getAllocator())), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }
  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }
  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocTy::operator=(std::move(RHS.getAllocator()));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }
  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    // Deallocate all but the first slab.
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }
  /// Allocate space at the specified alignment.
  ///
  /// Allocate(0, N) is valid: it returns a non-null pointer (which should
  /// not be dereferenced).
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space in the current slab.
    if (LLVM_LIKELY(Adjustment + SizeToAllocate <= size_t(End - CurPtr)
                    // We can't return nullptr even for a zero-sized allocation!
                    && CurPtr != nullptr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    return AllocateSlow(Size, SizeToAllocate, Alignment);
  }

private:
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_NOINLINE void *
  AllocateSlow(size_t Size, size_t SizeToAllocate, Align Alignment) {
    // If Size is really big, allocate a separate slab for it, bypassing our
    // slab mechanism entirely.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          this->getAllocator().Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }
public:
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }
  /// \return An index uniquely and reproducibly identifying an input pointer
  /// \p Ptr in the given allocator. The returned value is negative iff the
  /// object is inside a custom-sized slab, and an empty optional if the
  /// pointer is not found in the allocator at all.
  std::optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative indices to denote custom-sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return std::nullopt;
  }
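
  // For illustration of the index scheme: a pointer 40 bytes into the first
  // regular slab identifies as 40, while a pointer 8 bytes into the first
  // custom-sized slab identifies as -1 - 8 = -9. Regular slabs yield
  // non-negative indices; custom-sized slabs yield negative ones.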
  /// A wrapper around identifyObject that additionally asserts that the
  /// object is indeed within the allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    std::optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }
  /// A wrapper around identifyKnownObject. Accepts type information instead
  /// of a raw alignment, and transforms the resulting index into a valid
  /// index into an array of objects of type T.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }
  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }
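
  // For illustration: getBytesAllocated() counts the bytes callers asked for,
  // while getTotalMemory() counts the bytes reserved in slabs; the difference
  // approximates space lost to alignment padding and unused slab tails.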
  /// Set the size of the red zone placed between allocations when running
  /// under a memory sanitizer.
  void setRedZoneSize(size_t NewSize) { RedZoneSize = NewSize; }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;
  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double the allocated
    // size to reduce allocation frequency, but saturate at multiplying the
    // slab size by 2^30.
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }
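
  // For illustration, with the defaults SlabSize = 4096 and GrowthDelay = 128:
  // slabs 0..127 are 4096 bytes each, slabs 128..255 are 8192 bytes, slabs
  // 256..383 are 16384 bytes, and so on, saturating at SlabSize * 2^30.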
  /// Allocate a new slab and move the bump pointers over into the new slab,
  /// modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = this->getAllocator().Allocate(AllocatedSlabSize,
                                                  alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }
  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      this->getAllocator().Deallocate(*I, AllocatedSlabSize,
                                      alignof(std::max_align_t));
    }
  }
  /// Deallocate all memory for custom-sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      this->getAllocator().Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }
  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back()
                      ? Allocator.CurPtr
                      : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
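
// For illustration, a usage sketch of SpecificBumpPtrAllocator (MyNode is a
// stand-in type, not defined in this header):
//
//   SpecificBumpPtrAllocator<MyNode> NodeAlloc;
//   MyNode *N = new (NodeAlloc.Allocate()) MyNode();
//   ...
//   NodeAlloc.DestroyAll(); // runs ~MyNode() on every allocated element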

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}
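
// For illustration, this overload enables placement-new straight into a
// BumpPtrAllocator (FooType is a stand-in type):
//
//   llvm::BumpPtrAllocator Alloc;
//   FooType *F = new (Alloc) FooType();
//
// There is no usable matching delete: the memory is reclaimed only when the
// allocator is reset or destroyed, and ~FooType() is never run automatically.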
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H