#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <type_traits>
#include <utility>
namespace llvm {

/// CRTP base class providing obvious overloads for the core \c Allocate()
/// methods of LLVM-style allocators.
///
/// The convenience overloads are redirected to the single core pair of
/// methods which the derived class must define.
template <typename DerivedT> class AllocatorBase {
public:
  /// Allocate \a Size bytes of \a Alignment aligned memory. This method must
  /// be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }
  /// Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator. This method must be implemented by \c DerivedT.
  void Deallocate(const void *Ptr, size_t Size) {
#ifdef __clang__
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(void *) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }
  /// Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }
  /// Deallocate space for a sequence of objects without constructing them.
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};
  // MallocAllocator forwards the core overloads directly to malloc/free.
  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }
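// Illustrative sketch (not part of the original header): the minimal shape of
// an allocator satisfying the contract that the AllocatorBase static_asserts
// above check for. The class name is hypothetical; MallocAllocator in the
// real header follows the same pattern.
class ForwardingMallocAllocator
    : public AllocatorBase<ForwardingMallocAllocator> {
public:
  // The two core overloads the CRTP base dispatches to.
  void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Re-expose the templated convenience overloads hidden by the members above.
  using AllocatorBase<ForwardingMallocAllocator>::Allocate;
  using AllocatorBase<ForwardingMallocAllocator>::Deallocate;
};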
/// Allocate memory in an ever growing pool, as if by bump-pointer.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl()
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
        Allocator(std::forward<T &&>(Allocator)) {}
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }
  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }
  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }
  /// Deallocate all but the current slab and reset the current pointer to the
  /// beginning of it, freeing all memory allocated so far.
  void Reset() {
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    // Deallocate all but the first slab.
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }
  /// Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    // Check if we have enough space in the current slab.
    if (Adjustment + Size <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + Size;
      // Tell MSan/ASan about the newly handed-out memory.
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = Size + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      // Poison the whole slab; only pieces returned from this method may be
      // read.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + Size <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + Size;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }
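  // Worked example (illustrative, assuming the default SlabSize and
  // SizeThreshold of 4096):
  //  - Allocate(64, 8) with room left in the current slab takes the fast
  //    path: CurPtr advances by the alignment adjustment plus 64 bytes.
  //  - Allocate(16384, 16) has PaddedSize = 16384 + 15 > SizeThreshold, so it
  //    is served from its own custom-sized slab.
  //  - A small request that no longer fits in the current slab falls through
  //    to StartNewSlab() and is served from the fresh slab.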
  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }
private:
  /// The current pointer into the current slab, and the end of that slab.
  char *CurPtr;
  char *End;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for requests bigger than SizeThreshold.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  size_t BytesAllocated;

  /// The allocator instance we use to get slabs of memory.
  AllocatorT Allocator;
  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size
    // by 2^30.
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }
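  // For example, with the default SlabSize of 4096 bytes, slabs 0-127 are
  // 4096 bytes each, slabs 128-255 are 8192 bytes, slabs 256-383 are 16384
  // bytes, and so on: capacity grows geometrically while the scale factor
  // saturates at 2^30.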
  /// Allocate a new slab and move the bump pointers over into the new slab,
  /// modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }
  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }
  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated, so that their destructors can be run by DestroyAll() and when
/// the allocator is destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() = default;

  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}

  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }
  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, alignof(T)));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, alignof(T));
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, alignof(T)),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold> &) {}
#endif // LLVM_SUPPORT_ALLOCATOR_H
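// Illustrative usage sketch (not part of the original header). It assumes the
// full header is available as "llvm/Support/Allocator.h"; the function and
// variable names below are hypothetical.

#include "llvm/Support/Allocator.h"

#include <new>
#include <string>

void allocatorUsageExample() {
  llvm::BumpPtrAllocator Alloc;

  // Raw allocation: 256 bytes, 16-byte aligned. Individual deallocation is
  // not supported; memory is reclaimed by Reset() or the destructor.
  void *Buf = Alloc.Allocate(256, 16);
  (void)Buf;

  // Typed allocation of uninitialized storage for eight ints, via the
  // AllocatorBase convenience overload the full header re-exposes with a
  // using-declaration.
  int *Ints = Alloc.Allocate<int>(8);
  (void)Ints;

  // The operator new overload above constructs an object directly in
  // bump-allocated memory.
  int *One = new (Alloc) int(42);
  (void)One;

  // SpecificBumpPtrAllocator runs ~T() for every allocated object when
  // DestroyAll() is called or when the allocator itself is destroyed.
  llvm::SpecificBumpPtrAllocator<std::string> StringAlloc;
  std::string *S = new (StringAlloc.Allocate()) std::string("bump-allocated");
  (void)S;

  // Keep the first slab, drop the rest, and rewind the bump pointer.
  Alloc.Reset();
}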