LLVM 20.0.0git
SectionMemoryManager.cpp
Go to the documentation of this file.
1//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the section-based memory manager used by the MCJIT
10// execution engine and RuntimeDyld.
11//
12//===----------------------------------------------------------------------===//
13
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"
19namespace llvm {
20
22 unsigned Alignment,
23 unsigned SectionID,
25 bool IsReadOnly) {
26 if (IsReadOnly)
28 Size, Alignment);
30 Alignment);
31}
32
34 unsigned Alignment,
35 unsigned SectionID,
38 Alignment);
39}
40
41uint8_t *SectionMemoryManager::allocateSection(
43 unsigned Alignment) {
44 if (!Alignment)
45 Alignment = 16;
46
47 assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
48
49 uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
50 uintptr_t Addr = 0;
51
52 MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
53 switch (Purpose) {
55 return CodeMem;
57 return RODataMem;
59 return RWDataMem;
60 }
61 llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
62 }();
63
64 // Look in the list of free memory regions and use a block there if one
65 // is available.
66 for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
67 if (FreeMB.Free.allocatedSize() >= RequiredSize) {
68 Addr = (uintptr_t)FreeMB.Free.base();
69 uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
70 // Align the address.
71 Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
72
73 if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
74 // The part of the block we're giving out to the user is now pending
75 MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
76
77 // Remember this pending block, such that future allocations can just
78 // modify it rather than creating a new one
79 FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
80 } else {
81 sys::MemoryBlock &PendingMB =
82 MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
83 PendingMB = sys::MemoryBlock(PendingMB.base(),
84 Addr + Size - (uintptr_t)PendingMB.base());
85 }
86
87 // Remember how much free space is now left in this block
88 FreeMB.Free =
89 sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
90 return (uint8_t *)Addr;
91 }
92 }
93
94 // No pre-allocated free block was large enough. Allocate a new memory region.
95 // Note that all sections get allocated as read-write. The permissions will
96 // be updated later based on memory group.
97 //
98 // FIXME: It would be useful to define a default allocation size (or add
99 // it as a constructor parameter) to minimize the number of allocations.
100 //
101 // FIXME: Initialize the Near member for each memory group to avoid
102 // interleaving.
103 std::error_code ec;
104 sys::MemoryBlock MB = MMapper->allocateMappedMemory(
105 Purpose, RequiredSize, &MemGroup.Near,
107 if (ec) {
108 // FIXME: Add error propagation to the interface.
109 return nullptr;
110 }
111
112 // Save this address as the basis for our next request
113 MemGroup.Near = MB;
114
115 // Copy the address to all the other groups, if they have not
116 // been initialized.
117 if (CodeMem.Near.base() == nullptr)
118 CodeMem.Near = MB;
119 if (RODataMem.Near.base() == nullptr)
120 RODataMem.Near = MB;
121 if (RWDataMem.Near.base() == nullptr)
122 RWDataMem.Near = MB;
123
124 // Remember that we allocated this memory
125 MemGroup.AllocatedMem.push_back(MB);
126 Addr = (uintptr_t)MB.base();
127 uintptr_t EndOfBlock = Addr + MB.allocatedSize();
128
129 // Align the address.
130 Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
131
132 // The part of the block we're giving out to the user is now pending
133 MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
134
135 // The allocateMappedMemory may allocate much more memory than we need. In
136 // this case, we store the unused memory as a free memory block.
137 unsigned FreeSize = EndOfBlock - Addr - Size;
138 if (FreeSize > 16) {
139 FreeMemBlock FreeMB;
140 FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
141 FreeMB.PendingPrefixIndex = (unsigned)-1;
142 MemGroup.FreeMem.push_back(FreeMB);
143 }
144
145 // Return aligned address
146 return (uint8_t *)Addr;
147}
148
149bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
150 // FIXME: Should in-progress permissions be reverted if an error occurs?
151 std::error_code ec;
152
153 // Make code memory executable.
154 ec = applyMemoryGroupPermissions(CodeMem,
156 if (ec) {
157 if (ErrMsg) {
158 *ErrMsg = ec.message();
159 }
160 return true;
161 }
162
163 // Make read-only data memory read-only.
164 ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
165 if (ec) {
166 if (ErrMsg) {
167 *ErrMsg = ec.message();
168 }
169 return true;
170 }
171
172 // Read-write data memory already has the correct permissions
173
174 // Some platforms with separate data cache and instruction cache require
175 // explicit cache flush, otherwise JIT code manipulations (like resolved
176 // relocations) will get to the data cache but not to the instruction cache.
178
179 return false;
180}
181
183 static const size_t PageSize = sys::Process::getPageSizeEstimate();
184
185 size_t StartOverlap =
186 (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
187
188 size_t TrimmedSize = M.allocatedSize();
189 TrimmedSize -= StartOverlap;
190 TrimmedSize -= TrimmedSize % PageSize;
191
192 sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
193 TrimmedSize);
194
195 assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
196 assert((Trimmed.allocatedSize() % PageSize) == 0);
197 assert(M.base() <= Trimmed.base() &&
198 Trimmed.allocatedSize() <= M.allocatedSize());
199
200 return Trimmed;
201}
202
203std::error_code
204SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
205 unsigned Permissions) {
206 for (sys::MemoryBlock &MB : MemGroup.PendingMem)
207 if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
208 return EC;
209
210 MemGroup.PendingMem.clear();
211
212 // Now go through free blocks and trim any of them that don't span the entire
213 // page because one of the pending blocks may have overlapped it.
214 for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
215 FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
216 // We cleared the PendingMem list, so all these pointers are now invalid
217 FreeMB.PendingPrefixIndex = (unsigned)-1;
218 }
219
220 // Remove all blocks which are now empty
221 erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
222 return FreeMB.Free.allocatedSize() == 0;
223 });
224
225 return std::error_code();
226}
227
229 for (sys::MemoryBlock &Block : CodeMem.PendingMem)
231 Block.allocatedSize());
232}
233
235 for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
236 for (sys::MemoryBlock &Block : Group->AllocatedMem)
237 MMapper->releaseMappedMemory(Block);
238 }
239}
240
242
243void SectionMemoryManager::anchor() {}
244
245namespace {
246// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
247// into sys::Memory.
248class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
249public:
250 sys::MemoryBlock
251 allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
252 size_t NumBytes, const sys::MemoryBlock *const NearBlock,
253 unsigned Flags, std::error_code &EC) override {
254 return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
255 }
256
257 std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
258 unsigned Flags) override {
260 }
261
262 std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
264 }
265};
266} // namespace
267
269 : MMapper(UnownedMM), OwnedMMapper(nullptr) {
270 if (!MMapper) {
271 OwnedMMapper = std::make_unique<DefaultMMapper>();
272 MMapper = OwnedMMapper.get();
273 }
274}
275
276} // namespace llvm
uint64_t Addr
uint64_t Size
static cl::opt< int > PageSize("imp-null-check-page-size", cl::desc("The page size of the target in bytes"), cl::init(4096), cl::Hidden)
Provides a library for accessing information about this process and other processes on the operating system.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Implementations of this interface are used by SectionMemoryManager to request pages from the operating system.
virtual std::error_code protectMappedMemory(const sys::MemoryBlock &Block, unsigned Flags)=0
This method sets the protection flags for a block of memory to the state specified by Flags.
virtual std::error_code releaseMappedMemory(sys::MemoryBlock &M)=0
This method releases a block of memory that was allocated with the allocateMappedMemory method.
virtual sys::MemoryBlock allocateMappedMemory(AllocationPurpose Purpose, size_t NumBytes, const sys::MemoryBlock *const NearBlock, unsigned Flags, std::error_code &EC)=0
This method attempts to allocate NumBytes bytes of virtual memory for Purpose.
AllocationPurpose
This enum describes the various reasons to allocate pages from allocateMappedMemory.
SectionMemoryManager(MemoryMapper *MM=nullptr)
Creates a SectionMemoryManager instance with MM as the associated memory mapper.
virtual void invalidateInstructionCache()
Invalidate instruction cache for code sections.
uint8_t * allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, bool isReadOnly) override
Allocates a memory block of (at least) the given size suitable for executable code.
uint8_t * allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName) override
Allocates a memory block of (at least) the given size suitable for executable code.
bool finalizeMemory(std::string *ErrMsg=nullptr) override
Update section-specific memory permissions and other attributes.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
This class encapsulates the notion of a memory block which has an address and a size.
Definition: Memory.h:31
void * base() const
Definition: Memory.h:36
size_t allocatedSize() const
The size as it was allocated.
Definition: Memory.h:39
static std::error_code releaseMappedMemory(MemoryBlock &Block)
This method releases a block of memory that was allocated with the allocateMappedMemory method.
static MemoryBlock allocateMappedMemory(size_t NumBytes, const MemoryBlock *const NearBlock, unsigned Flags, std::error_code &EC)
This method allocates a block of memory that is suitable for loading dynamically generated code (e....
static void InvalidateInstructionCache(const void *Addr, size_t Len)
InvalidateInstructionCache - Before the JIT can run a block of code that has been emitted it must inv...
static std::error_code protectMappedMemory(const MemoryBlock &Block, unsigned Flags)
This method sets the protection flags for a block of memory to the state specified by /p Flags.
static unsigned getPageSizeEstimate()
Get the process's estimated page size.
Definition: Process.h:61
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2082