LLVM  9.0.0svn
SectionMemoryManager.cpp
Go to the documentation of this file.
1 //===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the section-based memory manager used by the MCJIT
10 // execution engine and RuntimeDyld
11 //
12 //===----------------------------------------------------------------------===//
13 
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"
18 
19 namespace llvm {
20 
22  unsigned Alignment,
23  unsigned SectionID,
25  bool IsReadOnly) {
26  if (IsReadOnly)
28  Size, Alignment);
29  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
30  Alignment);
31 }
32 
34  unsigned Alignment,
35  unsigned SectionID,
37  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
38  Alignment);
39 }
40 
41 uint8_t *SectionMemoryManager::allocateSection(
43  unsigned Alignment) {
44  if (!Alignment)
45  Alignment = 16;
46 
47  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
48 
49  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
50  uintptr_t Addr = 0;
51 
52  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
53  switch (Purpose) {
55  return CodeMem;
57  return RODataMem;
59  return RWDataMem;
60  }
61  llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
62  }();
63 
64  // Look in the list of free memory regions and use a block there if one
65  // is available.
66  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
67  if (FreeMB.Free.size() >= RequiredSize) {
68  Addr = (uintptr_t)FreeMB.Free.base();
69  uintptr_t EndOfBlock = Addr + FreeMB.Free.size();
70  // Align the address.
71  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
72 
73  if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
74  // The part of the block we're giving out to the user is now pending
75  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
76 
77  // Remember this pending block, such that future allocations can just
78  // modify it rather than creating a new one
79  FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
80  } else {
81  sys::MemoryBlock &PendingMB =
82  MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
83  PendingMB = sys::MemoryBlock(PendingMB.base(),
84  Addr + Size - (uintptr_t)PendingMB.base());
85  }
86 
87  // Remember how much free space is now left in this block
88  FreeMB.Free =
89  sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
90  return (uint8_t *)Addr;
91  }
92  }
93 
94  // No pre-allocated free block was large enough. Allocate a new memory region.
95  // Note that all sections get allocated as read-write. The permissions will
96  // be updated later based on memory group.
97  //
98  // FIXME: It would be useful to define a default allocation size (or add
99  // it as a constructor parameter) to minimize the number of allocations.
100  //
101  // FIXME: Initialize the Near member for each memory group to avoid
102  // interleaving.
103  std::error_code ec;
105  Purpose, RequiredSize, &MemGroup.Near,
107  if (ec) {
108  // FIXME: Add error propagation to the interface.
109  return nullptr;
110  }
111 
112  // Save this address as the basis for our next request
113  MemGroup.Near = MB;
114 
115  // Remember that we allocated this memory
116  MemGroup.AllocatedMem.push_back(MB);
117  Addr = (uintptr_t)MB.base();
118  uintptr_t EndOfBlock = Addr + MB.size();
119 
120  // Align the address.
121  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
122 
123  // The part of the block we're giving out to the user is now pending
124  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
125 
126  // The allocateMappedMemory may allocate much more memory than we need. In
127  // this case, we store the unused memory as a free memory block.
128  unsigned FreeSize = EndOfBlock - Addr - Size;
129  if (FreeSize > 16) {
130  FreeMemBlock FreeMB;
131  FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
132  FreeMB.PendingPrefixIndex = (unsigned)-1;
133  MemGroup.FreeMem.push_back(FreeMB);
134  }
135 
136  // Return aligned address
137  return (uint8_t *)Addr;
138 }
139 
140 bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
141  // FIXME: Should in-progress permissions be reverted if an error occurs?
142  std::error_code ec;
143 
144  // Make code memory executable.
145  ec = applyMemoryGroupPermissions(CodeMem,
147  if (ec) {
148  if (ErrMsg) {
149  *ErrMsg = ec.message();
150  }
151  return true;
152  }
153 
154  // Make read-only data memory read-only.
155  ec = applyMemoryGroupPermissions(RODataMem,
157  if (ec) {
158  if (ErrMsg) {
159  *ErrMsg = ec.message();
160  }
161  return true;
162  }
163 
164  // Read-write data memory already has the correct permissions
165 
166  // Some platforms with separate data cache and instruction cache require
167  // explicit cache flush, otherwise JIT code manipulations (like resolved
168  // relocations) will get to the data cache but not to the instruction cache.
170 
171  return false;
172 }
173 
175  static const size_t PageSize = sys::Process::getPageSize();
176 
177  size_t StartOverlap =
178  (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
179 
180  size_t TrimmedSize = M.size();
181  TrimmedSize -= StartOverlap;
182  TrimmedSize -= TrimmedSize % PageSize;
183 
184  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
185  TrimmedSize);
186 
187  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
188  assert((Trimmed.size() % PageSize) == 0);
189  assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size());
190 
191  return Trimmed;
192 }
193 
194 std::error_code
195 SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
196  unsigned Permissions) {
197  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
198  if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
199  return EC;
200 
201  MemGroup.PendingMem.clear();
202 
203  // Now go through free blocks and trim any of them that don't span the entire
204  // page because one of the pending blocks may have overlapped it.
205  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
206  FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
207  // We cleared the PendingMem list, so all these pointers are now invalid
208  FreeMB.PendingPrefixIndex = (unsigned)-1;
209  }
210 
211  // Remove all blocks which are now empty
212  MemGroup.FreeMem.erase(
213  remove_if(MemGroup.FreeMem,
214  [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }),
215  MemGroup.FreeMem.end());
216 
217  return std::error_code();
218 }
219 
221  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
223 }
224 
226  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
227  for (sys::MemoryBlock &Block : Group->AllocatedMem)
228  MMapper.releaseMappedMemory(Block);
229  }
230 }
231 
233 
234 void SectionMemoryManager::anchor() {}
235 
236 namespace {
237 // Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
238 // into sys::Memory.
239 class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
240 public:
242  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
243  size_t NumBytes, const sys::MemoryBlock *const NearBlock,
244  unsigned Flags, std::error_code &EC) override {
245  // allocateMappedMemory calls mmap(2). We round up a request size
246  // to page size to get extra space for free.
247  static const size_t PageSize = sys::Process::getPageSize();
248  size_t ReqBytes = (NumBytes + PageSize - 1) & ~(PageSize - 1);
249  return sys::Memory::allocateMappedMemory(ReqBytes, NearBlock, Flags, EC);
250  }
251 
252  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
253  unsigned Flags) override {
254  return sys::Memory::protectMappedMemory(Block, Flags);
255  }
256 
257  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
259  }
260 };
261 
262 DefaultMMapper DefaultMMapperInstance;
263 } // namespace
264 
266  : MMapper(MM ? *MM : DefaultMMapperInstance) {}
267 
268 } // namespace llvm
virtual std::error_code releaseMappedMemory(sys::MemoryBlock &M)=0
This method releases a block of memory that was allocated with the allocateMappedMemory method...
This class represents lattice values for constants.
Definition: AllocatorList.h:23
SectionMemoryManager(MemoryMapper *MM=nullptr)
Creates a SectionMemoryManager instance with MM as the associated memory mapper.
static std::error_code releaseMappedMemory(MemoryBlock &Block)
This method releases a block of memory that was allocated with the allocateMappedMemory method...
static MemoryBlock allocateMappedMemory(size_t NumBytes, const MemoryBlock *const NearBlock, unsigned Flags, std::error_code &EC)
This method allocates a block of memory that is suitable for loading dynamically generated code (e...
Implementations of this interface are used by SectionMemoryManager to request pages from the operating system.
void * base() const
Definition: Memory.h:32
static void InvalidateInstructionCache(const void *Addr, size_t Len)
InvalidateInstructionCache - Before the JIT can run a block of code that has been emitted it must inv...
AllocationPurpose
This enum describes the various reasons to allocate pages from allocateMappedMemory.
virtual void invalidateInstructionCache()
Invalidate instruction cache for code sections.
uint8_t * allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName) override
Allocates a memory block of (at least) the given size suitable for executable code.
uint8_t * allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, bool isReadOnly) override
Allocates a memory block of (at least) the given size suitable for data.
auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1225
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static unsigned getPageSize()
static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M)
This class encapsulates the notion of a memory block which has an address and a size.
Definition: Memory.h:28
virtual sys::MemoryBlock allocateMappedMemory(AllocationPurpose Purpose, size_t NumBytes, const sys::MemoryBlock *const NearBlock, unsigned Flags, std::error_code &EC)=0
This method attempts to allocate NumBytes bytes of virtual memory for Purpose.
Provides a library for accessing information about this process and other processes on the operating ...
bool finalizeMemory(std::string *ErrMsg=nullptr) override
Update section-specific memory permissions and other attributes.
uint32_t Size
Definition: Profile.cpp:46
virtual std::error_code protectMappedMemory(const sys::MemoryBlock &Block, unsigned Flags)=0
This method sets the protection flags for a block of memory to the state specified by Flags...
static cl::opt< int > PageSize("imp-null-check-page-size", cl::desc("The page size of the target in bytes"), cl::init(4096), cl::Hidden)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
size_t size() const
Definition: Memory.h:33
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
static std::error_code protectMappedMemory(const MemoryBlock &Block, unsigned Flags)
This method sets the protection flags for a block of memory to the state specified by /p Flags...