Memory.inc (LLVM 6.0.0svn)
//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void*);
#endif

namespace {

int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function InvalidateInstruction-
    // Cache uses instructions dcbf and icbi, both of which are treated by
    // the processor as loads. If the page has no read permissions,
    // executing these instructions will result in a segmentation fault.
    // Somehow, this problem is not present on Linux, but it does happen
    // on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}
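
// Illustrative note (added; not part of the original file): the switch above
// matches exact flag combinations rather than decoding individual bits, so
// getPosixProtectionFlags(llvm::sys::Memory::MF_READ |
//                         llvm::sys::Memory::MF_WRITE) yields
// PROT_READ | PROT_WRITE, while a combination with no case of its own, such
// as MF_WRITE | MF_EXEC, falls through to llvm_unreachable.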

} // anonymous namespace

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;

  int MMFlags = MAP_PRIVATE |
#ifdef MAP_ANONYMOUS
                MAP_ANONYMOUS
#else
                MAP_ANON
#endif
                ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}
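
// Illustrative sketch (added; not part of the original file): a minimal
// caller of allocateMappedMemory, assuming the MF_* flags from
// llvm/Support/Memory.h. The error handling shown is one plausible choice,
// not the library's prescribed one.
//
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       1 << 20, /*NearBlock=*/nullptr,
//       sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   if (EC)
//     report_fatal_error("mmap failed: " + EC.message());
//   memset(MB.base(), 0, MB.size());   // size() is rounded up to whole pages
//   sys::Memory::releaseMappedMemory(MB);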

std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}

std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  static const size_t PageSize = Process::getPageSize();
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize);
  uintptr_t End = alignAddr((uint8_t *)M.Address + M.Size, PageSize);
  int Result = ::mprotect((void *)Start, End - Start, Protect);

  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}
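
// Worked example (added; not part of the original file): with 4096-byte
// pages, a block at Address = 0x5007 with Size = 0x100 gives
//   Start = alignAddr(0x5007 - 4096 + 1, 4096) = alignAddr(0x4008, 4096) = 0x5000
//   End   = alignAddr(0x5007 + 0x100, 4096)    = alignAddr(0x5107, 4096) = 0x6000
// so mprotect covers exactly the one page the block touches. The
// "- PageSize + 1" keeps Start on the block's own first page even when
// Address is already page-aligned.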

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;

  int flags = MAP_PRIVATE |
#ifdef MAP_ANONYMOUS
              MAP_ANONYMOUS
#else
              MAP_ANON
#endif
              ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : nullptr;

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#elif defined(__NetBSD__) && defined(PROT_MPROTECT)
  void *pa =
      ::mmap(start, PageSize * NumPages,
             PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC), flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return AllocateRWX(NumBytes, nullptr);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}
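
// Illustrative sketch (added; not part of the original file): how a simple
// JIT client might drive AllocateRWX. CodeBytes and CodeSize are
// hypothetical names; on Apple ARM the block comes back read/write, so the
// setExecutable call below flips it to RX (elsewhere it only invalidates
// the instruction cache and returns true).
//
//   std::string Err;
//   sys::MemoryBlock MB = sys::Memory::AllocateRWX(CodeSize, nullptr, &Err);
//   if (MB.base() == nullptr)
//     report_fatal_error(Err);
//   memcpy(MB.base(), CodeBytes, CodeSize);
//   sys::Memory::setExecutable(MB, &Err);
//   reinterpret_cast<void (*)()>(MB.base())();   // jump to the emitted code
//   sys::Memory::ReleaseRWX(MB, &Err);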

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}
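
// Worked example (added; not part of the original file): on PowerPC with
// LineSize = 32, Addr = 0x1005 and Len = 0x40 give
//   StartLine = 0x1005 & ~31               = 0x1000
//   EndLine   = (0x1005 + 0x40 + 31) & ~31 = 0x1060
// so both loops walk the three 32-byte lines at 0x1000, 0x1020 and 0x1040,
// i.e. every line touched by [0x1005, 0x1045). dcbf plus sync pushes
// modified data out of the data cache; icbi plus isync then discards any
// stale copies of those lines from the instruction cache.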

} // namespace sys
} // namespace llvm