#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  elif !defined(__FreeBSD__)
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void *);
#endif
// Convert LLVM Memory::MF_* protection flags into POSIX PROT_* bits.
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, an executable page with no read permission faults when the
    // dcbf/icbi cache-flush instructions (which act as loads) touch it, so
    // keep the page readable on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}
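// Illustrative sketch, not part of the original file: how the MF_* request
// flags map onto the POSIX protection bits returned above (flag names from
// llvm::sys::Memory).
//
//   unsigned RW = llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE;
//   int Prot = getPosixProtectionFlags(RW);  // PROT_READ | PROT_WRITE
//   // MF_EXEC alone normally yields PROT_EXEC; on FreeBSD PROT_READ is kept
//   // so the dcbf/icbi-based cache flush can still read the page.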
MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;

  int MMFlags = MAP_PRIVATE |
#ifdef MAP_NORESERVE
                MAP_NORESERVE |
#endif
                MAP_ANON; // Anonymous mapping; fd stays -1.

  int Protect = getPosixProtectionFlags(PFlags);

#if defined(__NetBSD__) && defined(PROT_MPROTECT)
  Protect |= PROT_MPROTECT(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif

  // Use any near hint and the page size to set a page-aligned starting
  // address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                    NearBlock->size()
                              : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  // Rely on protectMappedMemory to invalidate the instruction cache when
  // executable memory is requested.
  if (PFlags & MF_EXEC) {
    EC = Memory::protectMappedMemory(Result, PFlags);
    if (EC != std::error_code())
      return MemoryBlock();
  }

  return Result;
}
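// Illustrative usage sketch, not part of the original file. Assumes a client
// that includes llvm/Support/Memory.h; error handling is elided.
//
//   using namespace llvm::sys;
//   std::error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       4096, /*NearBlock=*/nullptr, Memory::MF_READ | Memory::MF_WRITE, EC);
//   if (!EC) {
//     // ... write into MB.base(); MB.size() is rounded up to whole pages ...
//     Memory::releaseMappedMemory(MB);
//   }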
std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}
std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  static const size_t PageSize = Process::getPageSize();
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);
  uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize);
  uintptr_t End = alignAddr((uint8_t *)M.Address + M.Size, PageSize);

  bool InvalidateCache = (Flags & MF_EXEC);

#if defined(__arm__) || defined(__aarch64__)
  // Certain ARM implementations treat the icache-clear instruction as a
  // memory read, and the CPU faults when clearing the cache on a page with
  // no read permission. Temporarily add PROT_READ so the instruction cache
  // can be flushed, then apply the requested protection.
  if (InvalidateCache && !(Protect & PROT_READ)) {
    int Result = ::mprotect((void *)Start, End - Start, Protect | PROT_READ);
    if (Result != 0)
      return std::error_code(errno, std::generic_category());

    Memory::InvalidateInstructionCache(M.Address, M.Size);
    InvalidateCache = false;
  }
#endif

  int Result = ::mprotect((void *)Start, End - Start, Protect);
  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (InvalidateCache)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}
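// Illustrative sketch, not part of the original file: a typical JIT-style
// W^X sequence built on the two calls above. "CodeSize" and the code-copying
// step are placeholders.
//
//   std::error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       CodeSize, nullptr, Memory::MF_READ | Memory::MF_WRITE, EC);
//   // ... copy freshly emitted machine code into MB.base() ...
//   EC = Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
//   // Since MF_EXEC is requested, protectMappedMemory also flushes the
//   // instruction cache for the block before returning.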
/// InvalidateInstructionCache - Before the JIT can run a block of code that
/// has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  // Flush the data cache lines covering the range, then invalidate the
  // corresponding instruction cache lines.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}
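// Illustrative sketch, not part of the original file: why a JIT calls
// InvalidateInstructionCache before jumping into freshly written code.
// "emitCodeInto" is a placeholder for whatever fills the buffer MB.
//
//   size_t N = emitCodeInto(MB);                       // write instructions
//   Memory::InvalidateInstructionCache(MB.base(), N);  // flush icache
//   auto *Fn = reinterpret_cast<int (*)()>(MB.base());
//   int Result = Fn();  // safe to execute on ARM/AArch64/PowerPC now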