Memory.inc
//===- Win32/Memory.cpp - Win32 Memory Implementation -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 specific implementation of various Memory
// management utilities
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/WindowsError.h"

// The Windows.h header must be the last one included.
#include "llvm/Support/Windows/WindowsSupport.h"

static DWORD getWindowsProtectionFlags(unsigned Flags) {
  switch (Flags & llvm::sys::Memory::MF_RWE_MASK) {
  // Contrary to what you might expect, the Windows page protection flags
  // are not a bitwise combination of RWX values
  case llvm::sys::Memory::MF_READ:
    return PAGE_READONLY;
  case llvm::sys::Memory::MF_WRITE:
    // Note: PAGE_WRITE is not supported by VirtualProtect
    return PAGE_READWRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE:
    return PAGE_READWRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE_READ;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE_READWRITE;
  case llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE;
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PAGE_NOACCESS;
}

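// Illustrative mapping (not part of the original file): because the Win32
// protection constants are combined values rather than a bitmask, a request
// for readable and executable memory resolves to one dedicated constant
// instead of PAGE_READONLY OR-ed with an "execute" bit:
//
//   getWindowsProtectionFlags(llvm::sys::Memory::MF_READ |
//                             llvm::sys::Memory::MF_EXEC) == PAGE_EXECUTE_READ
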
// While we'd be happy to allocate single pages, the Windows allocation
// granularity may be larger than a single page (in practice, it is 64K)
// so mapping less than that will create an unreachable fragment of memory.
static size_t getAllocationGranularity() {
  SYSTEM_INFO Info;
  ::GetSystemInfo(&Info);
  if (Info.dwPageSize > Info.dwAllocationGranularity)
    return Info.dwPageSize;
  else
    return Info.dwAllocationGranularity;
}

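// Worked example (illustrative, not from the original file): with a typical
// 4 KiB page size and 64 KiB allocation granularity, the helper above returns
// 65536, so a 4096-byte request in allocateMappedMemory below becomes
// NumBlocks = 1 and AllocSize = 65536 rather than a 4 KiB mapping that would
// strand the rest of the 64 KiB region.
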
// Large/huge memory pages need explicit process permissions in order to be
// used. See https://blogs.msdn.microsoft.com/oldnewthing/20110128-00/?p=11643
// Also large pages need to be manually enabled on your OS. If all this is
// successful, we return the minimal large memory page size.
static size_t enableProcessLargePages() {
  HANDLE Token = 0;
  size_t LargePageMin = GetLargePageMinimum();
  if (LargePageMin)
    OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
                     &Token);
  if (!Token)
    return 0;
  LUID Luid;
  if (!LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &Luid)) {
    CloseHandle(Token);
    return 0;
  }
  TOKEN_PRIVILEGES TP{};
  TP.PrivilegeCount = 1;
  TP.Privileges[0].Luid = Luid;
  TP.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  if (!AdjustTokenPrivileges(Token, FALSE, &TP, 0, 0, 0)) {
    CloseHandle(Token);
    return 0;
  }
  DWORD E = GetLastError();
  CloseHandle(Token);
  if (E == ERROR_SUCCESS)
    return LargePageMin;
  return 0;
}

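// Illustrative note (not part of the original file): AdjustTokenPrivileges can
// return TRUE yet leave GetLastError() at ERROR_NOT_ALL_ASSIGNED when the
// account lacks SeLockMemoryPrivilege, which is why the helper above checks
// for ERROR_SUCCESS explicitly. Callers opt into large pages per allocation
// with MF_HUGE_HINT; when this setup fails, allocateMappedMemory below simply
// falls back to regular pages.
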
namespace llvm {
namespace sys {

//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code
//===----------------------------------------------------------------------===//

MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
                                         const MemoryBlock *const NearBlock,
                                         unsigned Flags, std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static size_t DefaultGranularity = getAllocationGranularity();
  static size_t LargePageGranularity = enableProcessLargePages();

  DWORD AllocType = MEM_RESERVE | MEM_COMMIT;
  bool HugePages = false;
  size_t Granularity = DefaultGranularity;

  if ((Flags & MF_HUGE_HINT) && LargePageGranularity > 0) {
    AllocType |= MEM_LARGE_PAGES;
    HugePages = true;
    Granularity = LargePageGranularity;
  }

  size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity;

  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                    NearBlock->allocatedSize()
                              : 0;

  // If the requested address is not aligned to the allocation granularity,
  // round up to get beyond NearBlock. VirtualAlloc would have rounded down.
  if (Start && Start % Granularity != 0)
    Start += Granularity - Start % Granularity;

  DWORD Protect = getWindowsProtectionFlags(Flags);

  size_t AllocSize = NumBlocks * Granularity;
  void *PA = ::VirtualAlloc(reinterpret_cast<void *>(Start), AllocSize,
                            AllocType, Protect);
  if (PA == NULL) {
    if (NearBlock || HugePages) {
      // Try again without the NearBlock hint and without large memory pages
      return allocateMappedMemory(NumBytes, NULL, Flags & ~MF_HUGE_HINT, EC);
    }
    EC = mapWindowsError(::GetLastError());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = PA;
  Result.AllocatedSize = AllocSize;
  Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? MF_HUGE_HINT : 0);

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, AllocSize);

  return Result;
}

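// Worked example for the NearBlock path (illustrative, not from the original
// file): if NearBlock->base() is 0x10000 and allocatedSize() is 0x1000, Start
// is 0x11000; with a 64 KiB granularity the rounding above bumps it to 0x20000
// before it is handed to VirtualAlloc, which would otherwise round the hint
// down into NearBlock itself.
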
std::error_code Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == 0 || M.AllocatedSize == 0)
    return std::error_code();

  if (!VirtualFree(M.Address, 0, MEM_RELEASE))
    return mapWindowsError(::GetLastError());

  M.Address = 0;
  M.AllocatedSize = 0;

  return std::error_code();
}

std::error_code Memory::protectMappedMemory(const MemoryBlock &M,
                                            unsigned Flags) {
  if (M.Address == 0 || M.AllocatedSize == 0)
    return std::error_code();

  DWORD Protect = getWindowsProtectionFlags(Flags);

  DWORD OldFlags;
  if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags))
    return mapWindowsError(::GetLastError());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);

  return std::error_code();
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr, size_t Len) {
  FlushInstructionCache(GetCurrentProcess(), Addr, Len);
}

} // namespace sys
} // namespace llvm
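
// Illustrative usage sketch (not part of the original Memory.inc): a minimal
// JIT-style client of the API above, assuming a translation unit that includes
// "llvm/Support/Memory.h". The function name is hypothetical. It allocates a
// writable block, emits a single x86 'ret' instruction as a stand-in for real
// code, flips the block to read/execute, calls it, and releases it.
static void exampleMappedMemoryUsage() {
  using namespace llvm::sys;

  std::error_code EC;
  MemoryBlock MB = Memory::allocateMappedMemory(
      4096, /*NearBlock=*/nullptr, Memory::MF_READ | Memory::MF_WRITE, EC);
  if (EC)
    return;

  // Emit code while the block is still writable.
  *static_cast<unsigned char *>(MB.base()) = 0xC3; // x86 'ret'

  // Switch to read/execute; protectMappedMemory invalidates the instruction
  // cache itself when MF_EXEC is requested.
  if (Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC))
    return;

  // Casting a data pointer to a function pointer is fine on Win32 targets.
  reinterpret_cast<void (*)()>(MB.base())();

  Memory::releaseMappedMemory(MB);
}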