11#include "llvm/Config/llvm-config.h"
15#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
39 return PageSize.takeError();
40 return std::make_unique<InProcessMemoryMapper>(*PageSize);
53 std::lock_guard<std::mutex> Lock(
Mutex);
54 Reservations[MB.base()].Size = MB.allocatedSize();
62 return Addr.toPtr<
char *>();
73 auto Size = Segment.ContentSize + Segment.ZeroFillSize;
81 std::memset((
Base + Segment.ContentSize).toPtr<
void *>(), 0,
82 Segment.ZeroFillSize);
94 if (!DeinitializeActions)
95 return OnInitialized(DeinitializeActions.takeError());
98 std::lock_guard<std::mutex> Lock(
Mutex);
101 Allocations[MinAddr].Size = MaxAddr - MinAddr;
102 Allocations[MinAddr].DeinitializationActions =
103 std::move(*DeinitializeActions);
104 Reservations[AI.
MappingBase.
toPtr<
void *>()].Allocations.push_back(MinAddr);
107 OnInitialized(MinAddr);
116 std::lock_guard<std::mutex> Lock(
Mutex);
121 Allocations[
Base].DeinitializationActions)) {
122 AllErr =
joinErrors(std::move(AllErr), std::move(Err));
137 OnDeinitialized(std::move(AllErr));
144 for (
auto Base : Bases) {
145 std::vector<ExecutorAddr> AllocAddrs;
148 std::lock_guard<std::mutex> Lock(
Mutex);
149 auto &R = Reservations[
Base.toPtr<
void *>()];
151 AllocAddrs.swap(R.Allocations);
155 std::promise<MSVCPError>
P;
156 auto F =
P.get_future();
170 std::lock_guard<std::mutex> Lock(
Mutex);
171 Reservations.
erase(
Base.toPtr<
void *>());
174 OnReleased(std::move(Err));
178 std::vector<ExecutorAddr> ReservationAddrs;
180 std::lock_guard<std::mutex> Lock(
Mutex);
182 ReservationAddrs.reserve(Reservations.
size());
183 for (
const auto &R : Reservations) {
188 std::promise<MSVCPError>
P;
189 auto F =
P.get_future();
190 release(ReservationAddrs, [&](
Error Err) {
P.set_value(std::move(Err)); });
199#if (!defined(LLVM_ON_UNIX) || defined(__ANDROID__)) && !defined(_WIN32)
200 llvm_unreachable(
"SharedMemoryMapper is not supported on this platform yet");
206#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
209 return PageSize.takeError();
211 return std::make_unique<SharedMemoryMapper>(EPC, SAs, *PageSize);
213 return make_error<StringError>(
214 "SharedMemoryMapper is not supported on this platform yet",
221#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
226 [
this, NumBytes, OnReserved = std::move(OnReserved)](
227 Error SerializationErr,
229 if (SerializationErr) {
231 return OnReserved(std::move(SerializationErr));
235 return OnReserved(
Result.takeError());
238 std::string SharedMemoryName;
239 std::tie(RemoteAddr, SharedMemoryName) = std::move(*
Result);
241 void *LocalAddr =
nullptr;
243#if defined(LLVM_ON_UNIX)
247 reinterpret_cast<const uint8_t *
>(SharedMemoryName.c_str()),
248 SharedMemoryName.size());
249 auto HashedName = BLAKE3::hash<sizeof(key_t)>(
Data);
250 key_t Key = *
reinterpret_cast<key_t *
>(HashedName.data());
252 shmget(Key, NumBytes, IPC_CREAT | __IPC_SHAREAS | 0700);
253 if (SharedMemoryId < 0) {
255 std::error_code(errno, std::generic_category())));
257 LocalAddr = shmat(SharedMemoryId,
nullptr, 0);
258 if (LocalAddr ==
reinterpret_cast<void *
>(-1)) {
260 std::error_code(errno, std::generic_category())));
263 int SharedMemoryFile = shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
264 if (SharedMemoryFile < 0) {
269 shm_unlink(SharedMemoryName.c_str());
271 LocalAddr = mmap(
nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
272 SharedMemoryFile, 0);
273 if (LocalAddr == MAP_FAILED) {
277 close(SharedMemoryFile);
282 std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
283 SharedMemoryName.end());
284 HANDLE SharedMemoryFile = OpenFileMappingW(
285 FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
286 if (!SharedMemoryFile)
290 MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
292 CloseHandle(SharedMemoryFile);
296 CloseHandle(SharedMemoryFile);
300 std::lock_guard<std::mutex> Lock(
Mutex);
301 Reservations.insert({RemoteAddr, {LocalAddr, NumBytes}});
309 OnReserved(make_error<StringError>(
310 "SharedMemoryMapper is not supported on this platform yet",
316 auto R = Reservations.upper_bound(
Addr);
317 assert(R != Reservations.begin() &&
"Attempt to prepare unreserved range");
322 return static_cast<char *
>(R->second.LocalAddr) +
Offset;
327 auto Reservation = Reservations.upper_bound(AI.
MappingBase);
328 assert(Reservation != Reservations.begin() &&
"Attempt to initialize unreserved range");
331 auto AllocationOffset = AI.
MappingBase - Reservation->first;
340 char *
Base =
static_cast<char *
>(Reservation->second.LocalAddr) +
341 AllocationOffset + Segment.Offset;
342 std::memset(
Base + Segment.ContentSize, 0, Segment.ZeroFillSize);
345 SegReq.
RAG = {Segment.AG.getMemProt(),
348 SegReq.
Size = Segment.ContentSize + Segment.ZeroFillSize;
356 [OnInitialized = std::move(OnInitialized)](
358 if (SerializationErr) {
360 return OnInitialized(std::move(SerializationErr));
363 OnInitialized(std::move(
Result));
365 SAs.
Instance, Reservation->first, std::move(FR));
374 [OnDeinitialized = std::move(OnDeinitialized)](
Error SerializationErr,
376 if (SerializationErr) {
378 return OnDeinitialized(std::move(SerializationErr));
381 OnDeinitialized(std::move(
Result));
388#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
392 std::lock_guard<std::mutex> Lock(
Mutex);
394 for (
auto Base : Bases) {
396#if defined(LLVM_ON_UNIX)
399 if (shmdt(Reservations[
Base].LocalAddr) < 0)
402 if (munmap(Reservations[
Base].LocalAddr, Reservations[
Base].
Size) != 0)
408 if (!UnmapViewOfFile(Reservations[
Base].LocalAddr))
414 Reservations.erase(
Base);
421 [OnReleased = std::move(OnReleased),
423 if (SerializationErr) {
426 joinErrors(std::move(Err), std::move(SerializationErr)));
433 OnReleased(make_error<StringError>(
434 "SharedMemoryMapper is not supported on this platform yet",
440 std::lock_guard<std::mutex> Lock(
Mutex);
441 for (
const auto &R : Reservations) {
443#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
446 shmdt(R.second.LocalAddr);
448 munmap(R.second.LocalAddr, R.second.Size);
453 UnmapViewOfFile(R.second.LocalAddr);
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static cl::opt< int > PageSize("imp-null-check-page-size", cl::desc("The page size of the target in bytes"), cl::init(4096), cl::Hidden)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
bool erase(const KeyT &Val)
Lightweight error class with error context and mandatory checking.
static ErrorSuccess success()
Create a success value.
Tagged union holding either a T or a Error.
Represents an address in the executor process.
static ExecutorAddr fromPtr(T *Ptr, UnwrapFn &&Unwrap=UnwrapFn())
Create an ExecutorAddr from the given pointer.
std::enable_if_t< std::is_pointer< T >::value, T > toPtr(WrapFn &&Wrap=WrapFn()) const
Cast this ExecutorAddr to a pointer of the given type.
ExecutorProcessControl supports interaction with a JIT target process.
void callSPSWrapperAsync(RunPolicyT &&Runner, ExecutorAddr WrapperFnAddr, SendResultT &&SendResult, const ArgTs &...Args)
Run a wrapper function using SPS to serialize the arguments and deserialize the results.
void initialize(AllocInfo &AI, OnInitializedFunction OnInitialized) override
Ensures executor memory is synchronized with working copy memory, sends functions to be called after ...
void reserve(size_t NumBytes, OnReservedFunction OnReserved) override
Reserves address space in executor process.
InProcessMemoryMapper(size_t PageSize)
~InProcessMemoryMapper() override
void deinitialize(ArrayRef< ExecutorAddr > Allocations, OnDeinitializedFunction OnDeInitialized) override
Runs previously specified deinitialization actions. Executor addresses returned by initialize should b...
static Expected< std::unique_ptr< InProcessMemoryMapper > > Create()
char * prepare(ExecutorAddr Addr, size_t ContentSize) override
Provides working memory.
void release(ArrayRef< ExecutorAddr > Reservations, OnReleasedFunction OnRelease) override
Release address space acquired through reserve()
static Expected< std::unique_ptr< SharedMemoryMapper > > Create(ExecutorProcessControl &EPC, SymbolAddrs SAs)
void reserve(size_t NumBytes, OnReservedFunction OnReserved) override
Reserves address space in executor process.
void deinitialize(ArrayRef< ExecutorAddr > Allocations, OnDeinitializedFunction OnDeInitialized) override
Runs previously specified deinitialization actions. Executor addresses returned by initialize should b...
~SharedMemoryMapper() override
void initialize(AllocInfo &AI, OnInitializedFunction OnInitialized) override
Ensures executor memory is synchronized with working copy memory, sends functions to be called after ...
char * prepare(ExecutorAddr Addr, size_t ContentSize) override
Provides working memory.
void release(ArrayRef< ExecutorAddr > Reservations, OnReleasedFunction OnRelease) override
Release address space acquired through reserve()
SharedMemoryMapper(ExecutorProcessControl &EPC, SymbolAddrs SAs, size_t PageSize)
This class encapsulates the notion of a memory block which has an address and a size.
static std::error_code releaseMappedMemory(MemoryBlock &Block)
This method releases a block of memory that was allocated with the allocateMappedMemory method.
static MemoryBlock allocateMappedMemory(size_t NumBytes, const MemoryBlock *const NearBlock, unsigned Flags, std::error_code &EC)
This method allocates a block of memory that is suitable for loading dynamically generated code (e....
static void InvalidateInstructionCache(const void *Addr, size_t Len)
InvalidateInstructionCache - Before the JIT can run a block of code that has been emitted it must inv...
static std::error_code protectMappedMemory(const MemoryBlock &Block, unsigned Flags)
This method sets the protection flags for a block of memory to the state specified by /p Flags.
static Expected< unsigned > getPageSize()
Get the process's page size.
unique_function is a type-erasing functor similar to std::function.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
shared::SPSExpected< shared::SPSExecutorAddr >(shared::SPSExecutorAddr, shared::SPSExecutorAddr, shared::SPSSharedMemoryFinalizeRequest) SPSExecutorSharedMemoryMapperServiceInitializeSignature
shared::SPSError(shared::SPSExecutorAddr, shared::SPSSequence< shared::SPSExecutorAddr >) SPSExecutorSharedMemoryMapperServiceReleaseSignature
shared::SPSExpected< shared::SPSTuple< shared::SPSExecutorAddr, shared::SPSString > >(shared::SPSExecutorAddr, uint64_t) SPSExecutorSharedMemoryMapperServiceReserveSignature
shared::SPSError(shared::SPSExecutorAddr, shared::SPSSequence< shared::SPSExecutorAddr >) SPSExecutorSharedMemoryMapperServiceDeinitializeSignature
Error runDeallocActions(ArrayRef< WrapperFunctionCall > DAs)
Run deallocation actions.
Expected< std::vector< WrapperFunctionCall > > runFinalizeActions(AllocActions &AAs)
Run finalize actions.
@ Finalize
Finalize memory should be allocated by the allocator, and then be overwritten and deallocated after a...
sys::Memory::ProtectionFlags toSysMemoryProtectionFlags(MemProt MP)
Convert a MemProt value to a corresponding sys::Memory::ProtectionFlags value.
This is an optimization pass for GlobalISel generic memory operations.
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
auto reverse(ContainerTy &&C)
Error joinErrors(Error E1, Error E2)
Concatenate errors.
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Error errorCodeToError(std::error_code EC)
Helper for converting an std::error_code to a Error.
std::error_code errnoAsErrorCode()
Helper to get errno as an std::error_code.
std::error_code mapWindowsError(unsigned EV)
Represents an address range in the executor process.
Represents a single allocation containing multiple segments and initialization and deinitialization a...
std::vector< SegInfo > Segments
shared::AllocActions Actions
ExecutorAddr Deinitialize
std::vector< SharedMemorySegFinalizeRequest > Segments
shared::AllocActions Actions