Bug 1927209 - Try to handle fragmented memory better for large buffers in IPC, r=afranchuk,ipc-reviewers,jld

This adds a new type, `SharedMemoryCursor`, as well as platform support
for mapping subregions of shared memory handles. The cursor first attempts
to map the entire shared memory region, then backs off to progressively
smaller chunks until a mapping succeeds, reading and writing data through
whichever chunk is currently mapped.

Ideally, this should help reduce the chances of encountering memory
fragmentation issues when sending large JS structured clone buffers over
IPC.

Differential Revision: https://phabricator.services.mozilla.com/D233116
Nika Layzell
2025-02-10 19:30:09 +00:00
parent f98f0694e2
commit a5bc212f36
15 changed files with 448 additions and 61 deletions
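
To make the intended usage concrete, here is a minimal sketch of driving the new cursor: create a handle, stream bytes with Write/Read (the cursor maps chunks lazily and shrinks the chunk size if a mapping attempt fails), then recover the handle for serialization, as the MessageBufferWriter destructor does below. This is an illustration based on the API added in SharedMemoryCursor.h in this patch, not code from the tree; the function name and sizes are invented.

// Usage sketch, assuming the shared_memory API added in this patch.
#include "mozilla/ipc/SharedMemoryCursor.h"
#include "mozilla/ipc/SharedMemoryHandle.h"

using namespace mozilla::ipc;

bool RoundTripExample() {
  // Create a region and hand its handle to a cursor. The cursor owns the
  // handle until TakeHandle() is called.
  auto handle = shared_memory::Create(64 * 1024 * 1024);
  if (!handle.IsValid()) {
    return false;
  }
  shared_memory::Cursor cursor(std::move(handle));

  // Stream data through the cursor. Mappings are created on demand, one
  // chunk at a time, so a fragmented address space does not require one
  // large contiguous mapping.
  const char payload[] = "structured clone bytes";
  if (!cursor.Write(payload, sizeof(payload))) {
    return false;
  }

  // Rewind and read the same bytes back.
  char readback[sizeof(payload)];
  cursor.Seek(0);
  if (!cursor.Read(readback, sizeof(readback))) {
    return false;
  }

  // Recover the handle, e.g. to send it over IPC.
  shared_memory::Handle again = cursor.TakeHandle();
  return again.IsValid();
}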

View File

@@ -5,7 +5,7 @@
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "chrome/common/ipc_message_utils.h"
#include "mozilla/ipc/SharedMemory.h"
#include "mozilla/ipc/SharedMemoryCursor.h"
namespace IPC {
@@ -16,20 +16,14 @@ MessageBufferWriter::MessageBufferWriter(MessageWriter* writer,
// kMessageBufferShmemThreshold to avoid bloating the size of messages with
// small buffers.
if (full_len > kMessageBufferShmemThreshold) {
shmem_ = new mozilla::ipc::SharedMemory();
bool shmem_ok = shmem_->Create(full_len) && shmem_->Map(full_len);
auto handle = mozilla::ipc::shared_memory::Create(full_len);
bool shmem_ok = handle.IsValid();
writer->WriteBool(shmem_ok);
if (shmem_ok) {
if (!shmem_->WriteHandle(writer)) {
writer->FatalError("SharedMemory::WriteHandle failed");
return;
}
buffer_ = reinterpret_cast<char*>(shmem_->Memory());
shmem_cursor_ = mozilla::MakeUnique<mozilla::ipc::shared_memory::Cursor>(
std::move(handle));
MOZ_ASSERT(shmem_cursor_->IsValid());
} else {
// Creating or mapping the shared memory region failed, perhaps due to FD
// exhaustion or address space fragmentation. Fall back to trying to send
// data inline.
shmem_ = nullptr;
writer->NoteLargeBufferShmemFailure(full_len);
}
}
@@ -40,6 +34,13 @@ MessageBufferWriter::~MessageBufferWriter() {
if (remaining_ != 0) {
writer_->FatalError("didn't fully write message buffer");
}
// We couldn't write out the shared memory region until now, as the cursor
// needs to hold on to the handle to potentially re-map sub-regions while
// writing.
if (shmem_cursor_) {
IPC::WriteParam(writer_, shmem_cursor_->TakeHandle());
}
}
bool MessageBufferWriter::WriteBytes(const void* data, uint32_t len) {
@@ -51,12 +52,10 @@ bool MessageBufferWriter::WriteBytes(const void* data, uint32_t len) {
return false;
}
remaining_ -= len;
// If we're serializing using a shared memory region, `buffer_` will be
// initialized to point into that region.
if (buffer_) {
memcpy(buffer_, data, len);
buffer_ += len;
return true;
// If we're serializing using a shared memory region, `shmem_cursor_` will be
// initialized.
if (shmem_cursor_) {
return shmem_cursor_->Write(data, len);
}
return writer_->WriteBytes(data, len);
}
@@ -74,16 +73,22 @@ MessageBufferReader::MessageBufferReader(MessageReader* reader,
return;
}
if (shmem_ok) {
shmem_ = new mozilla::ipc::SharedMemory();
if (!shmem_->ReadHandle(reader)) {
reader->FatalError("SharedMemory::ReadHandle failed!");
mozilla::ipc::shared_memory::Handle handle;
if (!IPC::ReadParam(reader, &handle)) {
reader->FatalError("failed to read shared memory handle");
return;
}
if (!shmem_->Map(full_len)) {
reader->FatalError("SharedMemory::Map failed");
if (!handle.IsValid()) {
reader->FatalError("invalid shared memory handle");
return;
}
buffer_ = reinterpret_cast<const char*>(shmem_->Memory());
if (handle.Size() < full_len) {
reader->FatalError("too small shared memory handle");
return;
}
shmem_cursor_ = mozilla::MakeUnique<mozilla::ipc::shared_memory::Cursor>(
std::move(handle));
MOZ_ASSERT(shmem_cursor_->IsValid());
}
}
remaining_ = full_len;
@@ -104,12 +109,10 @@ bool MessageBufferReader::ReadBytesInto(void* data, uint32_t len) {
return false;
}
remaining_ -= len;
// If we're serializing using a shared memory region, `buffer_` will be
// initialized to point into that region.
if (buffer_) {
memcpy(data, buffer_, len);
buffer_ += len;
return true;
// If we're serializing using a shared memory region, `shmem_cursor_` will be
// initialized.
if (shmem_cursor_) {
return shmem_cursor_->Read(data, len);
}
return reader_->ReadBytesInto(data, len);
}

View File

@@ -33,6 +33,9 @@ class IProtocol;
template <typename P>
struct IPDLParamTraits;
class SharedMemory;
namespace shared_memory {
class Cursor;
}
// Implemented in ProtocolUtils.cpp
MOZ_NEVER_INLINE void PickleFatalError(const char* aMsg, IProtocol* aActor);
@@ -528,8 +531,7 @@ class MOZ_STACK_CLASS MessageBufferWriter {
private:
MessageWriter* writer_;
RefPtr<mozilla::ipc::SharedMemory> shmem_;
char* buffer_ = nullptr;
mozilla::UniquePtr<mozilla::ipc::shared_memory::Cursor> shmem_cursor_;
uint32_t remaining_ = 0;
};
@@ -560,8 +562,7 @@ class MOZ_STACK_CLASS MessageBufferReader {
private:
MessageReader* reader_;
RefPtr<mozilla::ipc::SharedMemory> shmem_;
const char* buffer_ = nullptr;
mozilla::UniquePtr<mozilla::ipc::shared_memory::Cursor> shmem_cursor_;
uint32_t remaining_ = 0;
};

View File

@@ -0,0 +1,105 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SharedMemoryCursor.h"
namespace mozilla::ipc::shared_memory {
bool Cursor::Read(void* aBuffer, size_t aCount) {
return Consume(aBuffer, aCount, /* aWriteToShmem */ false);
}
bool Cursor::Write(const void* aBuffer, size_t aCount) {
return Consume(const_cast<void*>(aBuffer), aCount, /* aWriteToShmem */ true);
}
void Cursor::Seek(uint64_t aOffset) {
MOZ_ASSERT(aOffset <= Size());
// Update our offset, and invalidate `mMapping` if our current chunk changed.
uint64_t oldChunkStart = ChunkStart();
mOffset = aOffset;
if (mMapping && oldChunkStart != ChunkStart()) {
mMapping = nullptr;
}
}
Handle Cursor::TakeHandle() {
mMapping = nullptr;
return std::move(mHandle);
}
void Cursor::SetChunkSize(size_t aChunkSize) {
MOZ_ASSERT(IsPowerOfTwo(aChunkSize),
"Cannot specify non power-of-two maximum chunk size");
MOZ_ASSERT(aChunkSize >= SystemAllocationGranularity(),
"Cannot specify a chunk size which is smaller than the system "
"allocation granularity");
mChunkSize = aChunkSize;
mMapping = nullptr; // Invalidate any existing mappings.
}
bool Cursor::Consume(void* aBuffer, size_t aCount, bool aWriteToShmem) {
if (aCount > Remaining()) {
NS_WARNING("count too large");
return false;
}
size_t consumed = 0;
while (consumed < aCount) {
// Ensure we have a valid mapping each trip through the loop. This will
// automatically back off on chunk size to avoid mapping failure.
if (!EnsureMapping()) {
return false;
}
// Determine how many of the requested bytes are available in mMapping, and
// perform the operation on them.
size_t mappingOffset = ChunkOffset();
size_t mappingRemaining = mMapping.Size() - mappingOffset;
size_t toCopy = std::min<size_t>(mappingRemaining, aCount - consumed);
void* shmemPtr = static_cast<char*>(mMapping.Data()) + mappingOffset;
void* bufferPtr = static_cast<char*>(aBuffer) + consumed;
if (aWriteToShmem) {
memcpy(shmemPtr, bufferPtr, toCopy);
} else {
memcpy(bufferPtr, shmemPtr, toCopy);
}
// Seek and advance offsets. This will invalidate our mapping if it no
// longer applies to the current chunk.
Seek(mOffset + toCopy);
consumed += toCopy;
}
return true;
}
bool Cursor::EnsureMapping() {
MOZ_ASSERT(mHandle.IsValid());
while (!mMapping) {
// Attempt to map at the current chunk size.
uint64_t chunkStart = ChunkStart();
size_t chunkSize = std::min<uint64_t>(ChunkSize(), Size() - chunkStart);
mMapping = mHandle.MapSubregion(chunkStart, chunkSize);
if (MOZ_UNLIKELY(!mMapping)) {
// If we failed to map a single allocation granularity, we can't go
// smaller, so give up.
if (chunkSize <= SystemAllocationGranularity()) {
NS_WARNING(
"Failed to map the smallest allocation granularity of shared "
"memory region!");
return false;
}
// Try to allocate a smaller chunk next time.
mChunkSize = RoundUpPow2(chunkSize) >> 1;
}
}
return true;
}
} // namespace mozilla::ipc::shared_memory
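
As a rough illustration of the back-off in EnsureMapping above, the standalone sketch below simulates the chunk-size reduction: try a mapping, and halve the chunk size (after rounding up to a power of two, mirroring RoundUpPow2(chunkSize) >> 1) until either a mapping succeeds or the size drops to the allocation granularity. TryMap, kGranularity, and the 4 MiB failure threshold are hypothetical stand-ins for Handle::MapSubregion, SystemAllocationGranularity(), and real address-space pressure.

// Standalone illustration (C++20) of the chunk-size back-off strategy.
#include <bit>
#include <cstddef>
#include <cstdio>
#include <functional>

constexpr size_t kGranularity = 64 * 1024;  // assumed allocation granularity

bool EnsureMappingSketch(size_t chunkSize,
                         const std::function<bool(size_t)>& TryMap) {
  while (true) {
    if (TryMap(chunkSize)) {
      return true;  // mapped a chunk of this size
    }
    if (chunkSize <= kGranularity) {
      return false;  // cannot go smaller than one allocation granule
    }
    // Same reduction as the real code: round up to a power of two, then halve.
    chunkSize = std::bit_ceil(chunkSize) >> 1;
  }
}

int main() {
  // Pretend the address space only has room for 4 MiB contiguous mappings.
  auto tryMap = [](size_t size) {
    std::printf("trying %zu bytes\n", size);
    return size <= (4u << 20);
  };
  // Starting from the 64-bit default of 1 GiB, the attempts halve:
  // 1 GiB, 512 MiB, ..., 4 MiB (succeeds).
  return EnsureMappingSketch(size_t(1) << 30, tryMap) ? 0 : 1;
}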

View File

@@ -0,0 +1,91 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_ipc_SharedMemoryCursor_h
#define mozilla_ipc_SharedMemoryCursor_h
#include "mozilla/ipc/SharedMemoryHandle.h"
#include "mozilla/ipc/SharedMemoryMapping.h"
namespace mozilla::ipc::shared_memory {
// The `Cursor` is a similar type to a mutable `Mapping`, in that it
// provides read/write access to the contents of a shared memory region.
// However, it can recover from situations where address fragmentation means
// that mapping the full shared memory region fails, by instead mapping the
// region one chunk at a time and seeking around within it.
//
// Because of this, the `Cursor` does not provide direct access to the shared
// memory region.
//
// NOTE: Cursor currently only operates on mutable mappings, even when reading.
// It can be generalized in the future if it would be found to be useful.
class Cursor {
public:
// Default constructor for invalid cursor. All reads and writes will fail.
Cursor() = default;
// Construct a new Cursor which can be used to read from or write to the
// shared memory region indicated by aHandle.
explicit Cursor(Handle&& aHandle) : mHandle(std::move(aHandle)) {}
bool IsValid() const { return mHandle.IsValid(); }
uint64_t Size() const { return mHandle.Size(); }
uint64_t Offset() const { return mOffset; }
uint64_t Remaining() const { return Size() - Offset(); }
// Read aCount bytes into aBuffer from the shared memory region, advancing the
// internal offset. Returns `false` if this fails for any reason.
bool Read(void* aBuffer, size_t aCount);
// Write aCount bytes from aBuffer into the shared memory region, advancing
// the internal offset. Returns `false` if this fails for any reason.
bool Write(const void* aBuffer, size_t aCount);
// Seek the Cursor to a given offset in the shared memory region.
// aOffset must be less than Size().
void Seek(uint64_t aOffset);
// Invalidate the Cursor, and return the underlying handle.
Handle TakeHandle();
// Set the chunk size used when mapping sub-regions of the shared memory
// region in this Cursor. This is intended to be used for testing purposes.
// The chunk size must be a power of two, and at least
// SystemAllocationGranularity().
void SetChunkSize(size_t aChunkSize);
private:
// Default to mapping at most 1GiB/256MiB, depending on address space size.
#ifdef HAVE_64BIT_BUILD
static constexpr size_t kDefaultMaxChunkSize = size_t(1) << 30; // 1GiB
#else
static constexpr size_t kDefaultMaxChunkSize = size_t(1) << 28; // 256MiB
#endif
size_t ChunkSize() const { return mChunkSize; }
uint64_t ChunkOffsetMask() const { return uint64_t(ChunkSize()) - 1; }
uint64_t ChunkStartMask() const { return ~ChunkOffsetMask(); }
size_t ChunkOffset() const { return Offset() & ChunkOffsetMask(); }
uint64_t ChunkStart() const { return Offset() & ChunkStartMask(); }
bool Consume(void* aBuffer, size_t aCount, bool aWriteToShmem);
bool EnsureMapping();
// Shared memory handle this Cursor allows accessing.
Handle mHandle;
// Memory map for the currently active chunk. Lazily initialized.
Mapping mMapping;
// Absolute offset into the shared memory handle.
uint64_t mOffset = 0;
// Current size of each chunk. Always a power of two. May be reduced in
// response to allocation failures.
size_t mChunkSize = kDefaultMaxChunkSize;
};
} // namespace mozilla::ipc::shared_memory
#endif // mozilla_ipc_SharedMemoryCursor_h
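
For reference, the mask arithmetic in ChunkStart()/ChunkOffset() above splits an absolute offset into a chunk-aligned base and a remainder within the chunk. The small check below works through it with an assumed 1 MiB chunk size; the offsets are arbitrary values chosen only for illustration.

// Worked example of the chunk mask arithmetic, with an assumed 1 MiB chunk.
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t chunkSize = size_t(1) << 20;           // 1 MiB, power of two
  const uint64_t offsetMask = uint64_t(chunkSize) - 1;
  const uint64_t startMask = ~offsetMask;

  const uint64_t offset = (3 << 20) + 12345;          // 12345 bytes into chunk 3
  assert((offset & startMask) == uint64_t(3) << 20);  // ChunkStart()
  assert((offset & offsetMask) == 12345);             // ChunkOffset()

  // Seeking within the same chunk keeps the mapping; crossing into chunk 4
  // changes ChunkStart(), so Cursor::Seek drops the current mapping.
  const uint64_t sameChunk = (3 << 20) + 99999;
  const uint64_t nextChunk = 4 << 20;
  assert((sameChunk & startMask) == (offset & startMask));
  assert((nextChunk & startMask) != (offset & startMask));
  return 0;
}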

View File

@@ -99,10 +99,20 @@ Mapping Handle::Map(void* aFixedAddress) const {
return Mapping(*this, aFixedAddress);
}
Mapping Handle::MapSubregion(uint64_t aOffset, size_t aSize,
void* aFixedAddress) const {
return Mapping(*this, aOffset, aSize, aFixedAddress);
}
ReadOnlyMapping ReadOnlyHandle::Map(void* aFixedAddress) const {
return ReadOnlyMapping(*this, aFixedAddress);
}
ReadOnlyMapping ReadOnlyHandle::MapSubregion(uint64_t aOffset, size_t aSize,
void* aFixedAddress) const {
return ReadOnlyMapping(*this, aOffset, aSize, aFixedAddress);
}
FreezableHandle::~FreezableHandle() {
NS_WARNING_ASSERTION(!IsValid(), "freezable shared memory was never frozen");
}
@@ -124,6 +134,11 @@ FreezableMapping FreezableHandle::Map(void* aFixedAddress) && {
return FreezableMapping(std::move(*this), aFixedAddress);
}
FreezableMapping FreezableHandle::MapSubregion(uint64_t aOffset, size_t aSize,
void* aFixedAddress) && {
return FreezableMapping(std::move(*this), aOffset, aSize, aFixedAddress);
}
Handle Create(uint64_t aSize) {
Handle h;
const auto success = Platform::Create(h, aSize);

View File

@@ -107,6 +107,12 @@ struct Handle : HandleBase {
* Map the shared memory region into memory.
*/
struct Mapping Map(void* aFixedAddress = nullptr) const;
/**
* Map a subregion of the shared memory region into memory.
*/
struct Mapping MapSubregion(uint64_t aOffset, size_t aSize,
void* aFixedAddress = nullptr) const;
};
/**
@@ -128,6 +134,12 @@ struct ReadOnlyHandle : HandleBase {
* Map the shared memory region into memory.
*/
struct ReadOnlyMapping Map(void* aFixedAddress = nullptr) const;
/**
* Map a subregion of the shared memory region into memory.
*/
struct ReadOnlyMapping MapSubregion(uint64_t aOffset, size_t aSize,
void* aFixedAddress = nullptr) const;
};
/**
@@ -162,6 +174,12 @@ struct FreezableHandle : HandleBase {
*/
struct FreezableMapping Map(void* aFixedAddress = nullptr) &&;
/**
* Map a subregion of the shared memory region into memory.
*/
struct FreezableMapping MapSubregion(uint64_t aOffset, size_t aSize,
void* aFixedAddress = nullptr) &&;
friend class Platform;
#if !defined(XP_DARWIN) && !defined(XP_WIN) && !defined(ANDROID)
private:

View File

@@ -88,14 +88,30 @@ bool MappingBase::Map(const HandleBase& aHandle, void* aFixedAddress,
// call will fail).
CheckedInt<size_t> checkedSize(aHandle.Size());
if (!checkedSize.isValid()) {
MOZ_LOG_FMT(gSharedMemoryLog, LogLevel::Warning,
MOZ_LOG_FMT(gSharedMemoryLog, LogLevel::Error,
"handle size to map exceeds address space size");
return false;
}
if (auto mem = Platform::Map(aHandle, aFixedAddress, aReadOnly)) {
return MapSubregion(aHandle, /* aOffset */ 0, checkedSize.value(),
aFixedAddress, aReadOnly);
}
bool MappingBase::MapSubregion(const HandleBase& aHandle, uint64_t aOffset,
size_t aSize, void* aFixedAddress,
bool aReadOnly) {
CheckedInt<uint64_t> endOffset(aOffset);
endOffset += aSize;
if (!endOffset.isValid() || endOffset.value() > aHandle.Size()) {
MOZ_LOG_FMT(gSharedMemoryLog, LogLevel::Error,
"cannot map region exceeding aHandle.Size()");
return false;
}
if (auto mem =
Platform::Map(aHandle, aOffset, aSize, aFixedAddress, aReadOnly)) {
mMemory = *mem;
mSize = checkedSize.value();
mSize = aSize;
MappingReporter::mapped += mSize;
return true;
}
@@ -106,11 +122,22 @@ Mapping::Mapping(const Handle& aHandle, void* aFixedAddress) {
Map(aHandle, aFixedAddress, false);
}
Mapping::Mapping(const Handle& aHandle, uint64_t aOffset, size_t aSize,
void* aFixedAddress) {
MapSubregion(aHandle, aOffset, aSize, aFixedAddress, false);
}
ReadOnlyMapping::ReadOnlyMapping(const ReadOnlyHandle& aHandle,
void* aFixedAddress) {
Map(aHandle, aFixedAddress, true);
}
ReadOnlyMapping::ReadOnlyMapping(const ReadOnlyHandle& aHandle,
uint64_t aOffset, size_t aSize,
void* aFixedAddress) {
MapSubregion(aHandle, aOffset, aSize, aFixedAddress, true);
}
FreezableMapping::FreezableMapping(FreezableHandle&& aHandle,
void* aFixedAddress) {
if (Map(aHandle, aFixedAddress, false)) {
@@ -118,6 +145,13 @@ FreezableMapping::FreezableMapping(FreezableHandle&& aHandle,
}
}
FreezableMapping::FreezableMapping(FreezableHandle&& aHandle, uint64_t aOffset,
size_t aSize, void* aFixedAddress) {
if (MapSubregion(aHandle, aOffset, aSize, aFixedAddress, false)) {
mHandle = std::move(aHandle);
}
}
std::tuple<Mapping, ReadOnlyHandle> FreezableMapping::Freeze() && {
auto handle = std::move(mHandle);
return std::make_tuple(std::move(*this).ConvertTo<Mapping>(),
@@ -136,6 +170,10 @@ void* FindFreeAddressSpace(size_t aSize) {
size_t SystemPageSize() { return Platform::PageSize(); }
size_t SystemAllocationGranularity() {
return Platform::AllocationGranularity();
}
size_t PageAlignedSize(size_t aMinimum) {
const size_t pageSize = Platform::PageSize();
size_t nPagesNeeded = size_t(ceil(double(aMinimum) / double(pageSize)));

View File

@@ -87,6 +87,8 @@ class MappingBase {
}
bool Map(const HandleBase& aHandle, void* aFixedAddress, bool aReadOnly);
bool MapSubregion(const HandleBase& aHandle, uint64_t aOffset, size_t aSize,
void* aFixedAddress, bool aReadOnly);
template <typename Derived>
Derived ConvertTo() && {
@@ -118,6 +120,8 @@ struct Mapping : MappingBase {
MOZ_IMPLICIT Mapping(std::nullptr_t) {}
explicit Mapping(const Handle& aHandle, void* aFixedAddress = nullptr);
Mapping(const Handle& aHandle, uint64_t aOffset, size_t aSize,
void* aFixedAddress = nullptr);
using MappingBase::release;
};
@@ -134,6 +138,8 @@ struct ReadOnlyMapping : MappingBase {
explicit ReadOnlyMapping(const ReadOnlyHandle& aHandle,
void* aFixedAddress = nullptr);
ReadOnlyMapping(const ReadOnlyHandle& aHandle, uint64_t aOffset, size_t aSize,
void* aFixedAddress = nullptr);
};
/**
@@ -154,6 +160,8 @@ struct FreezableMapping : MappingBase {
*/
explicit FreezableMapping(FreezableHandle&& aHandle,
void* aFixedAddress = nullptr);
FreezableMapping(FreezableHandle&& aHandle, uint64_t aOffset, size_t aSize,
void* aFixedAddress = nullptr);
/**
* Freeze the shared memory region.
@@ -211,6 +219,14 @@ void* FindFreeAddressSpace(size_t aSize);
*/
size_t SystemPageSize();
/**
* Get the system allocation granularity.
*
* This may be distinct from the page size, and controls the required
* alignment for fixed mapping addresses and shared memory offsets.
*/
size_t SystemAllocationGranularity();
/**
* Return a size which is page-aligned and can fit at least `minimum` bytes.
*

View File

@@ -81,14 +81,16 @@ class Platform {
* Map the given handle with the given size and fixed address.
*
* @param aHandle The handle to map.
* @param aOffset Offset into the shared memory region to map.
* @param aSize Size of the shared memory region to map.
* @param aFixedAddress The address at which to map the memory, or nullptr to
* map anywhere.
* @param aReadOnly Whether the mapping should be read-only.
*
* @returns The location of the mapping.
*/
static Maybe<void*> Map(const HandleBase& aHandle, void* aFixedAddress,
bool aReadOnly);
static Maybe<void*> Map(const HandleBase& aHandle, uint64_t aOffset,
size_t aSize, void* aFixedAddress, bool aReadOnly);
/**
* Unmap previously mapped memory.
@@ -122,6 +124,13 @@ class Platform {
* Return the page size of the system.
*/
static size_t PageSize();
/**
* Return the allocation granularity of the system.
* This may be distinct from the page size, and controls the required
* alignment for fixed mapping addresses and shared memory offsets.
*/
static size_t AllocationGranularity();
};
} // namespace mozilla::ipc::shared_memory

View File

@@ -78,13 +78,13 @@ bool Platform::Freeze(FreezableHandle& aHandle) {
return true;
}
Maybe<void*> Platform::Map(const HandleBase& aHandle, void* aFixedAddress,
bool aReadOnly) {
Maybe<void*> Platform::Map(const HandleBase& aHandle, uint64_t aOffset,
size_t aSize, void* aFixedAddress, bool aReadOnly) {
// Don't use MAP_FIXED when a fixed_address was specified, since that can
// replace pages that are already mapped at that address.
void* mem = mmap(aFixedAddress, aHandle.Size(),
PROT_READ | (aReadOnly ? 0 : PROT_WRITE), MAP_SHARED,
aHandle.mHandle.get(), 0);
void* mem =
mmap(aFixedAddress, aSize, PROT_READ | (aReadOnly ? 0 : PROT_WRITE),
MAP_SHARED, aHandle.mHandle.get(), aOffset);
if (mem == MAP_FAILED) {
MOZ_LOG_FMT(gSharedMemoryLog, LogLevel::Warning, "call to mmap failed: {}",
@@ -93,7 +93,7 @@ Maybe<void*> Platform::Map(const HandleBase& aHandle, void* aFixedAddress,
}
if (aFixedAddress && mem != aFixedAddress) {
DebugOnly<bool> munmap_succeeded = munmap(mem, aHandle.Size()) == 0;
DebugOnly<bool> munmap_succeeded = munmap(mem, aSize) == 0;
MOZ_ASSERT(munmap_succeeded, "call to munmap failed");
return Nothing();
}
@@ -123,6 +123,8 @@ void* Platform::FindFreeAddressSpace(size_t aSize) {
size_t Platform::PageSize() { return sysconf(_SC_PAGESIZE); }
size_t Platform::AllocationGranularity() { return PageSize(); }
bool Platform::IsSafeToMap(const PlatformHandle&) { return true; }
} // namespace mozilla::ipc::shared_memory

View File

@@ -91,7 +91,8 @@ PlatformHandle Platform::CloneHandle(const PlatformHandle& aHandle) {
return mozilla::RetainMachSendRight(aHandle.get());
}
static Maybe<void*> MapMemory(size_t aSize, void* aFixedAddress,
static Maybe<void*> MapMemory(uint64_t aOffset, size_t aSize,
void* aFixedAddress,
const mozilla::UniqueMachSendRight& aPort,
bool aReadOnly) {
kern_return_t kr;
@@ -104,7 +105,7 @@ static Maybe<void*> MapMemory(size_t aSize, void* aFixedAddress,
kr = mach_vm_map(mach_task_self(), &address, round_page(aSize), 0,
aFixedAddress ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
aPort.get(), 0, false, vmProtection, vmProtection,
aPort.get(), aOffset, false, vmProtection, vmProtection,
VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
if (!aFixedAddress) {
@@ -137,7 +138,7 @@ bool Platform::Freeze(FreezableHandle& aHandle) {
mozilla::UniqueMachSendRight port;
// Temporarily map memory (as readonly) to get an address.
auto memory = MapMemory(memoryObjectSize, nullptr, aHandle.mHandle, true);
auto memory = MapMemory(0, memoryObjectSize, nullptr, aHandle.mHandle, true);
if (!memory) {
return false;
}
@@ -168,9 +169,9 @@ bool Platform::Freeze(FreezableHandle& aHandle) {
return true;
}
Maybe<void*> Platform::Map(const HandleBase& aHandle, void* aFixedAddress,
bool aReadOnly) {
return MapMemory(aHandle.Size(), aFixedAddress, aHandle.mHandle, aReadOnly);
Maybe<void*> Platform::Map(const HandleBase& aHandle, uint64_t aOffset,
size_t aSize, void* aFixedAddress, bool aReadOnly) {
return MapMemory(aOffset, aSize, aFixedAddress, aHandle.mHandle, aReadOnly);
}
void Platform::Unmap(void* aMemory, size_t aSize) {
@@ -220,6 +221,8 @@ size_t Platform::PageSize() {
#endif
}
size_t Platform::AllocationGranularity() { return PageSize(); }
bool Platform::IsSafeToMap(const PlatformHandle&) { return true; }
} // namespace mozilla::ipc::shared_memory

View File

@@ -413,13 +413,13 @@ bool Platform::Freeze(FreezableHandle& aHandle) {
return true;
}
Maybe<void*> Platform::Map(const HandleBase& aHandle, void* aFixedAddress,
bool aReadOnly) {
Maybe<void*> Platform::Map(const HandleBase& aHandle, uint64_t aOffset,
size_t aSize, void* aFixedAddress, bool aReadOnly) {
// Don't use MAP_FIXED when a fixed_address was specified, since that can
// replace pages that are already mapped at that address.
void* mem = mmap(aFixedAddress, aHandle.Size(),
PROT_READ | (aReadOnly ? 0 : PROT_WRITE), MAP_SHARED,
aHandle.mHandle.get(), 0);
void* mem =
mmap(aFixedAddress, aSize, PROT_READ | (aReadOnly ? 0 : PROT_WRITE),
MAP_SHARED, aHandle.mHandle.get(), aOffset);
if (mem == MAP_FAILED) {
MOZ_LOG_FMT(gSharedMemoryLog, LogLevel::Warning, "call to mmap failed: {}",
@@ -428,7 +428,7 @@ Maybe<void*> Platform::Map(const HandleBase& aHandle, void* aFixedAddress,
}
if (aFixedAddress && mem != aFixedAddress) {
DebugOnly<bool> munmap_succeeded = munmap(mem, aHandle.Size()) == 0;
DebugOnly<bool> munmap_succeeded = munmap(mem, aSize) == 0;
MOZ_ASSERT(munmap_succeeded, "call to munmap failed");
return Nothing();
}
@@ -458,6 +458,8 @@ void* Platform::FindFreeAddressSpace(size_t aSize) {
size_t Platform::PageSize() { return sysconf(_SC_PAGESIZE); }
size_t Platform::AllocationGranularity() { return PageSize(); }
bool Platform::IsSafeToMap(const PlatformHandle&) { return true; }
} // namespace mozilla::ipc::shared_memory

View File

@@ -197,12 +197,14 @@ bool Platform::Freeze(FreezableHandle& aHandle) {
return true;
}
Maybe<void*> Platform::Map(const HandleBase& aHandle, void* aFixedAddress,
bool aReadOnly) {
Maybe<void*> Platform::Map(const HandleBase& aHandle, uint64_t aOffset,
size_t aSize, void* aFixedAddress, bool aReadOnly) {
DWORD fileOffsetHigh = (aOffset >> 32) & 0xffffffff;
DWORD fileOffsetLow = aOffset & 0xffffffff;
void* mem = ::MapViewOfFileEx(
aHandle.mHandle.get(),
aReadOnly ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE, 0, 0,
aHandle.Size(), aFixedAddress);
aReadOnly ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
fileOffsetHigh, fileOffsetLow, aSize, aFixedAddress);
if (mem) {
MOZ_ASSERT(!aFixedAddress || mem == aFixedAddress,
"MapViewOfFileEx returned an expected address");
@@ -242,6 +244,12 @@ size_t Platform::PageSize() {
return si.dwPageSize;
}
size_t Platform::AllocationGranularity() {
SYSTEM_INFO si;
::GetSystemInfo(&si);
return si.dwAllocationGranularity;
}
bool Platform::IsSafeToMap(const PlatformHandle& aHandle) {
return IsSectionSafeToMap(aHandle.get());
}

View File

@@ -57,6 +57,7 @@ EXPORTS.mozilla.ipc += [
"ScopedPort.h",
"SerializedStructuredCloneBuffer.h",
"SharedMemory.h",
"SharedMemoryCursor.h",
"SharedMemoryHandle.h",
"SharedMemoryMapping.h",
"Shmem.h",
@@ -216,6 +217,7 @@ SOURCES += [
"BackgroundChildImpl.cpp",
"BackgroundParentImpl.cpp",
# TODO move to UNIFIED_SOURCES when old shared memory impl is removed
"SharedMemoryCursor.cpp",
"SharedMemoryHandle.cpp",
"SharedMemoryMapping.cpp",
]

View File

@@ -8,6 +8,7 @@
#include "mozilla/RefPtr.h"
#include "mozilla/ipc/SharedMemory.h"
#include "mozilla/ipc/SharedMemoryCursor.h"
#ifdef XP_LINUX
# include <errno.h>
@@ -360,4 +361,77 @@ TEST_F(IPCSharedMemoryLinuxTest, MemfdNoExec) {
}
#endif
TEST(IPCSharedMemory, CursorWriteRead)
{
// Select a chunk size which is at least as big as the allocation granularity,
// as smaller sizes will not be able to map.
const size_t chunkSize = ipc::shared_memory::SystemAllocationGranularity();
ASSERT_TRUE(IsPowerOfTwo(chunkSize));
const uint64_t fullSize = chunkSize * 20;
auto handle = ipc::shared_memory::Create(fullSize);
ASSERT_TRUE(handle.IsValid());
ASSERT_EQ(handle.Size(), fullSize);
// Map the entire region.
auto mapping = handle.Map();
ASSERT_TRUE(mapping.IsValid());
ASSERT_EQ(mapping.Size(), fullSize);
// Use a cursor to write some data.
ipc::shared_memory::Cursor cursor(std::move(handle));
ASSERT_EQ(cursor.Offset(), 0u);
ASSERT_EQ(cursor.Size(), fullSize);
// Set the chunk size to ensure we use multiple mappings for this data region.
cursor.SetChunkSize(chunkSize);
// Two basic blocks of data which are used for writeReadTest.
const char data[] = "Hello, World!";
const char data2[] = "AnotherString";
auto writeReadTest = [&]() {
uint64_t initialOffset = cursor.Offset();
// Clear out the buffer to a known state so that any checks will fail if
// they're depending on previous writes.
memset(mapping.Data(), 0xe5, mapping.Size());
// Write "Hello, World" at the offset, and ensure it is reflected in the
// full mapping.
ASSERT_TRUE(cursor.Write(data, std::size(data)));
ASSERT_EQ(cursor.Offset(), initialOffset + std::size(data));
ASSERT_STREQ(static_cast<char*>(mapping.Data()) + initialOffset, data);
// Write some data in the full mapping at the same offset, and ensure it can
// be read.
memcpy(static_cast<char*>(mapping.Data()) + initialOffset, data2,
std::size(data2));
cursor.Seek(initialOffset);
ASSERT_EQ(cursor.Offset(), initialOffset);
char buffer[std::size(data2)];
ASSERT_TRUE(cursor.Read(buffer, std::size(buffer)));
ASSERT_EQ(cursor.Offset(), initialOffset + std::size(buffer));
ASSERT_STREQ(buffer, data2);
};
writeReadTest();
// Run the writeReadTest at various offsets within the buffer, including at
// every chunk boundary, and in the middle of each chunk.
for (size_t offset = chunkSize - 3; offset < fullSize - 3;
offset += chunkSize / 2) {
cursor.Seek(offset);
writeReadTest();
}
// Do a writeReadTest at the very end of the allocated region to ensure that
// edge case is handled.
cursor.Seek(mapping.Size() - std::max(std::size(data), std::size(data2)));
writeReadTest();
// Ensure that writes past the end fail safely.
cursor.Seek(mapping.Size() - 3);
ASSERT_FALSE(cursor.Write(data, std::size(data)));
}
} // namespace mozilla