/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "WebGPUParent.h"
#include "mozilla/webgpu/ffi/wgpu.h"
#include "mozilla/layers/ImageDataSerializer.h"
#include "mozilla/layers/TextureHost.h"

namespace mozilla {
namespace webgpu {

// Interval at which all wgpu devices are polled for completed work.
const uint64_t POLL_TIME_MS = 100;

// Shared state for one WebGPU swap chain presentation: the staging buffers
// used to read frames back from the GPU and the host-memory texture that
// WebRender samples from. Ref-counted because it is shared between the
// IPC thread and the async buffer-map callbacks.
class PresentationData {
  NS_INLINE_DECL_REFCOUNTING(PresentationData);

 public:
  RawId mDeviceId = 0;
  RawId mQueueId = 0;
  RefPtr<layers::MemoryTextureHost> mTextureHost;
  // Bytes per row of the GPU-side staging buffer (256-byte aligned).
  uint32_t mSourcePitch = 0;
  // Bytes per row of the host-side texture.
  uint32_t mTargetPitch = 0;
  uint32_t mRowCount = 0;
  // Buffer IDs reserved by the child but not yet backed by a wgpu buffer.
  std::vector<RawId> mUnassignedBufferIds;
  // Created buffers that are free to receive the next frame copy.
  std::vector<RawId> mAvailableBufferIds;
  // Buffers with an in-flight copy/map; oldest at the back.
  std::vector<RawId> mQueuedBufferIds;
  // Guards the three ID vectors above.
  Mutex mBuffersLock;

  PresentationData() : mBuffersLock("WebGPU presentation buffers") {
    MOZ_COUNT_CTOR(PresentationData);
  }

 private:
  ~PresentationData() { MOZ_COUNT_DTOR(PresentationData); }
};

// The callbacks below are handed to wgpu's identity recycler. When the
// server retires an ID, the matching SendFree* message returns it to the
// child-side allocator. An IPC send failure is unrecoverable here.
static void FreeAdapter(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeAdapter(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeDevice(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeDevice(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeSwapChain(RawId id, void* param) {
  // Swap chain IDs are managed by the parent; nothing to send back.
  Unused << id;
  Unused << param;
}
static void FreePipelineLayout(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreePipelineLayout(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeShaderModule(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeShaderModule(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeBindGroupLayout(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroupLayout(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeBindGroup(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroup(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeCommandBuffer(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeCommandBuffer(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeRenderPipeline(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeRenderPipeline(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeComputePipeline(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeComputePipeline(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeBuffer(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeBuffer(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeTexture(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeTexture(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeTextureView(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeTextureView(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeSampler(RawId id, void* param) {
  if (!static_cast<WebGPUParent*>(param)->SendFreeSampler(id)) {
    MOZ_CRASH("IPC failure");
  }
}
static void FreeSurface(RawId id, void* param) {
  // Surfaces are not allocated through the child; nothing to recycle.
  Unused << id;
  Unused << param;
}

// Builds the identity-recycler vtable handed to the wgpu server.
static ffi::WGPUIdentityRecyclerFactory MakeFactory(void* param) {
  // Note: careful about the order here! It must match the FFI struct layout.
  const ffi::WGPUIdentityRecyclerFactory factory = {
      param,
      FreeAdapter,
      FreeDevice,
      FreeSwapChain,
      FreePipelineLayout,
      FreeShaderModule,
      FreeBindGroupLayout,
      FreeBindGroup,
      FreeCommandBuffer,
      FreeRenderPipeline,
      FreeComputePipeline,
      FreeBuffer,
      FreeTexture,
      FreeTextureView,
      FreeSampler,
      FreeSurface,
  };
  return factory;
}

WebGPUParent::WebGPUParent()
    : mContext(ffi::wgpu_server_new(MakeFactory(this))) {
  mTimer.Start(base::TimeDelta::FromMilliseconds(POLL_TIME_MS), this,
               &WebGPUParent::MaintainDevices);
}

WebGPUParent::~WebGPUParent() = default;

// Timer callback: non-blocking poll so queued GPU work and map callbacks
// make progress.
void WebGPUParent::MaintainDevices() {
  ffi::wgpu_server_poll_all_devices(mContext, false);
}

ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
    const dom::GPURequestAdapterOptions& aOptions,
    const nsTArray<RawId>& aTargetIds,
    InstanceRequestAdapterResolver&& resolver) {
  ffi::WGPURequestAdapterOptions options = {};
  if (aOptions.mPowerPreference.WasPassed()) {
    options.power_preference = static_cast<ffi::WGPUPowerPreference>(
        aOptions.mPowerPreference.Value());
  }
  // TODO: make available backends configurable by prefs

  // The child pre-allocates one ID per backend; the server picks at most one.
  int8_t index = ffi::wgpu_server_instance_request_adapter(
      mContext, &options, aTargetIds.Elements(), aTargetIds.Length());
  if (index >= 0) {
    resolver(aTargetIds[index]);
  } else {
    // 0 signals "no adapter found" to the child.
    resolver(0);
  }

  // free the unused IDs
  for (size_t i = 0; i < aTargetIds.Length(); ++i) {
    if (static_cast<int8_t>(i) != index && !SendFreeAdapter(aTargetIds[i])) {
      MOZ_CRASH("IPC failure");
    }
  }
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
    RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc, RawId aNewId) {
  ffi::WGPUDeviceDescriptor desc = {};
  desc.limits.max_bind_groups = aDesc.mLimits.WasPassed()
                                    ? aDesc.mLimits.Value().mMaxBindGroups
                                    : WGPUDEFAULT_BIND_GROUPS;
  // TODO: fill up the descriptor
  ffi::wgpu_server_adapter_request_device(mContext, aSelfId, &desc, aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvAdapterDestroy(RawId aSelfId) {
  ffi::wgpu_server_adapter_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aSelfId) {
  ffi::wgpu_server_device_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateBuffer(
    RawId aSelfId, const ffi::WGPUBufferDescriptor& aDesc,
    const nsCString& aLabel, RawId aNewId) {
  ffi::WGPUBufferDescriptor desc = aDesc;
  if (!aLabel.IsEmpty()) {
    desc.label = aLabel.Data();
  }
  ffi::wgpu_server_device_create_buffer(mContext, aSelfId, &desc, aNewId);
  // Mappable buffers get a slot for the Shmem the child will send us later.
  if (desc.usage & (WGPUBufferUsage_MAP_READ | WGPUBufferUsage_MAP_WRITE)) {
    mSharedMemoryMap.insert({aNewId, Shmem()});
  }
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvBufferReturnShmem(RawId aSelfId,
                                                   Shmem&& aShmem) {
  mSharedMemoryMap[aSelfId] = aShmem;
  return IPC_OK();
}

// Heap-allocated context for one async buffer-map request; owned by the
// callback and deleted when it fires.
struct MapRequest {
  const ffi::WGPUGlobal* const mContext;
  ffi::WGPUBufferId mBufferId;
  ffi::WGPUHostMap mHostMap;
  uint64_t mOffset;
  ipc::Shmem mShmem;
  WebGPUParent::BufferMapResolver mResolver;
  MapRequest(const ffi::WGPUGlobal* context, ffi::WGPUBufferId bufferId,
             ffi::WGPUHostMap hostMap, uint64_t offset, ipc::Shmem&& shmem,
             WebGPUParent::BufferMapResolver&& resolver)
      : mContext(context),
        mBufferId(bufferId),
        mHostMap(hostMap),
        mOffset(offset),
        mShmem(shmem),
        mResolver(resolver) {}
};

// Fired by wgpu when the buffer mapping completes. For read maps, copies
// the mapped range into the Shmem before resolving back to the child.
static void MapCallback(ffi::WGPUBufferMapAsyncStatus status,
                        uint8_t* userdata) {
  auto* req = reinterpret_cast<MapRequest*>(userdata);
  // TODO: better handle errors
  MOZ_ASSERT(status == ffi::WGPUBufferMapAsyncStatus_Success);
  if (req->mHostMap == ffi::WGPUHostMap_Read) {
    const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
        req->mContext, req->mBufferId, req->mOffset, req->mShmem.Size<uint8_t>());
    memcpy(req->mShmem.get<uint8_t>(), ptr, req->mShmem.Size<uint8_t>());
  }
  req->mResolver(std::move(req->mShmem));
  delete req;
}

ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aSelfId,
                                           ffi::WGPUHostMap aHostMap,
                                           uint64_t aOffset, uint64_t aSize,
                                           BufferMapResolver&& aResolver) {
  auto* request =
      new MapRequest(mContext, aSelfId, aHostMap, aOffset,
                     std::move(mSharedMemoryMap[aSelfId]), std::move(aResolver));
  ffi::WGPUBufferMapOperation mapOperation = {
      aHostMap, &MapCallback, reinterpret_cast<uint8_t*>(request)};
  ffi::wgpu_server_buffer_map(mContext, aSelfId, aOffset, aSize, mapOperation);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem,
                                             bool aFlush) {
  if (aFlush) {
    // TODO: flush exact modified sub-range
    uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
        mContext, aSelfId, 0, aShmem.Size<uint8_t>());
    MOZ_ASSERT(ptr != nullptr);
    memcpy(ptr, aShmem.get<uint8_t>(), aShmem.Size<uint8_t>());
  }

  ffi::wgpu_server_buffer_unmap(mContext, aSelfId);

  // Return the Shmem to the map for reuse, or free it if the buffer is
  // no longer tracked (e.g. already destroyed).
  const auto iter = mSharedMemoryMap.find(aSelfId);
  if (iter == mSharedMemoryMap.end()) {
    DeallocShmem(aShmem);
  } else {
    iter->second = aShmem;
  }
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
  ffi::wgpu_server_buffer_destroy(mContext, aSelfId);

  const auto iter = mSharedMemoryMap.find(aSelfId);
  if (iter != mSharedMemoryMap.end()) {
    DeallocShmem(iter->second);
    mSharedMemoryMap.erase(iter);
  }
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateTexture(
    RawId aSelfId, const ffi::WGPUTextureDescriptor& aDesc,
    const nsCString& aLabel, RawId aNewId) {
  ffi::WGPUTextureDescriptor desc = aDesc;
  if (!aLabel.IsEmpty()) {
    desc.label = aLabel.Data();
  }
  ffi::wgpu_server_device_create_texture(mContext, aSelfId, &desc, aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvTextureCreateView(
    RawId aSelfId, const ffi::WGPUTextureViewDescriptor& aDesc,
    const nsCString& aLabel, RawId aNewId) {
  ffi::WGPUTextureViewDescriptor desc = aDesc;
  if (!aLabel.IsEmpty()) {
    desc.label = aLabel.Data();
  }
  ffi::wgpu_server_texture_create_view(mContext, aSelfId, &desc, aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aSelfId) {
  ffi::wgpu_server_texture_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvTextureViewDestroy(RawId aSelfId) {
  ffi::wgpu_server_texture_view_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateSampler(
    RawId aSelfId, const SerialSamplerDescriptor& aDesc, RawId aNewId) {
  ffi::WGPUSamplerDescriptor desc = {};
  desc.address_modes[0] = aDesc.mAddressU;
  desc.address_modes[1] = aDesc.mAddressV;
  desc.address_modes[2] = aDesc.mAddressW;
  desc.mag_filter = aDesc.mMagFilter;
  desc.min_filter = aDesc.mMinFilter;
  desc.mipmap_filter = aDesc.mMipmapFilter;
  desc.lod_min_clamp = aDesc.mLodMinClamp;
  desc.lod_max_clamp = aDesc.mLodMaxClamp;
  if (aDesc.mCompare) {
    desc.compare = aDesc.mCompare.ptr();
  }
  if (!aDesc.mLabel.IsEmpty()) {
    desc.label = aDesc.mLabel.Data();
  }
  ffi::wgpu_server_device_create_sampler(mContext, aSelfId, &desc, aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvSamplerDestroy(RawId aSelfId) {
  ffi::wgpu_server_sampler_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateCommandEncoder(
    RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc,
    RawId aNewId) {
  ffi::WGPUCommandEncoderDescriptor desc = {};
  if (aDesc.mLabel.WasPassed()) {
    // TODO: desc.label = aDesc.mLabel.Value();
  }
  ffi::wgpu_server_device_create_encoder(mContext, aSelfId, &desc, aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyBufferToBuffer(
    RawId aSelfId, RawId aSourceId, BufferAddress aSourceOffset,
    RawId aDestinationId, BufferAddress aDestinationOffset,
    BufferAddress aSize) {
  ffi::wgpu_server_encoder_copy_buffer_to_buffer(mContext, aSelfId, aSourceId,
                                                 aSourceOffset, aDestinationId,
                                                 aDestinationOffset, aSize);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyBufferToTexture(
    RawId aSelfId, WGPUBufferCopyView aSource, WGPUTextureCopyView aDestination,
    WGPUExtent3d aCopySize) {
  ffi::wgpu_server_encoder_copy_buffer_to_texture(mContext, aSelfId, &aSource,
                                                  &aDestination, &aCopySize);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyTextureToBuffer(
    RawId aSelfId, WGPUTextureCopyView aSource, WGPUBufferCopyView aDestination,
    WGPUExtent3d aCopySize) {
  ffi::wgpu_server_encoder_copy_texture_to_buffer(mContext, aSelfId, &aSource,
                                                  &aDestination, &aCopySize);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyTextureToTexture(
    RawId aSelfId, WGPUTextureCopyView aSource, WGPUTextureCopyView aDestination,
    WGPUExtent3d aCopySize) {
  ffi::wgpu_server_encoder_copy_texture_to_texture(mContext, aSelfId, &aSource,
                                                   &aDestination, &aCopySize);
  return IPC_OK();
}

// The compute/render pass commands arrive pre-serialized in a Shmem that
// the parent owns and must free after replay.
ipc::IPCResult WebGPUParent::RecvCommandEncoderRunComputePass(RawId aSelfId,
                                                              Shmem&& aShmem) {
  ffi::wgpu_server_encode_compute_pass(mContext, aSelfId,
                                       aShmem.get<uint8_t>(),
                                       aShmem.Size<uint8_t>());
  DeallocShmem(aShmem);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandEncoderRunRenderPass(RawId aSelfId,
                                                             Shmem&& aShmem) {
  ffi::wgpu_server_encode_render_pass(mContext, aSelfId, aShmem.get<uint8_t>(),
                                      aShmem.Size<uint8_t>());
  DeallocShmem(aShmem);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
    RawId aSelfId, const dom::GPUCommandBufferDescriptor& aDesc) {
  Unused << aDesc;
  ffi::WGPUCommandBufferDescriptor desc = {};
  ffi::wgpu_server_encoder_finish(mContext, aSelfId, &desc);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandEncoderDestroy(RawId aSelfId) {
  ffi::wgpu_server_encoder_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvCommandBufferDestroy(RawId aSelfId) {
  ffi::wgpu_server_command_buffer_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvQueueSubmit(
    RawId aSelfId, const nsTArray<RawId>& aCommandBuffers) {
  ffi::wgpu_server_queue_submit(mContext, aSelfId, aCommandBuffers.Elements(),
                                aCommandBuffers.Length());
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvQueueWriteBuffer(RawId aSelfId,
                                                  RawId aBufferId,
                                                  uint64_t aBufferOffset,
                                                  Shmem&& aShmem) {
  ffi::wgpu_server_queue_write_buffer(mContext, aSelfId, aBufferId,
                                      aBufferOffset, aShmem.get<uint8_t>(),
                                      aShmem.Size<uint8_t>());
  DeallocShmem(aShmem);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvQueueWriteTexture(
    RawId aSelfId, const ffi::WGPUTextureCopyView& aDestination, Shmem&& aShmem,
    const ffi::WGPUTextureDataLayout& aDataLayout,
    const ffi::WGPUExtent3d& aExtent) {
  ffi::wgpu_server_queue_write_texture(
      mContext, aSelfId, &aDestination, aShmem.get<uint8_t>(),
      aShmem.Size<uint8_t>(), &aDataLayout, &aExtent);
  DeallocShmem(aShmem);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateBindGroupLayout(
    RawId aSelfId, const SerialBindGroupLayoutDescriptor& aDesc, RawId aNewId) {
  ffi::WGPUBindGroupLayoutDescriptor desc = {};
  desc.entries = aDesc.mEntries.Elements();
  desc.entries_length = aDesc.mEntries.Length();
  ffi::wgpu_server_device_create_bind_group_layout(mContext, aSelfId, &desc,
                                                   aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDestroy(RawId aSelfId) {
  ffi::wgpu_server_bind_group_layout_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreatePipelineLayout(
    RawId aSelfId, const SerialPipelineLayoutDescriptor& aDesc, RawId aNewId) {
  ffi::WGPUPipelineLayoutDescriptor desc = {};
  desc.bind_group_layouts = aDesc.mBindGroupLayouts.Elements();
  desc.bind_group_layouts_length = aDesc.mBindGroupLayouts.Length();
  ffi::wgpu_server_device_create_pipeline_layout(mContext, aSelfId, &desc,
                                                 aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvPipelineLayoutDestroy(RawId aSelfId) {
  ffi::wgpu_server_pipeline_layout_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateBindGroup(
    RawId aSelfId, const SerialBindGroupDescriptor& aDesc, RawId aNewId) {
  // Translate the IPC-serializable entries into the FFI representation.
  nsTArray<ffi::WGPUBindGroupEntry> ffiEntries(aDesc.mEntries.Length());
  for (const auto& entry : aDesc.mEntries) {
    ffi::WGPUBindGroupEntry bge = {};
    bge.binding = entry.mBinding;
    switch (entry.mType) {
      case SerialBindGroupEntryType::Buffer:
        bge.buffer = entry.mValue;
        bge.offset = entry.mBufferOffset;
        bge.size = ffi::make_buffer_size(entry.mBufferSize);
        break;
      case SerialBindGroupEntryType::Texture:
        bge.texture_view = entry.mValue;
        break;
      case SerialBindGroupEntryType::Sampler:
        bge.sampler = entry.mValue;
        break;
      default:
        MOZ_CRASH("unreachable");
    }
    ffiEntries.AppendElement(bge);
  }
  ffi::WGPUBindGroupDescriptor desc = {};
  desc.layout = aDesc.mLayout;
  desc.entries = ffiEntries.Elements();
  desc.entries_length = ffiEntries.Length();
  ffi::wgpu_server_device_create_bind_group(mContext, aSelfId, &desc, aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvBindGroupDestroy(RawId aSelfId) {
  ffi::wgpu_server_bind_group_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateShaderModule(
    RawId aSelfId, const nsTArray<uint32_t>& aSpirv, const nsCString& aWgsl,
    RawId aNewId) {
  // NOTE(review): only the SPIR-V path is wired up; aWgsl is currently
  // unused here.
  ffi::WGPUShaderModuleDescriptor desc = {};
  desc.code.bytes = aSpirv.Elements();
  desc.code.length = aSpirv.Length();
  ffi::wgpu_server_device_create_shader_module(mContext, aSelfId, &desc,
                                               aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvShaderModuleDestroy(RawId aSelfId) {
  ffi::wgpu_server_shader_module_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateComputePipeline(
    RawId aSelfId, const SerialComputePipelineDescriptor& aDesc, RawId aNewId) {
  // entryPoint must outlive the FFI call since desc holds a raw pointer.
  const NS_LossyConvertUTF16toASCII entryPoint(aDesc.mComputeStage.mEntryPoint);
  ffi::WGPUComputePipelineDescriptor desc = {};
  desc.layout = aDesc.mLayout;
  desc.compute_stage.module = aDesc.mComputeStage.mModule;
  desc.compute_stage.entry_point = entryPoint.get();
  ffi::wgpu_server_device_create_compute_pipeline(mContext, aSelfId, &desc,
                                                  aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvComputePipelineDestroy(RawId aSelfId) {
  ffi::wgpu_server_compute_pipeline_destroy(mContext, aSelfId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateRenderPipeline(
    RawId aSelfId, const SerialRenderPipelineDescriptor& aDesc, RawId aNewId) {
  // All locals referenced by `desc` (entry points, vertex buffer layouts,
  // fragmentDesc) must stay alive until the FFI call below returns.
  const NS_LossyConvertUTF16toASCII vsEntryPoint(
      aDesc.mVertexStage.mEntryPoint);
  const NS_LossyConvertUTF16toASCII fsEntryPoint(
      aDesc.mFragmentStage.mEntryPoint);
  nsTArray<ffi::WGPUVertexBufferLayoutDescriptor> vertexBuffers(
      aDesc.mVertexState.mVertexBuffers.Length());
  ffi::WGPURenderPipelineDescriptor desc = {};
  ffi::WGPUProgrammableStageDescriptor fragmentDesc = {};
  desc.layout = aDesc.mLayout;
  desc.vertex_stage.module = aDesc.mVertexStage.mModule;
  desc.vertex_stage.entry_point = vsEntryPoint.get();
  if (aDesc.mFragmentStage.mModule != 0) {
    fragmentDesc.module = aDesc.mFragmentStage.mModule;
    fragmentDesc.entry_point = fsEntryPoint.get();
    desc.fragment_stage = &fragmentDesc;
  }
  desc.primitive_topology = aDesc.mPrimitiveTopology;
  if (aDesc.mRasterizationState.isSome()) {
    desc.rasterization_state = aDesc.mRasterizationState.ptr();
  }
  desc.color_states = aDesc.mColorStates.Elements();
  desc.color_states_length = aDesc.mColorStates.Length();
  if (aDesc.mDepthStencilState.isSome()) {
    desc.depth_stencil_state = aDesc.mDepthStencilState.ptr();
  }
  for (const auto& vertexBuffer : aDesc.mVertexState.mVertexBuffers) {
    ffi::WGPUVertexBufferLayoutDescriptor vb = {};
    vb.array_stride = vertexBuffer.mArrayStride;
    vb.step_mode = vertexBuffer.mStepMode;
    vb.attributes = vertexBuffer.mAttributes.Elements();
    vb.attributes_length = vertexBuffer.mAttributes.Length();
    vertexBuffers.AppendElement(vb);
  }
  desc.vertex_state.index_format = aDesc.mVertexState.mIndexFormat;
  desc.vertex_state.vertex_buffers = vertexBuffers.Elements();
  desc.vertex_state.vertex_buffers_length = vertexBuffers.Length();
  desc.sample_count = aDesc.mSampleCount;
  desc.sample_mask = aDesc.mSampleMask;
  desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
  ffi::wgpu_server_device_create_render_pipeline(mContext, aSelfId, &desc,
                                                 aNewId);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvRenderPipelineDestroy(RawId aSelfId) {
  ffi::wgpu_server_render_pipeline_destroy(mContext, aSelfId);
  return IPC_OK();
}

// TODO: proper destruction

// WebGPU requires buffer-to/from-texture copy rows to be 256-byte aligned.
static const uint64_t kBufferAlignment = 0x100;

// Rounds `value` up to the next multiple of kBufferAlignment.
// NOTE(review): for already-aligned values this still bumps a full extra
// alignment step ((value | mask) + 1). Kept as-is because the child side
// presumably computes the same pitch — confirm before changing.
static uint64_t Align(uint64_t value) {
  return (value | (kBufferAlignment - 1)) + 1;
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
    RawId aSelfId, RawId aQueueId, const RGBDescriptor& aDesc,
    const nsTArray<RawId>& aBufferIds, ExternalImageId aExternalId) {
  const auto rows = aDesc.size().height;
  const auto bufferStride =
      Align(static_cast<uint64_t>(aDesc.size().width) * 4);
  const auto textureStride = layers::ImageDataSerializer::GetRGBStride(aDesc);
  const auto wholeBufferSize = CheckedInt<size_t>(textureStride) * rows;
  if (!wholeBufferSize.isValid()) {
    NS_ERROR("Invalid total buffer size!");
    return IPC_OK();
  }
  auto* textureHostData = new (fallible) uint8_t[wholeBufferSize.value()];
  if (!textureHostData) {
    NS_ERROR("Unable to allocate host data!");
    return IPC_OK();
  }
  RefPtr<layers::MemoryTextureHost> textureHost = new layers::MemoryTextureHost(
      textureHostData, aDesc, layers::TextureFlags::NO_FLAGS);
  textureHost->CreateRenderTexture(aExternalId);
  nsTArray<RawId> bufferIds(aBufferIds.Clone());
  RefPtr<PresentationData> data = new PresentationData();
  data->mDeviceId = aSelfId;
  data->mQueueId = aQueueId;
  data->mTextureHost = textureHost;
  data->mSourcePitch = bufferStride;
  data->mTargetPitch = textureStride;
  data->mRowCount = rows;
  for (const RawId id : bufferIds) {
    data->mUnassignedBufferIds.push_back(id);
  }
  if (!mCanvasMap.insert({AsUint64(aExternalId), data}).second) {
    NS_ERROR("External image is already registered as WebGPU canvas!");
  }
  return IPC_OK();
}

// Context for one frame readback; owned and deleted by PresentCallback.
struct PresentRequest {
  const ffi::WGPUGlobal* mContext;
  RefPtr<PresentationData> mData;
};

// Fired when the staging buffer for a presented frame is mapped. Copies
// the frame row-by-row (source and target pitches differ) into the
// MemoryTextureHost, then recycles the buffer ID.
static void PresentCallback(ffi::WGPUBufferMapAsyncStatus status,
                            uint8_t* userdata) {
  auto* req = reinterpret_cast<PresentRequest*>(userdata);
  PresentationData* data = req->mData.get();
  // get the buffer ID
  data->mBuffersLock.Lock();
  RawId bufferId = data->mQueuedBufferIds.back();
  data->mQueuedBufferIds.pop_back();
  data->mAvailableBufferIds.push_back(bufferId);
  data->mBuffersLock.Unlock();
  // copy the data
  if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
    const auto bufferSize = data->mRowCount * data->mSourcePitch;
    const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
        req->mContext, bufferId, 0, bufferSize);
    uint8_t* dst = data->mTextureHost->GetBuffer();
    for (uint32_t row = 0; row < data->mRowCount; ++row) {
      memcpy(dst, ptr, data->mTargetPitch);
      dst += data->mTargetPitch;
      ptr += data->mSourcePitch;
    }
    wgpu_server_buffer_unmap(req->mContext, bufferId);
  } else {
    // TODO: better handle errors
    NS_WARNING("WebGPU frame mapping failed!");
  }
  // free yourself
  delete req;
}

ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
    wr::ExternalImageId aExternalId, RawId aTextureId,
    RawId aCommandEncoderId) {
  // step 0: get the data associated with the swapchain
  const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
  if (lookup == mCanvasMap.end()) {
    NS_WARNING("WebGPU presenting on a destroyed swap chain!");
    return IPC_OK();
  }
  RefPtr<PresentationData> data = lookup->second.get();
  RawId bufferId = 0;
  const auto& size = data->mTextureHost->GetSize();
  const auto bufferSize = data->mRowCount * data->mSourcePitch;

  // step 1: find an available staging buffer, or create one
  data->mBuffersLock.Lock();
  if (!data->mAvailableBufferIds.empty()) {
    bufferId = data->mAvailableBufferIds.back();
    data->mAvailableBufferIds.pop_back();
  } else if (!data->mUnassignedBufferIds.empty()) {
    // Lazily create the wgpu buffer behind a reserved ID on first use.
    bufferId = data->mUnassignedBufferIds.back();
    data->mUnassignedBufferIds.pop_back();
    ffi::WGPUBufferUsage usage =
        WGPUBufferUsage_COPY_DST | WGPUBufferUsage_MAP_READ;
    ffi::WGPUBufferDescriptor desc = {};
    desc.size = bufferSize;
    desc.usage = usage;
    ffi::wgpu_server_device_create_buffer(mContext, data->mDeviceId, &desc,
                                          bufferId);
  } else {
    bufferId = 0;
  }
  if (bufferId) {
    data->mQueuedBufferIds.insert(data->mQueuedBufferIds.begin(), bufferId);
  }
  data->mBuffersLock.Unlock();
  if (!bufferId) {
    // TODO: add a warning - no buffer are available!
    return IPC_OK();
  }

  // step 3: submit a copy command for the frame
  ffi::WGPUCommandEncoderDescriptor encoderDesc = {};
  ffi::wgpu_server_device_create_encoder(mContext, data->mDeviceId,
                                         &encoderDesc, aCommandEncoderId);
  const ffi::WGPUTextureCopyView texView = {
      aTextureId,
  };
  const ffi::WGPUTextureDataLayout bufLayout = {
      0,
      data->mSourcePitch,
      0,
  };
  const ffi::WGPUBufferCopyView bufView = {
      bufferId,
      bufLayout,
  };
  const ffi::WGPUExtent3d extent = {
      static_cast<uint32_t>(size.width),
      static_cast<uint32_t>(size.height),
      1,
  };
  ffi::wgpu_server_encoder_copy_texture_to_buffer(mContext, aCommandEncoderId,
                                                  &texView, &bufView, &extent);
  ffi::WGPUCommandBufferDescriptor commandDesc = {};
  ffi::wgpu_server_encoder_finish(mContext, aCommandEncoderId, &commandDesc);
  ffi::wgpu_server_queue_submit(mContext, data->mQueueId, &aCommandEncoderId,
                                1);

  // step 4: request the pixels to be copied into the external texture
  // TODO: this isn't strictly necessary. When WR wants to Lock() the external
  // texture,
  // we can just give it the contents of the last mapped buffer instead of the
  // copy.
  auto* const presentRequest = new PresentRequest{
      mContext,
      data,
  };
  ffi::WGPUBufferMapOperation mapOperation = {
      ffi::WGPUHostMap_Read, &PresentCallback,
      reinterpret_cast<uint8_t*>(presentRequest)};
  ffi::wgpu_server_buffer_map(mContext, bufferId, 0, bufferSize, mapOperation);
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvSwapChainDestroy(
    wr::ExternalImageId aExternalId) {
  const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
  MOZ_ASSERT(lookup != mCanvasMap.end());
  RefPtr<PresentationData> data = lookup->second.get();
  mCanvasMap.erase(AsUint64(aExternalId));
  data->mTextureHost = nullptr;
  layers::TextureHost::DestroyRenderTexture(aExternalId);

  data->mBuffersLock.Lock();
  // Unassigned IDs never got a wgpu buffer: just return them to the child.
  for (const auto bid : data->mUnassignedBufferIds) {
    if (!SendFreeBuffer(bid)) {
      NS_WARNING("Unable to free an ID for non-assigned buffer");
    }
  }
  // Created buffers are destroyed server-side (the recycler sends FreeBuffer).
  for (const auto bid : data->mAvailableBufferIds) {
    ffi::wgpu_server_buffer_destroy(mContext, bid);
  }
  for (const auto bid : data->mQueuedBufferIds) {
    ffi::wgpu_server_buffer_destroy(mContext, bid);
  }
  data->mBuffersLock.Unlock();
  return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvShutdown() {
  mTimer.Stop();
  for (const auto& p : mCanvasMap) {
    const wr::ExternalImageId extId = {p.first};
    layers::TextureHost::DestroyRenderTexture(extId);
  }
  mCanvasMap.clear();
  // Blocking poll: drain all pending GPU work before tearing down.
  ffi::wgpu_server_poll_all_devices(mContext, true);
  ffi::wgpu_server_delete(const_cast<ffi::WGPUGlobal*>(mContext));
  return IPC_OK();
}

}  // namespace webgpu
}  // namespace mozilla