On Android, SurfaceTextures provide a transform that should be applied to texture coordinates when sampling from the texture. Usually this is simply a y-flip, but sometimes it includes a scale and slight translation, eg when the video frame is contained within a larger texture. Previously we ignored this transform but performed a y-flip, meaning we rendered correctly most of the time, but not all of the time. Our first attempt to fix this was in bug 1731980. When rendering as a compositor surface with RenderCompositorOGLSWGL, we supplied the transform to CompositorOGL's shaders, which correctly fixed the bug for this rendering path. However, the attempted fix for hardware webrender in fact made things worse. As UV coordinates are supplied to webrender unnormalized, then the shaders normalize them by dividing by the actual texture size, this effectively handled the scale component of the transform. (Though not quite scaling by the correct amount, and ignoring the translation component, sometimes resulting in a pixel-wide green seam being visible at the video's edges.) When we additionally applied the transformation to the coordinates, it resulted in the scale being applied twice, and the video being rendered too far zoomed in. To make matters worse, when we received subsequent bug reports of incorrect rendering on various devices we mistakenly assumed that the devices must be buggy, rather than our code being incorrect. We therefore reverted to ignoring the transform on these devices, thereby breaking the software webrender path again. Additionally, on devices without GL_OES_EGL_image_external_essl3 support, we must sample from the SurfaceTexture using an ESSL1 shader. This means we do not have access to the correct texture size, meaning we cannot correctly normalize the UV coordinates. This results in the video being rendered too far zoomed out. 
And in the non-compositor-surface software webrender path, we were accidentally downscaling the texture when reading back into a CPU buffer, resulting in the video being rendered at the correct zoom, but being very blurry. This patch aims to handle the transform correctly, in all rendering paths, hopefully once and for all. For hardware webrender, we now supply the texture coordinates to webrender already normalized, using the functionality added in the previous patch. This avoids the shaders scaling the coordinates again, or using an incorrect texture size to do so. For RenderCompositorOGLSWGL, we continue to apply the transform using CompositorOGL's shaders. In the non-compositor-surface software webrender path, we make GLReadPixelsHelper apply the transform when reading from the SurfaceTexture into the CPU buffer, again using functionality added earlier in this patch series. This avoids downscaling the image. We can then provide the default untransformed and unnormalized UVs to webrender. As a result we can now remove the virtual function RenderTextureHost::GetUvCoords(), added in bug 1731980, as it no longer serves any purpose: we no longer want to share the implementation between RenderAndroidSurfaceTextureHost::Lock and RenderTextureHostSWGL::LockSWGL. Finally, we remove all transform overrides on the devices we mistakenly assumed were buggy. Differential Revision: https://phabricator.services.mozilla.com/D220582
223 lines
6.8 KiB
C++
223 lines
6.8 KiB
C++
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
|
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
#include "RenderEGLImageTextureHost.h"
|
|
|
|
#include "mozilla/gfx/Logging.h"
|
|
#include "GLContextEGL.h"
|
|
#include "GLLibraryEGL.h"
|
|
#include "GLReadTexImageHelper.h"
|
|
#include "OGLShaderConfig.h"
|
|
|
|
namespace mozilla {
|
|
namespace wr {
|
|
|
|
// Wraps an EGLImage (plus an optional fence sync guarding its contents) so it
// can be handed to webrender as an external texture. The GL texture handle is
// created lazily on first Lock()/ReadTexImage(), not here.
RenderEGLImageTextureHost::RenderEGLImageTextureHost(EGLImage aImage,
                                                     EGLSync aSync,
                                                     gfx::IntSize aSize,
                                                     gfx::SurfaceFormat aFormat)
    : mImage(aImage),
      mSync(aSync),
      mSize(aSize),
      mFormat(aFormat),
      // Placeholder target; the real target is queried from the GL context in
      // CreateTextureHandle().
      mTextureTarget(LOCAL_GL_TEXTURE_2D),
      mTextureHandle(0) {
  MOZ_COUNT_CTOR_INHERITED(RenderEGLImageTextureHost, RenderTextureHost);
}
|
|
|
|
RenderEGLImageTextureHost::~RenderEGLImageTextureHost() {
  MOZ_COUNT_DTOR_INHERITED(RenderEGLImageTextureHost, RenderTextureHost);
  // Release the GL texture created in CreateTextureHandle(), if any. The
  // EGLImage and sync themselves are owned elsewhere (see WaitSync()).
  DeleteTextureHandle();
}
|
|
|
|
// Locks the host for rendering with hardware webrender, returning the native
// texture handle and its (unnormalized) UV rect covering the full image.
wr::WrExternalImage RenderEGLImageTextureHost::Lock(uint8_t aChannelIndex,
                                                    gl::GLContext* aGL) {
  MOZ_ASSERT(aChannelIndex == 0);

  // Adopt the caller's GL context on first use. Seeing a different context
  // later is a bug: on Android the SingletonGL is always used.
  if (mGL && mGL.get() != aGL) {
    MOZ_ASSERT_UNREACHABLE("Unexpected GL context");
    return InvalidToWrExternalImage();
  }
  mGL = aGL;

  // The image must exist, the context must be current, the producer's fence
  // must have been waited on, and the texture must be bound to the EGLImage.
  const bool ready = mImage && mGL && mGL->MakeCurrent() && WaitSync() &&
                     CreateTextureHandle();
  if (!ready) {
    return InvalidToWrExternalImage();
  }

  const auto uvRight = static_cast<float>(mSize.width);
  const auto uvBottom = static_cast<float>(mSize.height);
  return NativeTextureToWrExternalImage(mTextureHandle, 0.0, 0.0, uvRight,
                                        uvBottom);
}
|
|
|
|
// Nothing to release per-frame: the texture handle stays valid across locks
// and is only destroyed in DeleteTextureHandle().
void RenderEGLImageTextureHost::Unlock() {}
|
|
|
|
// Creates a TextureSource wrapping the EGLImage for use by the compositor
// (e.g. RenderCompositorOGLSWGL). Returns nullptr on failure.
RefPtr<layers::TextureSource> RenderEGLImageTextureHost::CreateTextureSource(
    layers::TextureSourceProvider* aProvider) {
  gl::GLContext* const gl = aProvider->GetGLContext();

  // Only the singleton GL context should ever be seen on Android; adopt it on
  // first use and treat any mismatch afterwards as a bug.
  if (mGL && mGL.get() != gl) {
    MOZ_ASSERT_UNREACHABLE("Unexpected GL context");
    return nullptr;
  }
  mGL = gl;

  // Make sure the producer has finished writing before the image is sampled.
  if (!WaitSync()) {
    return nullptr;
  }

  return new layers::EGLImageTextureSource(
      aProvider, mImage, mFormat, gl->GetPreferredEGLImageTextureTarget(),
      LOCAL_GL_CLAMP_TO_EDGE, mSize);
}
|
|
|
|
// Reports the format of the data produced by MapPlane()/ReadTexImage().
// SWGL does not support RGBA/RGBX, so we advertise the BGRA/BGRX equivalent;
// ReadTexImage() (called by MapPlane()) performs the matching conversion.
gfx::SurfaceFormat RenderEGLImageTextureHost::GetFormat() const {
  MOZ_ASSERT(mFormat == gfx::SurfaceFormat::R8G8B8A8 ||
             mFormat == gfx::SurfaceFormat::R8G8B8X8);

  switch (mFormat) {
    case gfx::SurfaceFormat::R8G8B8A8:
      return gfx::SurfaceFormat::B8G8R8A8;
    case gfx::SurfaceFormat::R8G8B8X8:
      return gfx::SurfaceFormat::B8G8R8X8;
    default:
      gfxCriticalNoteOnce
          << "Unexpected color format of RenderEGLImageTextureHost";
      return gfx::SurfaceFormat::UNKNOWN;
  }
}
|
|
|
|
// Maps the image for CPU access (software webrender path) by reading the
// texture back into a DataSourceSurface. Returns false on failure.
bool RenderEGLImageTextureHost::MapPlane(RenderCompositor* aCompositor,
                                         uint8_t aChannelIndex,
                                         PlaneInfo& aPlaneInfo) {
  // Read the EGLImage contents back into a CPU-side surface.
  RefPtr<gfx::DataSourceSurface> surface = ReadTexImage();
  if (!surface) {
    return false;
  }

  gfx::DataSourceSurface::MappedSurface mapped;
  if (!surface->Map(gfx::DataSourceSurface::MapType::READ, &mapped)) {
    return false;
  }

  // Keep the surface alive (and mapped) until UnmapPlanes() is called.
  mReadback = surface;
  aPlaneInfo.mSize = mSize;
  aPlaneInfo.mStride = mapped.mStride;
  aPlaneInfo.mData = mapped.mData;
  return true;
}
|
|
|
|
void RenderEGLImageTextureHost::UnmapPlanes() {
|
|
if (mReadback) {
|
|
mReadback->Unmap();
|
|
mReadback = nullptr;
|
|
}
|
|
}
|
|
|
|
// Lazily creates a GL texture bound to the EGLImage. Requires mGL to be set
// and current. Idempotent: returns true immediately if already created.
bool RenderEGLImageTextureHost::CreateTextureHandle() {
  if (mTextureHandle) {
    return true;
  }

  // The target depends on driver capabilities: TEXTURE_2D or
  // TEXTURE_EXTERNAL. Remember it for later sampling/readback.
  mTextureTarget = mGL->GetPreferredEGLImageTextureTarget();
  MOZ_ASSERT(mTextureTarget == LOCAL_GL_TEXTURE_2D ||
             mTextureTarget == LOCAL_GL_TEXTURE_EXTERNAL);

  mGL->fGenTextures(1, &mTextureHandle);
  // Bind the new texture on unit 0 and apply the standard texture parameters.
  ActivateBindAndTexParameteri(mGL, LOCAL_GL_TEXTURE0, mTextureTarget,
                               mTextureHandle);
  // Attach the EGLImage as the texture's backing store.
  mGL->fEGLImageTargetTexture2D(mTextureTarget, mImage);
  return true;
}
|
|
|
|
void RenderEGLImageTextureHost::DeleteTextureHandle() {
|
|
if (mTextureHandle) {
|
|
if (mGL && mGL->MakeCurrent()) {
|
|
// XXX recycle gl texture, since SharedSurface_EGLImage and
|
|
// RenderEGLImageTextureHost is not recycled.
|
|
mGL->fDeleteTextures(1, &mTextureHandle);
|
|
}
|
|
mTextureHandle = 0;
|
|
}
|
|
}
|
|
|
|
bool RenderEGLImageTextureHost::WaitSync() {
|
|
bool syncSucceeded = true;
|
|
if (mSync) {
|
|
const auto& gle = gl::GLContextEGL::Cast(mGL);
|
|
const auto& egl = gle->mEgl;
|
|
MOZ_ASSERT(egl->IsExtensionSupported(gl::EGLExtension::KHR_fence_sync));
|
|
if (egl->IsExtensionSupported(gl::EGLExtension::KHR_wait_sync)) {
|
|
syncSucceeded = egl->fWaitSync(mSync, 0) == LOCAL_EGL_TRUE;
|
|
} else {
|
|
syncSucceeded = egl->fClientWaitSync(mSync, 0, LOCAL_EGL_FOREVER) ==
|
|
LOCAL_EGL_CONDITION_SATISFIED;
|
|
}
|
|
// We do not need to delete sync here. It is deleted by
|
|
// SharedSurface_EGLImage.
|
|
mSync = 0;
|
|
}
|
|
|
|
MOZ_ASSERT(
|
|
syncSucceeded,
|
|
"(Client)WaitSync generated an error. Has mSync already been destroyed?");
|
|
return syncSucceeded;
|
|
}
|
|
|
|
// Reads the EGLImage's pixels back into a new CPU-side DataSourceSurface,
// converting to the (BGRA/BGRX) format reported by GetFormat(). Returns
// nullptr on any failure.
already_AddRefed<gfx::DataSourceSurface>
RenderEGLImageTextureHost::ReadTexImage() {
  // Lazily acquire the render thread's shared GL context if this host was
  // never locked with one.
  if (!mGL) {
    mGL = RenderThread::Get()->SingletonGL();
    if (!mGL) {
      return nullptr;
    }
  }

  // Wait for the producer's fence and ensure a texture is bound to the
  // EGLImage before sampling from it.
  if (!WaitSync() || !CreateTextureHandle()) {
    return nullptr;
  }

  // Allocate resulting image surface.
  // Use GetFormat() rather than mFormat for the DataSourceSurface. eg BGRA
  // rather than RGBA, as the latter is not supported by swgl.
  // ReadTexImageHelper will take care of converting the data for us.
  const gfx::SurfaceFormat surfFormat = GetFormat();
  int32_t stride = mSize.width * BytesPerPixel(surfFormat);
  RefPtr<gfx::DataSourceSurface> surf =
      gfx::Factory::CreateDataSourceSurfaceWithStride(mSize, surfFormat,
                                                      stride);
  if (!surf) {
    return nullptr;
  }

  // Select the shader variant matching the texture target and source format
  // so the helper samples and swizzles correctly.
  layers::ShaderConfigOGL config =
      layers::ShaderConfigFromTargetAndFormat(mTextureTarget, mFormat);
  int shaderConfig = config.mFeatures;

  // Render the texture into the surface with an identity transform and no
  // y-flip.
  bool ret = mGL->ReadTexImageHelper()->ReadTexImage(
      surf, mTextureHandle, mTextureTarget, mSize, gfx::Matrix4x4(),
      shaderConfig, /* aYInvert */ false);
  if (!ret) {
    return nullptr;
  }

  return surf.forget();
}
|
|
|
|
} // namespace wr
|
|
} // namespace mozilla
|