Bug 1495025 - P5. Add Windows P010 and P016 support for software decoder r=cpearce

As we have neither an IMF nor a D3D11 NV12 image, we always require a full copy of the data, which will deinterleave the chroma channels.

Depends on D7316

Differential Revision: https://phabricator.services.mozilla.com/D7318
Jean-Yves Avenard
2018-10-04 09:41:58 +00:00
parent 3ee9a02d40
commit 0c31c33770
5 changed files with 64 additions and 39 deletions
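
The "full copy that deinterleaves the chroma channels" mentioned in the message is the key cost of this path: P010/P016 store chroma as interleaved Cb/Cr pairs, while the destination YCbCr buffer wants three separate planes. A minimal sketch of such a deinterleaving copy; names, signature, and stride units are this note's assumptions, not the actual Gecko helper:

#include <cstddef>
#include <cstdint>

// Split one semi-planar CbCr plane (P010/P016 style: 16-bit Cb,Cr pairs)
// into two separate chroma planes. The source stride is in bytes, the
// destination strides in 16-bit samples (an assumption of this sketch).
void DeinterleaveChroma(const uint8_t* aSrcUV, size_t aSrcStrideBytes,
                        uint16_t* aDstCb, uint16_t* aDstCr,
                        size_t aDstStrideSamples,
                        size_t aChromaWidth, size_t aChromaHeight) {
  for (size_t row = 0; row < aChromaHeight; ++row) {
    const uint16_t* src =
        reinterpret_cast<const uint16_t*>(aSrcUV + row * aSrcStrideBytes);
    uint16_t* cb = aDstCb + row * aDstStrideSamples;
    uint16_t* cr = aDstCr + row * aDstStrideSamples;
    for (size_t col = 0; col < aChromaWidth; ++col) {
      cb[col] = src[2 * col];      // Cb is the first sample of each pair.
      cr[col] = src[2 * col + 1];  // Cr is the second.
    }
  }
}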


@@ -323,14 +323,18 @@ VideoData::CreateAndCopyData(const VideoInfo& aInfo,
 #if XP_WIN
   // We disable this code path on Windows versions earlier than Windows 8 due
   // to intermittent crashes with old drivers. See bug 1405110.
-  if (IsWin8OrLater() && !XRE_IsParentProcess() &&
-      aAllocator && aAllocator->SupportsD3D11()) {
+  // D3D11YCbCrImage can only handle YCbCr images using 3 non-interleaved
+  // planes; a non-zero mSkip value indicates that one of the planes would be
+  // interleaved.
+  if (IsWin8OrLater() && !XRE_IsParentProcess() && aAllocator &&
+      aAllocator->SupportsD3D11() && aBuffer.mPlanes[0].mSkip == 0 &&
+      aBuffer.mPlanes[1].mSkip == 0 && aBuffer.mPlanes[2].mSkip == 0) {
     RefPtr<layers::D3D11YCbCrImage> d3d11Image = new layers::D3D11YCbCrImage();
     PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
     if (d3d11Image->SetData(layers::ImageBridgeChild::GetSingleton()
-                              ? layers::ImageBridgeChild::GetSingleton().get()
-                              : aAllocator,
-                            aContainer, data)) {
+                              ? layers::ImageBridgeChild::GetSingleton().get()
+                              : aAllocator,
+                            aContainer,
+                            data)) {
       v->mImage = d3d11Image;
       return v.forget();
     }
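
The new mSkip == 0 guard is what keeps interleaved-chroma (P010/P016-style) buffers off the D3D11YCbCrImage path. A minimal sketch of the convention it relies on, assuming mSkip counts extra samples to step over between consecutive samples of a plane (the WMF hunks below use it that way):

#include <cstdint>

// Fetch one 8-bit sample from a plane described by (data, stride, skip).
// With aSkip == 0 the plane is packed; with aSkip == 1 every other sample
// belongs to another channel (NV12/P010-style UV interleaving), which the
// three separate textures behind D3D11YCbCrImage cannot represent.
uint8_t SampleAt(const uint8_t* aData, int32_t aStrideBytes, int32_t aSkip,
                 int32_t aX, int32_t aY) {
  return aData[aY * aStrideBytes + aX * (1 + aSkip)];
}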


@@ -88,7 +88,6 @@ MFTDecoder::SetMediaTypes(IMFMediaType* aInputType,
                           std::function<HRESULT(IMFMediaType*)>&& aCallback)
 {
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
-  mOutputType = aOutputType;
   // Set the input type to the one the caller gave us...
   HRESULT hr = mDecoder->SetInputType(0, aInputType, 0);
@@ -132,10 +131,7 @@ MFTDecoder::FindDecoderOutputType(bool aMatchAllAttributes)
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
   MOZ_ASSERT(mOutputType, "SetDecoderTypes must have been called once");
-  GUID currentSubtype = {0};
-  HRESULT hr = mOutputType->GetGUID(MF_MT_SUBTYPE, &currentSubtype);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-  return FindDecoderOutputTypeWithSubtype(currentSubtype, aMatchAllAttributes);
+  return FindDecoderOutputTypeWithSubtype(mOutputSubType, aMatchAllAttributes);
 }

 HRESULT
@@ -191,6 +187,7 @@ MFTDecoder::SetDecoderOutputType(
                                  MFT_OUTPUT_STREAM_PROVIDES_SAMPLES);
   mOutputType = outputType;
+  mOutputSubType = outSubtype;
   return S_OK;
 }
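
Taken together, these MFTDecoder changes capture the negotiated subtype GUID once, when the output type is configured, rather than re-querying the IMFMediaType every time FindDecoderOutputType runs. The pattern in miniature (an illustrative sketch, error handling mostly elided):

#include <windows.h>
#include <mfapi.h>
#include <mfobjects.h>

class SubtypeCache {
 public:
  // Call whenever the output media type is (re)configured, as
  // SetDecoderOutputType now does with mOutputSubType.
  HRESULT OnOutputTypeSet(IMFMediaType* aType) {
    return aType->GetGUID(MF_MT_SUBTYPE, &mOutputSubType);
  }
  // Cheap to read later, e.g. when re-finding an output type after a
  // stream format change.
  const GUID& OutputSubType() const { return mOutputSubType; }

 private:
  GUID mOutputSubType = {0};
};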


@@ -49,6 +49,7 @@ public:
   // Retrieves the media type being output. This may not be valid until
   // the first sample is decoded.
   HRESULT GetOutputMediaType(RefPtr<IMFMediaType>& aMediaType);
+  const GUID& GetOutputMediaSubType() const { return mOutputSubType; }
   // Submits data into the MFT for processing.
   //
@@ -105,6 +106,7 @@ private:
   RefPtr<IMFTransform> mDecoder;
   RefPtr<IMFMediaType> mOutputType;
+  GUID mOutputSubType;
   // True if the IMFTransform allocates the samples that it returns.
   bool mMFTProvidesOutputSamples = false;


@@ -899,8 +899,18 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
     stride = mVideoStride;
   }
-  // YV12, planar format: [YYYY....][VVVV....][UUUU....]
+  const GUID& subType = mDecoder->GetOutputMediaSubType();
+  MOZ_DIAGNOSTIC_ASSERT(subType == MFVideoFormat_YV12 ||
+                        subType == MFVideoFormat_P010 ||
+                        subType == MFVideoFormat_P016);
+  const gfx::ColorDepth colorDepth = subType == MFVideoFormat_YV12
+                                       ? gfx::ColorDepth::COLOR_8
+                                       : gfx::ColorDepth::COLOR_16;
+  // YV12, planar format (3 planes): [YYYY....][VVVV....][UUUU....]
+  // i.e., Y, then V, then U.
+  // P010, P016, planar format (2 planes): [YYYY....][UVUV....]
+  // See https://docs.microsoft.com/en-us/windows/desktop/medfound/10-bit-and-16-bit-yuv-video-formats
   VideoData::YCbCrBuffer b;
   uint32_t videoWidth = mImageSize.width;
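
A worked sketch of the chroma offsets the two branches below derive; the helper names are this note's own, and the real y_size/v_size/stride values are computed earlier in CreateBasicVideoFrame:

#include <cstddef>
#include <cstdint>

struct ChromaOffsets {
  size_t mCb;  // byte offset of the first Cb sample
  size_t mCr;  // byte offset of the first Cr sample
};

// YV12 (8-bit): [Y: stride x height][V: halfStride x halfHeight][U: ...]
ChromaOffsets YV12Offsets(size_t aStride, size_t aHeight) {
  size_t halfStride = (aStride + 1) / 2;
  size_t halfHeight = (aHeight + 1) / 2;
  size_t ySize = aStride * aHeight;
  size_t vSize = halfStride * halfHeight;
  return {ySize + vSize, ySize};  // U follows V, V follows Y.
}

// P010/P016 (16-bit): [Y: stride x height][CbCr pairs: stride x halfHeight]
ChromaOffsets P01xOffsets(size_t aStride, size_t aHeight) {
  size_t ySize = aStride * aHeight;          // stride already spans 2-byte samples
  return {ySize, ySize + sizeof(uint16_t)};  // Cr is one sample after Cb.
}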
@@ -922,24 +932,43 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
   uint32_t halfHeight = (videoHeight + 1) / 2;
   uint32_t halfWidth = (videoWidth + 1) / 2;
-  // U plane (Cb)
-  b.mPlanes[1].mData = data + y_size + v_size;
-  b.mPlanes[1].mStride = halfStride;
-  b.mPlanes[1].mHeight = halfHeight;
-  b.mPlanes[1].mWidth = halfWidth;
-  b.mPlanes[1].mOffset = 0;
-  b.mPlanes[1].mSkip = 0;
+  if (subType == MFVideoFormat_YV12) {
+    // U plane (Cb)
+    b.mPlanes[1].mData = data + y_size + v_size;
+    b.mPlanes[1].mStride = halfStride;
+    b.mPlanes[1].mHeight = halfHeight;
+    b.mPlanes[1].mWidth = halfWidth;
+    b.mPlanes[1].mOffset = 0;
+    b.mPlanes[1].mSkip = 0;
-  // V plane (Cr)
-  b.mPlanes[2].mData = data + y_size;
-  b.mPlanes[2].mStride = halfStride;
-  b.mPlanes[2].mHeight = halfHeight;
-  b.mPlanes[2].mWidth = halfWidth;
-  b.mPlanes[2].mOffset = 0;
-  b.mPlanes[2].mSkip = 0;
+    // V plane (Cr)
+    b.mPlanes[2].mData = data + y_size;
+    b.mPlanes[2].mStride = halfStride;
+    b.mPlanes[2].mHeight = halfHeight;
+    b.mPlanes[2].mWidth = halfWidth;
+    b.mPlanes[2].mOffset = 0;
+    b.mPlanes[2].mSkip = 0;
+  } else {
+    // U plane (Cb)
+    b.mPlanes[1].mData = data + y_size;
+    b.mPlanes[1].mStride = stride;
+    b.mPlanes[1].mHeight = halfHeight;
+    b.mPlanes[1].mWidth = halfWidth;
+    b.mPlanes[1].mOffset = 0;
+    b.mPlanes[1].mSkip = 1;
+    // V plane (Cr)
+    b.mPlanes[2].mData = data + y_size + sizeof(short);
+    b.mPlanes[2].mStride = stride;
+    b.mPlanes[2].mHeight = halfHeight;
+    b.mPlanes[2].mWidth = halfWidth;
+    b.mPlanes[2].mOffset = 0;
+    b.mPlanes[2].mSkip = 1;
+  }
   // YuvColorSpace
   b.mYUVColorSpace = mYUVColorSpace;
+  b.mColorDepth = colorDepth;
   TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
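
Note that in the P010/P016 branch the two logical chroma planes alias the same interleaved buffer: both use the full stride and mSkip = 1, and the Cr pointer starts one 16-bit sample after Cb. A sketch of what a skip-aware consumer reads back, again assuming skip is counted in samples:

#include <cstdint>

// Read chroma sample (aX, aY) from one of the two aliased P010/P016 planes.
// aPlaneData is data + y_size for Cb, data + y_size + sizeof(short) for Cr;
// aStrideBytes is the full row stride; skip == 1 steps over the other channel.
uint16_t ChromaSampleAt(const uint8_t* aPlaneData, int32_t aStrideBytes,
                        int32_t aX, int32_t aY) {
  const uint16_t* row =
      reinterpret_cast<const uint16_t*>(aPlaneData + aY * aStrideBytes);
  const int32_t skip = 1;
  return row[aX * (1 + skip)];
}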
@@ -948,7 +977,8 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
   gfx::IntRect pictureRegion =
     mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
-  if (!mKnowsCompositor || !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
+  if (colorDepth != gfx::ColorDepth::COLOR_8 || !mKnowsCompositor ||
+      !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(mVideoInfo,
                                    mImageContainer,
@@ -1065,15 +1095,14 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset,
   // Attempt to find an appropriate OutputType, trying in order:
   // if HW accelerated: NV12, P010, P016
-  // if SW: YV12
+  // if SW: YV12, P010, P016
   if (FAILED((hr = (mDecoder->FindDecoderOutputTypeWithSubtype(
                 mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12,
                 false)))) &&
-      (!mUseHwAccel ||
-       (FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-            MFVideoFormat_P010, false))) &&
-        FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-            MFVideoFormat_P016, false)))))) {
+      FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
+          MFVideoFormat_P010, false))) &&
+      FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
+          MFVideoFormat_P016, false)))) {
     LOG("No suitable output format found");
     return hr;
   }
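
With the !mUseHwAccel parenthesis gone, both the HW and SW paths fall through P010 and P016. An equivalent loop formulation of the chain (an illustrative sketch; the DecoderLike stand-in mirrors the MFTDecoder method used above):

#include <windows.h>
#include <mfapi.h>

// Minimal stand-in for the MFTDecoder interface from the hunks above.
struct DecoderLike {
  virtual HRESULT FindDecoderOutputTypeWithSubtype(const GUID& aSubType,
                                                   bool aMatchAllAttributes) = 0;
};

// Try each candidate output subtype in order; the first success wins.
HRESULT FindFirstUsableOutputType(DecoderLike* aDecoder, bool aUseHwAccel) {
  const GUID kCandidates[] = {
      aUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12,
      MFVideoFormat_P010,
      MFVideoFormat_P016,
  };
  HRESULT hr = E_FAIL;
  for (const GUID& subtype : kCandidates) {
    hr = aDecoder->FindDecoderOutputTypeWithSubtype(subtype, false);
    if (SUCCEEDED(hr)) {
      break;  // HW prefers NV12, SW prefers YV12; P010/P016 are fallbacks.
    }
  }
  return hr;  // On total failure, the HRESULT of the last attempt, as before.
}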


@@ -1895,13 +1895,6 @@ MappedYCbCrChannelData::CopyInto(MappedYCbCrChannelData& aDst)
   if (bytesPerPixel == 1) {
     copyData(aDst.data, aDst, data, *this);
   } else if (bytesPerPixel == 2) {
-    if (skip != 0) {
-      // The skip value definition doesn't specify if it's in bytes, or in
-      // "pixels". We will assume the latter. There are currently no decoders
-      // returning HDR content with a skip value different than zero anyway.
-      NS_WARNING("skip value non zero for HDR content, please verify code "
-                 "(see bug 1421187)");
-    }
     copyData(reinterpret_cast<uint16_t*>(aDst.data),
              aDst,
              reinterpret_cast<uint16_t*>(data),