Bug 1495025 - P5. Add Windows P010 and P016 support for software decoder r=cpearce

As we do not have an IMF nor a D3D11 NV12 image, we always require a full
copy of the data, which deinterleaves the chroma channels.

Depends on D7316

Differential Revision: https://phabricator.services.mozilla.com/D7318
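For context, a minimal sketch of what that copy has to do, assuming a
P010/P016 source whose chroma sits in a single interleaved UV plane
(DeinterleaveChroma is a hypothetical helper, not code from this patch):

#include <cstddef>
#include <cstdint>

// Sketch only: P010/P016 store chroma as one interleaved UV plane, while
// the planar image we build wants separate Cb and Cr planes.
static void DeinterleaveChroma(const uint16_t* aUV, size_t aPairs,
                               uint16_t* aCb, uint16_t* aCr)
{
  for (size_t i = 0; i < aPairs; ++i) {
    aCb[i] = aUV[2 * i];      // Cb (U) samples at even positions
    aCr[i] = aUV[2 * i + 1];  // Cr (V) samples at odd positions
  }
}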
@@ -323,14 +323,18 @@ VideoData::CreateAndCopyData(const VideoInfo& aInfo,
 #if XP_WIN
   // We disable this code path on Windows version earlier of Windows 8 due to
   // intermittent crashes with old drivers. See bug 1405110.
-  if (IsWin8OrLater() && !XRE_IsParentProcess() &&
-      aAllocator && aAllocator->SupportsD3D11()) {
+  // D3D11YCbCrImage can only handle YCbCr images using 3 non-interleaved planes
+  // non-zero mSkip value indicates that one of the plane would be interleaved.
+  if (IsWin8OrLater() && !XRE_IsParentProcess() && aAllocator &&
+      aAllocator->SupportsD3D11() && aBuffer.mPlanes[0].mSkip == 0 &&
+      aBuffer.mPlanes[1].mSkip == 0 && aBuffer.mPlanes[2].mSkip == 0) {
     RefPtr<layers::D3D11YCbCrImage> d3d11Image = new layers::D3D11YCbCrImage();
     PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture);
     if (d3d11Image->SetData(layers::ImageBridgeChild::GetSingleton()
                               ? layers::ImageBridgeChild::GetSingleton().get()
                               : aAllocator,
-                            aContainer, data)) {
+                            aContainer,
+                            data)) {
       v->mImage = d3d11Image;
       return v.forget();
     }
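To illustrate the new guard, a stand-alone sketch of the eligibility test
(Plane is a stripped-down stand-in for the real plane type, not part of the
patch):

#include <cstdint>

// Sketch: with mSkip != 0 every other sample in the buffer belongs to the
// other chroma channel, so the plane is not independent and
// D3D11YCbCrImage cannot consume it.
struct Plane { const uint8_t* mData; int32_t mSkip; };

static bool CanUseD3D11YCbCrImage(const Plane aPlanes[3])
{
  return aPlanes[0].mSkip == 0 && aPlanes[1].mSkip == 0 &&
         aPlanes[2].mSkip == 0;
}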
@@ -88,7 +88,6 @@ MFTDecoder::SetMediaTypes(IMFMediaType* aInputType,
                           std::function<HRESULT(IMFMediaType*)>&& aCallback)
 {
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
-  mOutputType = aOutputType;
 
   // Set the input type to the one the caller gave us...
   HRESULT hr = mDecoder->SetInputType(0, aInputType, 0);
@@ -132,10 +131,7 @@ MFTDecoder::FindDecoderOutputType(bool aMatchAllAttributes)
   MOZ_ASSERT(mscom::IsCurrentThreadMTA());
   MOZ_ASSERT(mOutputType, "SetDecoderTypes must have been called once");
 
-  GUID currentSubtype = {0};
-  HRESULT hr = mOutputType->GetGUID(MF_MT_SUBTYPE, &currentSubtype);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-  return FindDecoderOutputTypeWithSubtype(currentSubtype, aMatchAllAttributes);
+  return FindDecoderOutputTypeWithSubtype(mOutputSubType, aMatchAllAttributes);
 }
 
 HRESULT
@@ -191,6 +187,7 @@ MFTDecoder::SetDecoderOutputType(
                             MFT_OUTPUT_STREAM_PROVIDES_SAMPLES);
 
   mOutputType = outputType;
+  mOutputSubType = outSubtype;
 
   return S_OK;
 }
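A small usage sketch of the cached subtype, assuming a caller that only needs
to distinguish bit depths (IsHighBitDepth is illustrative, not part of the
patch; the MFVideoFormat_* GUIDs are the real Media Foundation constants):

#include <mfapi.h>  // MFVideoFormat_P010 / MFVideoFormat_P016 GUIDs

// Sketch: with mOutputSubType cached at negotiation time, callers can
// branch on the output format without re-querying the IMFMediaType.
static bool IsHighBitDepth(const GUID& aSubType)
{
  return aSubType == MFVideoFormat_P010 || aSubType == MFVideoFormat_P016;
}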
@@ -49,6 +49,7 @@ public:
   // Retrieves the media type being output. This may not be valid until
   // the first sample is decoded.
   HRESULT GetOutputMediaType(RefPtr<IMFMediaType>& aMediaType);
+  const GUID& GetOutputMediaSubType() const { return mOutputSubType; }
 
   // Submits data into the MFT for processing.
   //
@@ -105,6 +106,7 @@ private:
   RefPtr<IMFTransform> mDecoder;
 
   RefPtr<IMFMediaType> mOutputType;
+  GUID mOutputSubType;
 
   // True if the IMFTransform allocates the samples that it returns.
   bool mMFTProvidesOutputSamples = false;
@@ -899,8 +899,18 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
     stride = mVideoStride;
   }
 
-  // YV12, planar format: [YYYY....][VVVV....][UUUU....]
+  const GUID& subType = mDecoder->GetOutputMediaSubType();
+  MOZ_DIAGNOSTIC_ASSERT(subType == MFVideoFormat_YV12 ||
+                        subType == MFVideoFormat_P010 ||
+                        subType == MFVideoFormat_P016);
+  const gfx::ColorDepth colorDepth = subType == MFVideoFormat_YV12
+                                       ? gfx::ColorDepth::COLOR_8
+                                       : gfx::ColorDepth::COLOR_16;
+
+  // YV12, planar format (3 planes): [YYYY....][VVVV....][UUUU....]
   // i.e., Y, then V, then U.
+  // P010, P016 planar format (2 planes) [YYYY....][UVUV...]
+  // See https://docs.microsoft.com/en-us/windows/desktop/medfound/10-bit-and-16-bit-yuv-video-formats
   VideoData::YCbCrBuffer b;
 
   uint32_t videoWidth = mImageSize.width;
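Per the linked Microsoft documentation, P010 carries each 10-bit sample in
the most significant bits of a little-endian 16-bit word with the low bits
zero, which is why both P010 and P016 map onto 16-bit storage here. A sketch
of recovering the numeric value (not code from this patch):

#include <cstdint>

// Sketch: a P010 sample occupies the top 10 bits of a 16-bit word
// (low 6 bits are zero); P016 uses all 16 bits.
static uint16_t P010SampleValue(uint16_t aWord)
{
  return aWord >> 6;  // yields 0..1023
}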
@@ -922,24 +932,43 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
   uint32_t halfHeight = (videoHeight + 1) / 2;
   uint32_t halfWidth = (videoWidth + 1) / 2;
 
-  // U plane (Cb)
-  b.mPlanes[1].mData = data + y_size + v_size;
-  b.mPlanes[1].mStride = halfStride;
-  b.mPlanes[1].mHeight = halfHeight;
-  b.mPlanes[1].mWidth = halfWidth;
-  b.mPlanes[1].mOffset = 0;
-  b.mPlanes[1].mSkip = 0;
-
-  // V plane (Cr)
-  b.mPlanes[2].mData = data + y_size;
-  b.mPlanes[2].mStride = halfStride;
-  b.mPlanes[2].mHeight = halfHeight;
-  b.mPlanes[2].mWidth = halfWidth;
-  b.mPlanes[2].mOffset = 0;
-  b.mPlanes[2].mSkip = 0;
+  if (subType == MFVideoFormat_YV12) {
+    // U plane (Cb)
+    b.mPlanes[1].mData = data + y_size + v_size;
+    b.mPlanes[1].mStride = halfStride;
+    b.mPlanes[1].mHeight = halfHeight;
+    b.mPlanes[1].mWidth = halfWidth;
+    b.mPlanes[1].mOffset = 0;
+    b.mPlanes[1].mSkip = 0;
+
+    // V plane (Cr)
+    b.mPlanes[2].mData = data + y_size;
+    b.mPlanes[2].mStride = halfStride;
+    b.mPlanes[2].mHeight = halfHeight;
+    b.mPlanes[2].mWidth = halfWidth;
+    b.mPlanes[2].mOffset = 0;
+    b.mPlanes[2].mSkip = 0;
+  } else {
+    // U plane (Cb)
+    b.mPlanes[1].mData = data + y_size;
+    b.mPlanes[1].mStride = stride;
+    b.mPlanes[1].mHeight = halfHeight;
+    b.mPlanes[1].mWidth = halfWidth;
+    b.mPlanes[1].mOffset = 0;
+    b.mPlanes[1].mSkip = 1;
+
+    // V plane (Cr)
+    b.mPlanes[2].mData = data + y_size + sizeof(short);
+    b.mPlanes[2].mStride = stride;
+    b.mPlanes[2].mHeight = halfHeight;
+    b.mPlanes[2].mWidth = halfWidth;
+    b.mPlanes[2].mOffset = 0;
+    b.mPlanes[2].mSkip = 1;
+  }
 
   // YuvColorSpace
   b.mYUVColorSpace = mYUVColorSpace;
+  b.mColorDepth = colorDepth;
 
   TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
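To make the new else-arm concrete: both chroma planes alias the same
interleaved UV buffer, Cb at the first 16-bit word after the Y plane and Cr
one word later, with mSkip = 1 stepping over the partner channel. A
self-contained sketch of the same pointer arithmetic (ChromaView and
MapP010Chroma are illustrative names, not code from this patch):

#include <cstddef>
#include <cstdint>

struct ChromaView {
  const uint8_t* mData;
  uint32_t mSkip;
};

// Sketch: mapping a semi-planar P010/P016 frame onto two logical chroma
// planes, mirroring the plane setup in CreateBasicVideoFrame.
static void MapP010Chroma(const uint8_t* aFrame, size_t aYSizeBytes,
                          ChromaView& aCb, ChromaView& aCr)
{
  aCb.mData = aFrame + aYSizeBytes;                  // first UV word is Cb
  aCr.mData = aFrame + aYSizeBytes + sizeof(short);  // Cr is one word later
  aCb.mSkip = 1;  // skip the interleaved Cr sample
  aCr.mSkip = 1;  // skip the interleaved Cb sample
}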
@@ -948,7 +977,8 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
   gfx::IntRect pictureRegion =
     mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
 
-  if (!mKnowsCompositor || !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
+  if (colorDepth != gfx::ColorDepth::COLOR_8 || !mKnowsCompositor ||
+      !mKnowsCompositor->SupportsD3D11() || !mIMFUsable) {
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(mVideoInfo,
                                    mImageContainer,
@@ -1065,15 +1095,14 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset,
 
   // Attempt to find an appropriate OutputType, trying in order:
   // if HW accelerated: NV12, P010, P016
-  // if SW: YV12
+  // if SW: YV12, P010, P016
   if (FAILED((hr = (mDecoder->FindDecoderOutputTypeWithSubtype(
                   mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12,
                   false)))) &&
-      (!mUseHwAccel ||
-       (FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-                    MFVideoFormat_P010, false))) &&
-        FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
-                    MFVideoFormat_P016, false)))))) {
+      FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
+                  MFVideoFormat_P010, false))) &&
+      FAILED((hr = mDecoder->FindDecoderOutputTypeWithSubtype(
+                  MFVideoFormat_P016, false)))) {
     LOG("No suitable output format found");
     return hr;
   }
@@ -1895,13 +1895,6 @@ MappedYCbCrChannelData::CopyInto(MappedYCbCrChannelData& aDst)
   if (bytesPerPixel == 1) {
     copyData(aDst.data, aDst, data, *this);
   } else if (bytesPerPixel == 2) {
-    if (skip != 0) {
-      // The skip value definition doesn't specify if it's in bytes, or in
-      // "pixels". We will assume the later. There are currently no decoders
-      // returning HDR content with a skip value different than zero anyway.
-      NS_WARNING("skip value non zero for HDR content, please verify code "
-                 "(see bug 1421187)");
-    }
     copyData(reinterpret_cast<uint16_t*>(aDst.data),
              aDst,
              reinterpret_cast<uint16_t*>(data),
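The removed warning documented the assumption that still underpins the 16-bit
path: skip counts pixels rather than bytes, so stepping happens on uint16_t
pointers. A hedged sketch of a row copy under that assumption (copyRow is
hypothetical; the real copyData also handles strides and destination skip):

#include <cstddef>
#include <cstdint>

// Sketch: copy one row of 16-bit samples where aSkip counts pixels
// (uint16_t units) to step over after each sample.
static void copyRow(uint16_t* aDst, const uint16_t* aSrc,
                    size_t aWidth, uint32_t aSkip)
{
  for (size_t x = 0; x < aWidth; ++x) {
    aDst[x] = aSrc[x * (1 + aSkip)];
  }
}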