Bug 557982. Use Image objects in the video frame queue so we can avoid making an extra copy as we put frames into the queue. r=kinetik

This commit is contained in:
Rich Dougherty
2010-05-19 15:04:33 +12:00
parent 0476d9cb00
commit 52adda535c
8 changed files with 225 additions and 222 deletions

View File

@@ -47,6 +47,8 @@
#include "VideoUtils.h"
using namespace mozilla;
using mozilla::layers::ImageContainer;
using mozilla::layers::PlanarYCbCrImage;
// Un-comment to enable logging of seek bisections.
//#define SEEK_LOGGING
@@ -64,47 +66,100 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
#define SEEK_LOG(type, msg)
#endif
// 32 bit integer multiplication with overflow checking. Returns PR_TRUE
// if the multiplication was successful, or PR_FALSE if the operation resulted
// in an integer overflow.
PRBool MulOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult) {
PRUint64 a64 = a;
PRUint64 b64 = b;
PRUint64 r64 = a64 * b64;
if (r64 > PR_UINT32_MAX)
// Adds two 32bit unsigned numbers, returns PR_TRUE if the addition succeeded,
// or PR_FALSE if the addition would result in an overflow.
static PRBool AddOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult) {
PRUint64 rl = static_cast<PRUint64>(a) + static_cast<PRUint64>(b);
if (rl > PR_UINT32_MAX) {
return PR_FALSE;
aResult = static_cast<PRUint32>(r64);
return PR_TRUE;
}
aResult = static_cast<PRUint32>(rl);
return true;
}
VideoData* VideoData::Create(PRInt64 aOffset,
// Returns PR_TRUE when a single YCbCr plane is acceptable to the layers
// backend: both dimensions must fit within PlanarYCbCrImage::MAX_DIMENSION
// and the stride must be strictly positive.
static PRBool
ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
{
  if (aPlane.mWidth > PlanarYCbCrImage::MAX_DIMENSION) {
    return PR_FALSE;
  }
  if (aPlane.mHeight > PlanarYCbCrImage::MAX_DIMENSION) {
    return PR_FALSE;
  }
  return aPlane.mStride > 0;
}
VideoData* VideoData::Create(nsVideoInfo& aInfo,
ImageContainer* aContainer,
PRInt64 aOffset,
PRInt64 aTime,
const YCbCrBuffer &aBuffer,
const YCbCrBuffer& aBuffer,
PRBool aKeyframe,
PRInt64 aTimecode)
{
nsAutoPtr<VideoData> v(new VideoData(aOffset, aTime, aKeyframe, aTimecode));
for (PRUint32 i=0; i < 3; ++i) {
PRUint32 size = 0;
if (!MulOverflow32(PR_ABS(aBuffer.mPlanes[i].mHeight),
PR_ABS(aBuffer.mPlanes[i].mStride),
size))
{
// Invalid frame size. Skip this plane. The plane will have 0
// dimensions, thanks to our constructor.
continue;
}
unsigned char* p = static_cast<unsigned char*>(moz_xmalloc(size));
if (!p) {
NS_WARNING("Failed to allocate memory for video frame");
return nsnull;
}
v->mBuffer.mPlanes[i].mData = p;
v->mBuffer.mPlanes[i].mWidth = aBuffer.mPlanes[i].mWidth;
v->mBuffer.mPlanes[i].mHeight = aBuffer.mPlanes[i].mHeight;
v->mBuffer.mPlanes[i].mStride = aBuffer.mPlanes[i].mStride;
memcpy(v->mBuffer.mPlanes[i].mData, aBuffer.mPlanes[i].mData, size);
if (!aContainer) {
return nsnull;
}
// The following situation should never happen unless there is a bug
// in the decoder
if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
NS_ERROR("C planes with different sizes");
return nsnull;
}
// The following situations could be triggered by invalid input
if (aInfo.mPicture.width <= 0 || aInfo.mPicture.height <= 0) {
NS_WARNING("Empty picture rect");
return nsnull;
}
if (aBuffer.mPlanes[0].mWidth != PRUint32(aInfo.mFrame.width) ||
aBuffer.mPlanes[0].mHeight != PRUint32(aInfo.mFrame.height)) {
NS_WARNING("Unexpected frame size");
return nsnull;
}
if (!ValidatePlane(aBuffer.mPlanes[0]) || !ValidatePlane(aBuffer.mPlanes[1]) ||
!ValidatePlane(aBuffer.mPlanes[2])) {
NS_WARNING("Invalid plane size");
return nsnull;
}
// Ensure the picture size specified in the headers can be extracted out of
// the frame we've been supplied without indexing out of bounds.
PRUint32 picXLimit;
PRUint32 picYLimit;
if (!AddOverflow32(aInfo.mPicture.x, aInfo.mPicture.width, picXLimit) ||
picXLimit > PRUint32(aBuffer.mPlanes[0].mStride) ||
!AddOverflow32(aInfo.mPicture.y, aInfo.mPicture.height, picYLimit) ||
picYLimit > PRUint32(aBuffer.mPlanes[0].mHeight))
{
// The specified picture dimensions can't be contained inside the video
// frame, we'll stomp memory if we try to copy it. Fail.
NS_WARNING("Overflowing picture rect");
return nsnull;
}
nsAutoPtr<VideoData> v(new VideoData(aOffset, aTime, aKeyframe, aTimecode));
// Currently our decoder only knows how to output to PLANAR_YCBCR
// format.
Image::Format format = Image::PLANAR_YCBCR;
v->mImage = aContainer->CreateImage(&format, 1);
if (!v->mImage) {
return nsnull;
}
NS_ASSERTION(v->mImage->GetFormat() == Image::PLANAR_YCBCR,
"Wrong format?");
PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());
PlanarYCbCrImage::Data data;
data.mYChannel = aBuffer.mPlanes[0].mData;
data.mYSize = gfxIntSize(aBuffer.mPlanes[0].mWidth, aBuffer.mPlanes[0].mHeight);
data.mYStride = aBuffer.mPlanes[0].mStride;
data.mCbChannel = aBuffer.mPlanes[1].mData;
data.mCrChannel = aBuffer.mPlanes[2].mData;
data.mCbCrSize = gfxIntSize(aBuffer.mPlanes[1].mWidth, aBuffer.mPlanes[1].mHeight);
data.mCbCrStride = aBuffer.mPlanes[1].mStride;
data.mPicX = aInfo.mPicture.x;
data.mPicY = aInfo.mPicture.y;
data.mPicSize = gfxIntSize(aInfo.mPicture.width, aInfo.mPicture.height);
videoImage->SetData(data); // Copies buffer
return v.forget();
}

View File

@@ -40,6 +40,8 @@
#define nsBuiltinDecoderReader_h_
#include <nsDeque.h>
#include "Layers.h"
#include "ImageLayers.h"
#include "nsAutoLock.h"
#include "nsClassHashtable.h"
#include "mozilla/TimeStamp.h"
@@ -49,6 +51,53 @@
class nsBuiltinDecoderStateMachine;
// Stores info relevant to presenting media samples.
class nsVideoInfo {
public:
nsVideoInfo()
: mFramerate(0.0),
mAspectRatio(1.0),
mCallbackPeriod(1),
mAudioRate(0),
mAudioChannels(0),
mFrame(0,0),
mHasAudio(PR_FALSE),
mHasVideo(PR_FALSE)
{}
// Frames per second.
float mFramerate;
// Aspect ratio, as stored in the metadata.
float mAspectRatio;
// Length of a video frame in milliseconds, or the callback period if
// there's no audio.
PRUint32 mCallbackPeriod;
// Samples per second.
PRUint32 mAudioRate;
// Number of audio channels.
PRUint32 mAudioChannels;
// Dimensions of the video frame.
nsIntSize mFrame;
// The picture region inside the video frame to be displayed.
nsIntRect mPicture;
// The offset of the first non-header page in the file, in bytes.
// Used to seek to the start of the media.
PRInt64 mDataOffset;
// PR_TRUE if we have an active audio bitstream.
PRPackedBool mHasAudio;
// PR_TRUE if we have an active video bitstream.
PRPackedBool mHasVideo;
};
// Holds a chunk of decoded sound samples.
class SoundData {
public:
@@ -106,6 +155,9 @@ public:
// Holds a decoded video frame, in YCbCr format. These are queued in the reader.
class VideoData {
public:
typedef mozilla::layers::ImageContainer ImageContainer;
typedef mozilla::layers::Image Image;
// YCbCr data obtained from decoding the video. The index's are:
// 0 = Y
// 1 = Cb
@@ -122,10 +174,14 @@ public:
};
// Constructs a VideoData object. Makes a copy of YCbCr data in aBuffer.
// This may return nsnull if we run out of memory when allocating buffers
// to store the frame. aTimecode is a codec specific number representing
// the timestamp of the frame of video data.
static VideoData* Create(PRInt64 aOffset,
// aTimecode is a codec specific number representing the timestamp of
// the frame of video data. Returns nsnull if an error occurs. This may
// indicate that memory couldn't be allocated to create the VideoData
// object, or it may indicate some problem with the input data (e.g.
// negative stride).
static VideoData* Create(nsVideoInfo& aInfo,
ImageContainer* aContainer,
PRInt64 aOffset,
PRInt64 aTime,
const YCbCrBuffer &aBuffer,
PRBool aKeyframe,
@@ -144,9 +200,6 @@ public:
~VideoData()
{
MOZ_COUNT_DTOR(VideoData);
for (PRUint32 i = 0; i < 3; ++i) {
moz_free(mBuffer.mPlanes[i].mData);
}
}
// Approximate byte offset of the end of the frame in the media.
@@ -159,7 +212,8 @@ public:
// granulepos.
PRInt64 mTimecode;
YCbCrBuffer mBuffer;
// This frame's image.
nsRefPtr<Image> mImage;
// When PR_TRUE, denotes that this frame is identical to the frame that
// came before; it's a duplicate. mBuffer will be empty.
@@ -175,7 +229,6 @@ public:
mKeyframe(PR_FALSE)
{
MOZ_COUNT_CTOR(VideoData);
memset(&mBuffer, 0, sizeof(YCbCrBuffer));
}
VideoData(PRInt64 aOffset,
@@ -333,53 +386,6 @@ public:
PRInt64 mTimeStart, mTimeEnd; // in ms.
};
// Stores info relevant to presenting media samples.
class nsVideoInfo {
public:
nsVideoInfo()
: mFramerate(0.0),
mAspectRatio(1.0),
mCallbackPeriod(1),
mAudioRate(0),
mAudioChannels(0),
mFrame(0,0),
mHasAudio(PR_FALSE),
mHasVideo(PR_FALSE)
{}
// Frames per second.
float mFramerate;
// Aspect ratio, as stored in the metadata.
float mAspectRatio;
// Length of a video frame in milliseconds, or the callback period if
// there's no audio.
PRUint32 mCallbackPeriod;
// Samples per second.
PRUint32 mAudioRate;
// Number of audio channels.
PRUint32 mAudioChannels;
// Dimensions of the video frame.
nsIntSize mFrame;
// The picture region inside the video frame to be displayed.
nsIntRect mPicture;
// The offset of the first non-header page in the file, in bytes.
// Used to seek to the start of the media.
PRInt64 mDataOffset;
// PR_TRUE if we have an active audio bitstream.
PRPackedBool mHasAudio;
// PR_TRUE if we have an active video bitstream.
PRPackedBool mHasVideo;
};
// Encapsulates the decoding and reading of media data. Reading can be done
// on either the state machine thread (when loading and seeking) or on
// the reader thread (when it's reading and decoding). The reader encapsulates
@@ -415,10 +421,10 @@ public:
virtual PRBool HasAudio() = 0;
virtual PRBool HasVideo() = 0;
// Read header data for all bitstreams in the file. Fills aInfo with
// Read header data for all bitstreams in the file. Fills mInfo with
// the data required to present the media. Returns NS_OK on success,
// or NS_ERROR_FAILURE on failure.
virtual nsresult ReadMetadata(nsVideoInfo& aInfo) = 0;
virtual nsresult ReadMetadata() = 0;
// Stores the presentation time of the first sample in the stream in
@@ -434,6 +440,11 @@ public:
// denote the start and end times of the media.
virtual nsresult Seek(PRInt64 aTime, PRInt64 aStartTime, PRInt64 aEndTime) = 0;
// Gets presentation info required for playback.
const nsVideoInfo& GetInfo() {
return mInfo;
}
// Queue of audio samples. This queue is threadsafe.
MediaQueue<SoundData> mAudioQueue;
@@ -490,6 +501,9 @@ protected:
// The offset of the start of the first non-header page in the file.
// Used to seek to media start time.
PRInt64 mDataOffset;
// Stores presentation info required for playback.
nsVideoInfo mInfo;
};
#endif

View File

@@ -49,10 +49,6 @@
using namespace mozilla;
using namespace mozilla::layers;
// Adds two 32bit unsigned numbers, returns PR_TRUE if the addition succeeded,
// or PR_FALSE if the addition would result in an overflow.
static PRBool AddOverflow(PRUint32 a, PRUint32 b, PRUint32& aResult);
#ifdef PR_LOGGING
extern PRLogModuleInfo* gBuiltinDecoderLog;
#define LOG(type, msg) PR_LOG(gBuiltinDecoderLog, type, msg)
@@ -96,10 +92,6 @@ const unsigned AMPLE_AUDIO_MS = 2000;
// less than LOW_VIDEO_FRAMES frames.
static const PRUint32 LOW_VIDEO_FRAMES = 1;
// The frame rate to use if there is no video data in the resource to
// be played.
#define AUDIO_FRAME_RATE 25.0
nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDecoder,
nsBuiltinDecoderReader* aReader) :
mDecoder(aDecoder),
@@ -500,9 +492,10 @@ void nsBuiltinDecoderStateMachine::StartPlayback()
mAudioStream->Resume();
} else {
// No audiostream, create one.
const nsVideoInfo& info = mReader->GetInfo();
mAudioStream = new nsAudioStream();
mAudioStream->Init(mInfo.mAudioChannels,
mInfo.mAudioRate,
mAudioStream->Init(info.mAudioChannels,
info.mAudioRate,
nsAudioStream::FORMAT_FLOAT32);
mAudioStream->SetVolume(mVolume);
}
@@ -897,7 +890,7 @@ nsresult nsBuiltinDecoderStateMachine::Run()
RenderVideoFrame(video);
if (!audio) {
NS_ASSERTION(video->mTime <= seekTime &&
seekTime <= video->mTime + mInfo.mCallbackPeriod,
seekTime <= video->mTime + mReader->GetInfo().mCallbackPeriod,
"Seek target should lie inside the first frame after seek");
mPlayDuration = TimeDuration::FromMilliseconds(seekTime);
}
@@ -1006,7 +999,7 @@ nsresult nsBuiltinDecoderStateMachine::Run()
StopDecodeThreads();
if (mDecoder->GetState() == nsBuiltinDecoder::PLAY_STATE_PLAYING) {
PRInt64 videoTime = HasVideo() ? (mVideoFrameTime + mInfo.mCallbackPeriod) : 0;
PRInt64 videoTime = HasVideo() ? (mVideoFrameTime + mReader->GetInfo().mCallbackPeriod) : 0;
PRInt64 clockTime = NS_MAX(mEndTime, NS_MAX(videoTime, GetAudioClock()));
UpdatePlaybackPosition(clockTime);
{
@@ -1036,77 +1029,10 @@ void nsBuiltinDecoderStateMachine::RenderVideoFrame(VideoData* aData)
return;
}
NS_ASSERTION(mInfo.mPicture.width != 0 && mInfo.mPicture.height != 0,
"We can only render non-zero-sized video");
NS_ASSERTION(aData->mBuffer.mPlanes[0].mStride >= 0 && aData->mBuffer.mPlanes[0].mHeight >= 0 &&
aData->mBuffer.mPlanes[1].mStride >= 0 && aData->mBuffer.mPlanes[1].mHeight >= 0 &&
aData->mBuffer.mPlanes[2].mStride >= 0 && aData->mBuffer.mPlanes[2].mHeight >= 0,
"YCbCr stride and height must be non-negative");
// Ensure the picture size specified in the headers can be extracted out of
// the frame we've been supplied without indexing out of bounds.
PRUint32 picXLimit;
PRUint32 picYLimit;
if (!AddOverflow(mInfo.mPicture.x, mInfo.mPicture.width, picXLimit) ||
picXLimit > PRUint32(PR_ABS(aData->mBuffer.mPlanes[0].mStride)) ||
!AddOverflow(mInfo.mPicture.y, mInfo.mPicture.height, picYLimit) ||
picYLimit > PRUint32(PR_ABS(aData->mBuffer.mPlanes[0].mHeight)))
{
// The specified picture dimensions can't be contained inside the video
// frame, we'll stomp memory if we try to copy it. Fail.
return;
}
unsigned ySize = aData->mBuffer.mPlanes[0].mStride * aData->mBuffer.mPlanes[0].mHeight;
unsigned cbSize = aData->mBuffer.mPlanes[1].mStride * aData->mBuffer.mPlanes[1].mHeight;
unsigned crSize = aData->mBuffer.mPlanes[2].mStride * aData->mBuffer.mPlanes[2].mHeight;
unsigned cbCrSize = ySize + cbSize + crSize;
if (cbCrSize != mCbCrSize) {
mCbCrSize = cbCrSize;
mCbCrBuffer = static_cast<unsigned char*>(moz_xmalloc(cbCrSize));
if (!mCbCrBuffer) {
// Malloc failed...
NS_WARNING("Malloc failure allocating YCbCr->RGB buffer");
return;
}
}
unsigned char* data = mCbCrBuffer.get();
unsigned char* y = data;
unsigned char* cb = y + ySize;
unsigned char* cr = cb + cbSize;
memcpy(y, aData->mBuffer.mPlanes[0].mData, ySize);
memcpy(cb, aData->mBuffer.mPlanes[1].mData, cbSize);
memcpy(cr, aData->mBuffer.mPlanes[2].mData, crSize);
ImageContainer* container = mDecoder->GetImageContainer();
// Currently our decoder only knows how to output to PLANAR_YCBCR
// format.
Image::Format format = Image::PLANAR_YCBCR;
nsRefPtr<Image> image;
if (container) {
image = container->CreateImage(&format, 1);
}
nsRefPtr<Image> image = aData->mImage;
if (image) {
NS_ASSERTION(image->GetFormat() == Image::PLANAR_YCBCR,
"Wrong format?");
PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(image.get());
PlanarYCbCrImage::Data data;
data.mYChannel = y;
data.mYSize = gfxIntSize(mInfo.mFrame.width, mInfo.mFrame.height);
data.mYStride = aData->mBuffer.mPlanes[0].mStride;
data.mCbChannel = cb;
data.mCrChannel = cr;
data.mCbCrSize = gfxIntSize(aData->mBuffer.mPlanes[1].mWidth, aData->mBuffer.mPlanes[1].mHeight);
data.mCbCrStride = aData->mBuffer.mPlanes[1].mStride;
data.mPicX = mInfo.mPicture.x;
data.mPicY = mInfo.mPicture.y;
data.mPicSize = gfxIntSize(mInfo.mPicture.width, mInfo.mPicture.height);
videoImage->SetData(data);
mDecoder->SetVideoData(data.mPicSize, mInfo.mAspectRatio, image);
const nsVideoInfo& info = mReader->GetInfo();
mDecoder->SetVideoData(gfxIntSize(info.mPicture.width, info.mPicture.height), info.mAspectRatio, image);
}
}
@@ -1137,7 +1063,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
// not played a sample on the audio thread, so we can't get a time
// from the audio clock. Just wait and then return, to give the audio
// clock time to tick.
Wait(mInfo.mCallbackPeriod);
Wait(mReader->GetInfo().mCallbackPeriod);
return;
}
@@ -1209,7 +1135,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
// ready state. Post an update to do so.
UpdateReadyState();
Wait(mInfo.mCallbackPeriod);
Wait(mReader->GetInfo().mCallbackPeriod);
} else {
if (IsPlaying()) {
StopPlayback(AUDIO_PAUSE);
@@ -1251,7 +1177,7 @@ VideoData* nsBuiltinDecoderStateMachine::FindStartTime()
VideoData* v = nsnull;
{
MonitorAutoExit exitMon(mDecoder->GetMonitor());
v = mReader->FindStartTime(mInfo.mDataOffset, startTime);
v = mReader->FindStartTime(mReader->GetInfo().mDataOffset, startTime);
}
if (startTime != 0) {
mStartTime = startTime;
@@ -1289,11 +1215,11 @@ void nsBuiltinDecoderStateMachine::FindEndTime()
mEndTime = endTime;
}
NS_ASSERTION(mInfo.mDataOffset > 0,
NS_ASSERTION(mReader->GetInfo().mDataOffset > 0,
"Should have offset of first non-header page");
{
MonitorAutoExit exitMon(mDecoder->GetMonitor());
stream->Seek(nsISeekableStream::NS_SEEK_SET, mInfo.mDataOffset);
stream->Seek(nsISeekableStream::NS_SEEK_SET, mReader->GetInfo().mDataOffset);
}
LOG(PR_LOG_DEBUG, ("%p Media end time is %lldms", mDecoder, mEndTime));
}
@@ -1319,16 +1245,6 @@ void nsBuiltinDecoderStateMachine::UpdateReadyState() {
NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
}
static PRBool AddOverflow(PRUint32 a, PRUint32 b, PRUint32& aResult) {
PRUint64 rl = static_cast<PRUint64>(a) + static_cast<PRUint64>(b);
if (rl > PR_UINT32_MAX) {
return PR_FALSE;
}
aResult = static_cast<PRUint32>(rl);
return true;
}
void nsBuiltinDecoderStateMachine::LoadMetadata()
{
NS_ASSERTION(IsCurrentThread(mDecoder->mStateMachineThread),
@@ -1339,15 +1255,14 @@ void nsBuiltinDecoderStateMachine::LoadMetadata()
nsMediaStream* stream = mDecoder->mStream;
nsVideoInfo info;
{
MonitorAutoExit exitMon(mDecoder->GetMonitor());
mReader->ReadMetadata(info);
mReader->ReadMetadata();
}
mInfo = info;
mDecoder->StartProgressUpdates();
const nsVideoInfo& info = mReader->GetInfo();
if (!mInfo.mHasVideo && !mInfo.mHasAudio) {
if (!info.mHasVideo && !info.mHasAudio) {
mState = DECODER_STATE_SHUTDOWN;
nsCOMPtr<nsIRunnable> event =
NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::DecodeError);
@@ -1355,10 +1270,7 @@ void nsBuiltinDecoderStateMachine::LoadMetadata()
return;
}
if (!mInfo.mHasVideo) {
mInfo.mCallbackPeriod = 1000 / AUDIO_FRAME_RATE;
}
LOG(PR_LOG_DEBUG, ("%p Callback Period: %u", mDecoder, mInfo.mCallbackPeriod));
LOG(PR_LOG_DEBUG, ("%p Callback Period: %u", mDecoder, info.mCallbackPeriod));
// TODO: Get the duration from Skeleton index, if available.

View File

@@ -182,14 +182,14 @@ public:
// The decoder monitor must be obtained before calling this.
PRBool HasAudio() const {
mDecoder->GetMonitor().AssertCurrentThreadIn();
return mInfo.mHasAudio;
return mReader->GetInfo().mHasAudio;
}
// This is called on the state machine thread and audio thread.
// The decoder monitor must be obtained before calling this.
PRBool HasVideo() const {
mDecoder->GetMonitor().AssertCurrentThreadIn();
return mInfo.mHasVideo;
return mReader->GetInfo().mHasVideo;
}
// Should be called by main thread.
@@ -375,9 +375,6 @@ protected:
// monitor when using the audio stream!
nsAutoPtr<nsAudioStream> mAudioStream;
// Stores presentation info required for playback of the media.
nsVideoInfo mInfo;
// The reader, don't call its methods with the decoder monitor held.
// This is created in the play state machine's constructor, and destroyed
// in the play state machine's destructor.

View File

@@ -61,9 +61,19 @@ static PRBool AddOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
// in an integer overflow.
static PRBool MulOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
// Defined in nsOggReader.cpp.
extern PRBool MulOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult);
// 32 bit integer multiplication with overflow checking. Returns PR_TRUE
// if the multiplication was successful, or PR_FALSE if the operation resulted
// in an integer overflow.
static PRBool MulOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult)
{
  // Widen both operands to 64 bits so the product cannot wrap before we
  // can inspect it.
  const PRUint64 product = static_cast<PRUint64>(a) * static_cast<PRUint64>(b);
  if (product > PR_UINT32_MAX) {
    return PR_FALSE;
  }
  aResult = static_cast<PRUint32>(product);
  return PR_TRUE;
}
nsOggCodecState*
nsOggCodecState::Create(ogg_page* aPage)
@@ -178,7 +188,8 @@ PRBool nsTheoraState::Init() {
}
mFrameDuration = static_cast<PRUint32>(f);
n = mInfo.aspect_numerator;
n = mInfo.aspect_numerator;
d = mInfo.aspect_denominator;
mAspectRatio = (n == 0 || d == 0) ?
1.0f : static_cast<float>(n) / static_cast<float>(d);

View File

@@ -65,6 +65,10 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
// is about 4300 bytes, so we read the file in chunks larger than that.
static const int PAGE_STEP = 8192;
// The frame rate to use if there is no video data in the resource to
// be played.
#define AUDIO_FRAME_RATE 25.0
nsOggReader::nsOggReader(nsBuiltinDecoder* aDecoder)
: nsBuiltinDecoderReader(aDecoder),
mTheoraState(nsnull),
@@ -135,7 +139,7 @@ static PRBool DoneReadingHeaders(nsTArray<nsOggCodecState*>& aBitstreams) {
}
nsresult nsOggReader::ReadMetadata(nsVideoInfo& aInfo)
nsresult nsOggReader::ReadMetadata()
{
NS_ASSERTION(mDecoder->OnStateMachineThread(), "Should be on play state machine thread.");
MonitorAutoEnter mon(mMonitor);
@@ -247,6 +251,8 @@ nsresult nsOggReader::ReadMetadata(nsVideoInfo& aInfo)
// Theora spec these can be considered the 'primary' bitstreams for playback.
// Extract the metadata needed from these streams.
float aspectRatio = 0;
// Set a default callback period for if we have no video data
mCallbackPeriod = 1000 / AUDIO_FRAME_RATE;
if (mTheoraState) {
if (mTheoraState->Init()) {
mCallbackPeriod = mTheoraState->mFrameDuration;
@@ -262,24 +268,24 @@ nsresult nsOggReader::ReadMetadata(nsVideoInfo& aInfo)
mVorbisState->Init();
}
aInfo.mHasAudio = HasAudio();
aInfo.mHasVideo = HasVideo();
aInfo.mCallbackPeriod = mCallbackPeriod;
mInfo.mHasAudio = HasAudio();
mInfo.mHasVideo = HasVideo();
mInfo.mCallbackPeriod = mCallbackPeriod;
if (HasAudio()) {
aInfo.mAudioRate = mVorbisState->mInfo.rate;
aInfo.mAudioChannels = mVorbisState->mInfo.channels;
mInfo.mAudioRate = mVorbisState->mInfo.rate;
mInfo.mAudioChannels = mVorbisState->mInfo.channels;
}
if (HasVideo()) {
aInfo.mFramerate = mTheoraState->mFrameRate;
aInfo.mAspectRatio = mTheoraState->mAspectRatio;
aInfo.mPicture.width = mTheoraState->mInfo.pic_width;
aInfo.mPicture.height = mTheoraState->mInfo.pic_height;
aInfo.mPicture.x = mTheoraState->mInfo.pic_x;
aInfo.mPicture.y = mTheoraState->mInfo.pic_y;
aInfo.mFrame.width = mTheoraState->mInfo.frame_width;
aInfo.mFrame.height = mTheoraState->mInfo.frame_height;
mInfo.mFramerate = mTheoraState->mFrameRate;
mInfo.mAspectRatio = mTheoraState->mAspectRatio;
mInfo.mPicture.width = mTheoraState->mInfo.pic_width;
mInfo.mPicture.height = mTheoraState->mInfo.pic_height;
mInfo.mPicture.x = mTheoraState->mInfo.pic_x;
mInfo.mPicture.y = mTheoraState->mInfo.pic_y;
mInfo.mFrame.width = mTheoraState->mInfo.frame_width;
mInfo.mFrame.height = mTheoraState->mInfo.frame_height;
}
aInfo.mDataOffset = mDataOffset;
mInfo.mDataOffset = mDataOffset;
LOG(PR_LOG_DEBUG, ("Done loading headers, data offset %lld", mDataOffset));
@@ -491,12 +497,16 @@ nsresult nsOggReader::DecodeTheora(nsTArray<VideoData*>& aFrames,
b.mPlanes[i].mWidth = buffer[i].width;
b.mPlanes[i].mStride = buffer[i].stride;
}
VideoData *v = VideoData::Create(mPageOffset,
VideoData *v = VideoData::Create(mInfo,
mDecoder->GetImageContainer(),
mPageOffset,
time,
b,
isKeyframe,
aPacket->granulepos);
if (!v) {
// There may be other reasons for this error, but for
// simplicity just assume the worst case: out of memory.
NS_WARNING("Failed to allocate memory for video frame");
Clear(aFrames);
return NS_ERROR_OUT_OF_MEMORY;

View File

@@ -76,7 +76,7 @@ public:
return mTheoraState != 0 && mTheoraState->mActive;
}
virtual nsresult ReadMetadata(nsVideoInfo& aInfo);
virtual nsresult ReadMetadata();
virtual nsresult Seek(PRInt64 aTime, PRInt64 aStartTime, PRInt64 aEndTime);
private:

View File

@@ -243,6 +243,10 @@ public:
gfxIntSize mPicSize;
};
enum {
MAX_DIMENSION = 16384
};
/**
* This makes a copy of the data buffers.
* XXX Eventually we will change this to not make a copy of the data,