Merge "Adds audio support to DirectRenderer." into jb-mr2-dev
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
index 5efcd17..12338e9 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
@@ -22,6 +22,7 @@
#include <gui/SurfaceComposerClient.h>
#include <gui/Surface.h>
+#include <media/AudioTrack.h>
#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -34,12 +35,438 @@
namespace android {
+/*
+ Drives the decoding process using a MediaCodec instance. Input buffers
+ queued by calls to "queueInputBuffer" are fed to the decoder as soon
+ as the decoder is ready for them; the client is notified about output
+ buffers as the decoder emits them.
+*/
+struct DirectRenderer::DecoderContext : public AHandler {
+ enum {
+ kWhatOutputBufferReady,
+ };
+    DecoderContext(const sp<AMessage> &notify);
+
+ status_t init(
+ const sp<AMessage> &format,
+ const sp<IGraphicBufferProducer> &surfaceTex);
+
+ void queueInputBuffer(const sp<ABuffer> &accessUnit);
+
+ status_t renderOutputBufferAndRelease(size_t index);
+ status_t releaseOutputBuffer(size_t index);
+
+protected:
+ virtual ~DecoderContext();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+ enum {
+ kWhatDecoderNotify,
+ };
+
+ sp<AMessage> mNotify;
+ sp<ALooper> mDecoderLooper;
+ sp<MediaCodec> mDecoder;
+ Vector<sp<ABuffer> > mDecoderInputBuffers;
+ Vector<sp<ABuffer> > mDecoderOutputBuffers;
+ List<size_t> mDecoderInputBuffersAvailable;
+ bool mDecoderNotificationPending;
+
+ List<sp<ABuffer> > mAccessUnits;
+
+ void onDecoderNotify();
+ void scheduleDecoderNotification();
+ void queueDecoderInputBuffers();
+
+ void queueOutputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+ DISALLOW_EVIL_CONSTRUCTORS(DecoderContext);
+};
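
In miniature, this context implements the standard polled MediaCodec loop.
A hedged sketch of its shape ("codec", "inputBuffers", "srcBuffer" and
"timeUs" stand in for the members and locals used in the implementation
below):

    size_t index;
    while (codec->dequeueInputBuffer(&index) == OK) {
        // Copy one pending access unit into the decoder-owned input buffer,
        // then hand it to the codec stamped with its presentation time.
        const sp<ABuffer> &dst = inputBuffers.itemAt(index);
        memcpy(dst->data(), srcBuffer->data(), srcBuffer->size());
        codec->queueInputBuffer(
                index, 0 /* offset */, srcBuffer->size(), timeUs,
                0 /* flags */);
    }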
+
+////////////////////////////////////////////////////////////////////////////////
+
+/*
+ A "push" audio renderer. The primary function of this renderer is to use
+ an AudioTrack in push mode and making sure not to block the event loop
+ be ensuring that calls to AudioTrack::write never block. This is done by
+ estimating an upper bound of data that can be written to the AudioTrack
+ buffer without delay.
+*/
+struct DirectRenderer::AudioRenderer : public AHandler {
+ AudioRenderer(const sp<DecoderContext> &decoderContext);
+
+ void queueInputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+protected:
+ virtual ~AudioRenderer();
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+ enum {
+ kWhatPushAudio,
+ };
+
+ struct BufferInfo {
+ size_t mIndex;
+ int64_t mTimeUs;
+ sp<ABuffer> mBuffer;
+ };
+
+ sp<DecoderContext> mDecoderContext;
+ sp<AudioTrack> mAudioTrack;
+
+ List<BufferInfo> mInputBuffers;
+ bool mPushPending;
+
+ size_t mNumFramesWritten;
+
+ void schedulePushIfNecessary();
+ void onPushAudio();
+
+ ssize_t writeNonBlocking(const uint8_t *data, size_t size);
+
+ DISALLOW_EVIL_CONSTRUCTORS(AudioRenderer);
+};
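
The "upper bound" mentioned in the comment above falls out of AudioTrack's
frame accounting: the frames still buffered are those written but not yet
played, and whatever remains of the track's total frameCount() can be
written without blocking. A numeric sketch (illustrative values only, not
taken from the code):

    // Track buffer of 9600 frames; 48000 frames written so far, of which
    // the hardware has consumed 43200.
    uint32_t frameCount = 9600;
    uint32_t numFramesWritten = 48000;
    uint32_t numFramesPlayed = 43200;
    uint32_t numFramesPending = numFramesWritten - numFramesPlayed;  // 4800
    uint32_t numFramesFree = frameCount - numFramesPending;          // 4800
    // At 4 bytes per 16-bit stereo frame, up to 19200 bytes can be
    // written right now without AudioTrack::write() blocking.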
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::DecoderContext::DecoderContext(const sp<AMessage> &notify)
+ : mNotify(notify),
+ mDecoderNotificationPending(false) {
+}
+
+DirectRenderer::DecoderContext::~DecoderContext() {
+ if (mDecoder != NULL) {
+ mDecoder->release();
+ mDecoder.clear();
+
+ mDecoderLooper->stop();
+ mDecoderLooper.clear();
+ }
+}
+
+status_t DirectRenderer::DecoderContext::init(
+ const sp<AMessage> &format,
+ const sp<IGraphicBufferProducer> &surfaceTex) {
+ CHECK(mDecoder == NULL);
+
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+
+ mDecoderLooper = new ALooper;
+    mDecoderLooper->setName("codec looper");
+
+ mDecoderLooper->start(
+ false /* runOnCallingThread */,
+ false /* canCallJava */,
+ PRIORITY_DEFAULT);
+
+ mDecoder = MediaCodec::CreateByType(
+ mDecoderLooper, mime.c_str(), false /* encoder */);
+
+ CHECK(mDecoder != NULL);
+
+ status_t err = mDecoder->configure(
+ format,
+ surfaceTex == NULL
+ ? NULL : new Surface(surfaceTex),
+ NULL /* crypto */,
+ 0 /* flags */);
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mDecoder->start();
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mDecoder->getInputBuffers(
+ &mDecoderInputBuffers);
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mDecoder->getOutputBuffers(
+ &mDecoderOutputBuffers);
+ CHECK_EQ(err, (status_t)OK);
+
+ scheduleDecoderNotification();
+
+ return OK;
+}
+
+void DirectRenderer::DecoderContext::queueInputBuffer(
+ const sp<ABuffer> &accessUnit) {
+ CHECK(mDecoder != NULL);
+
+ mAccessUnits.push_back(accessUnit);
+ queueDecoderInputBuffers();
+}
+
+status_t DirectRenderer::DecoderContext::renderOutputBufferAndRelease(
+ size_t index) {
+ return mDecoder->renderOutputBufferAndRelease(index);
+}
+
+status_t DirectRenderer::DecoderContext::releaseOutputBuffer(size_t index) {
+ return mDecoder->releaseOutputBuffer(index);
+}
+
+void DirectRenderer::DecoderContext::queueDecoderInputBuffers() {
+ if (mDecoder == NULL) {
+ return;
+ }
+
+ bool submittedMore = false;
+
+ while (!mAccessUnits.empty()
+ && !mDecoderInputBuffersAvailable.empty()) {
+ size_t index = *mDecoderInputBuffersAvailable.begin();
+
+ mDecoderInputBuffersAvailable.erase(
+ mDecoderInputBuffersAvailable.begin());
+
+ sp<ABuffer> srcBuffer = *mAccessUnits.begin();
+ mAccessUnits.erase(mAccessUnits.begin());
+
+ const sp<ABuffer> &dstBuffer =
+ mDecoderInputBuffers.itemAt(index);
+
+ memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
+
+ int64_t timeUs;
+ CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
+
+ status_t err = mDecoder->queueInputBuffer(
+ index,
+ 0 /* offset */,
+ srcBuffer->size(),
+ timeUs,
+ 0 /* flags */);
+ CHECK_EQ(err, (status_t)OK);
+
+ submittedMore = true;
+ }
+
+ if (submittedMore) {
+ scheduleDecoderNotification();
+ }
+}
+
+void DirectRenderer::DecoderContext::onMessageReceived(
+ const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatDecoderNotify:
+ {
+ onDecoderNotify();
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+void DirectRenderer::DecoderContext::onDecoderNotify() {
+ mDecoderNotificationPending = false;
+
+ for (;;) {
+ size_t index;
+ status_t err = mDecoder->dequeueInputBuffer(&index);
+
+ if (err == OK) {
+ mDecoderInputBuffersAvailable.push_back(index);
+ } else if (err == -EAGAIN) {
+ break;
+ } else {
+ TRESPASS();
+ }
+ }
+
+ queueDecoderInputBuffers();
+
+ for (;;) {
+ size_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ uint32_t flags;
+ status_t err = mDecoder->dequeueOutputBuffer(
+ &index,
+ &offset,
+ &size,
+ &timeUs,
+ &flags);
+
+ if (err == OK) {
+ queueOutputBuffer(
+ index, timeUs, mDecoderOutputBuffers.itemAt(index));
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+ err = mDecoder->getOutputBuffers(
+ &mDecoderOutputBuffers);
+ CHECK_EQ(err, (status_t)OK);
+ } else if (err == INFO_FORMAT_CHANGED) {
+ // We don't care.
+ } else if (err == -EAGAIN) {
+ break;
+ } else {
+ TRESPASS();
+ }
+ }
+
+ scheduleDecoderNotification();
+}
+
+void DirectRenderer::DecoderContext::scheduleDecoderNotification() {
+ if (mDecoderNotificationPending) {
+ return;
+ }
+
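+    // MediaCodec activity notifications are one-shot, so the request is
+    // re-armed here after every drain in onDecoderNotify().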
+ sp<AMessage> notify =
+ new AMessage(kWhatDecoderNotify, id());
+
+ mDecoder->requestActivityNotification(notify);
+ mDecoderNotificationPending = true;
+}
+
+void DirectRenderer::DecoderContext::queueOutputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("what", kWhatOutputBufferReady);
+ msg->setSize("index", index);
+ msg->setInt64("timeUs", timeUs);
+ msg->setBuffer("buffer", buffer);
+ msg->post();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::AudioRenderer::AudioRenderer(
+ const sp<DecoderContext> &decoderContext)
+ : mDecoderContext(decoderContext),
+ mPushPending(false),
+ mNumFramesWritten(0) {
+ mAudioTrack = new AudioTrack(
+ AUDIO_STREAM_DEFAULT,
+            48000 /* sampleRate */,
+            AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_OUT_STEREO,
+ (int)0 /* frameCount */);
+
+ CHECK_EQ((status_t)OK, mAudioTrack->initCheck());
+
+ mAudioTrack->start();
+}
+
+DirectRenderer::AudioRenderer::~AudioRenderer() {
+}
+
+void DirectRenderer::AudioRenderer::queueInputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+ BufferInfo info;
+ info.mIndex = index;
+ info.mTimeUs = timeUs;
+ info.mBuffer = buffer;
+
+ mInputBuffers.push_back(info);
+ schedulePushIfNecessary();
+}
+
+void DirectRenderer::AudioRenderer::onMessageReceived(
+ const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatPushAudio:
+ {
+ onPushAudio();
+ break;
+ }
+
+ default:
+            TRESPASS();
+ }
+}
+
+void DirectRenderer::AudioRenderer::schedulePushIfNecessary() {
+ if (mPushPending || mInputBuffers.empty()) {
+ return;
+ }
+
+ mPushPending = true;
+
+ uint32_t numFramesPlayed;
+ CHECK_EQ(mAudioTrack->getPosition(&numFramesPlayed),
+ (status_t)OK);
+
+ uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
+
+ // This is how long the audio sink will have data to
+ // play back.
+ const float msecsPerFrame = 1000.0f / mAudioTrack->getSampleRate();
+
+ int64_t delayUs =
+ msecsPerFrame * numFramesPendingPlayout * 1000ll;
+
+ // Let's give it more data after about half that time
+ // has elapsed.
+ (new AMessage(kWhatPushAudio, id()))->post(delayUs / 2);
+}
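
Worked through with the 48 kHz track created above (a hedged numeric sketch,
not output from the code):

    // With 4800 frames pending playout:
    const float msecsPerFrame = 1000.0f / 48000;        // ~0.0208 ms/frame
    int64_t delayUs = msecsPerFrame * 4800 * 1000ll;    // ~100000 us queued
    int64_t postAfterUs = delayUs / 2;                  // wake in ~50 ms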
+
+void DirectRenderer::AudioRenderer::onPushAudio() {
+ mPushPending = false;
+
+ while (!mInputBuffers.empty()) {
+ const BufferInfo &info = *mInputBuffers.begin();
+
+ ssize_t n = writeNonBlocking(
+ info.mBuffer->data(), info.mBuffer->size());
+
+ if (n < (ssize_t)info.mBuffer->size()) {
+ CHECK_GE(n, 0);
+
+ info.mBuffer->setRange(
+ info.mBuffer->offset() + n, info.mBuffer->size() - n);
+ break;
+ }
+
+ mDecoderContext->releaseOutputBuffer(info.mIndex);
+
+ mInputBuffers.erase(mInputBuffers.begin());
+ }
+
+ schedulePushIfNecessary();
+}
+
+ssize_t DirectRenderer::AudioRenderer::writeNonBlocking(
+ const uint8_t *data, size_t size) {
+ uint32_t numFramesPlayed;
+ status_t err = mAudioTrack->getPosition(&numFramesPlayed);
+ if (err != OK) {
+ return err;
+ }
+
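+    // Frames still sitting in the track buffer are (written - played);
+    // the remainder of frameCount() can be accepted without blocking.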
+ ssize_t numFramesAvailableToWrite =
+ mAudioTrack->frameCount() - (mNumFramesWritten - numFramesPlayed);
+
+ size_t numBytesAvailableToWrite =
+ numFramesAvailableToWrite * mAudioTrack->frameSize();
+
+ if (size > numBytesAvailableToWrite) {
+ size = numBytesAvailableToWrite;
+ }
+
+ CHECK_EQ(mAudioTrack->write(data, size), (ssize_t)size);
+
+ size_t numFramesWritten = size / mAudioTrack->frameSize();
+ mNumFramesWritten += numFramesWritten;
+
+ return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
DirectRenderer::DirectRenderer(
const sp<IGraphicBufferProducer> &bufferProducer)
: mSurfaceTex(bufferProducer),
- mVideoDecoderNotificationPending(false),
- mRenderPending(false),
- mTimeOffsetUs(0ll),
+ mVideoRenderPending(false),
mLatencySum(0ll),
mLatencyCount(0),
mNumFramesLate(0),
@@ -47,17 +474,6 @@
}
DirectRenderer::~DirectRenderer() {
- if (mVideoDecoder != NULL) {
- mVideoDecoder->release();
- mVideoDecoder.clear();
-
- mVideoDecoderLooper->stop();
- mVideoDecoderLooper.clear();
- }
-}
-
-void DirectRenderer::setTimeOffset(int64_t offset) {
- mTimeOffsetUs = offset;
}
int64_t DirectRenderer::getAvgLatenessUs() {
@@ -81,15 +497,15 @@
void DirectRenderer::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
- case kWhatVideoDecoderNotify:
+ case kWhatDecoderNotify:
{
- onVideoDecoderNotify();
+ onDecoderNotify(msg);
break;
}
- case kWhatRender:
+ case kWhatRenderVideo:
{
- onRender();
+ onRenderVideo();
break;
}
@@ -98,196 +514,114 @@
}
}
-void DirectRenderer::setFormat(
- size_t trackIndex, const sp<AMessage> &format) {
+void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
+ CHECK_LT(trackIndex, 2u);
+
+ CHECK(mDecoderContext[trackIndex] == NULL);
+
+ sp<AMessage> notify = new AMessage(kWhatDecoderNotify, id());
+ notify->setSize("trackIndex", trackIndex);
+
+ mDecoderContext[trackIndex] = new DecoderContext(notify);
+ looper()->registerHandler(mDecoderContext[trackIndex]);
+
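+    // Only the video track (index 0) decodes to the surface; the audio
+    // decoder gets no surface and hands PCM buffers back to the client.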
+ CHECK_EQ((status_t)OK,
+ mDecoderContext[trackIndex]->init(
+ format, trackIndex == 0 ? mSurfaceTex : NULL));
+
if (trackIndex == 1) {
- // Ignore audio for now.
- return;
+ // Audio
+ mAudioRenderer = new AudioRenderer(mDecoderContext[1]);
+ looper()->registerHandler(mAudioRenderer);
}
-
- CHECK(mVideoDecoder == NULL);
-
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- mVideoDecoderLooper = new ALooper;
- mVideoDecoderLooper->setName("video codec looper");
-
- mVideoDecoderLooper->start(
- false /* runOnCallingThread */,
- false /* canCallJava */,
- PRIORITY_DEFAULT);
-
- mVideoDecoder = MediaCodec::CreateByType(
- mVideoDecoderLooper, mime.c_str(), false /* encoder */);
-
- CHECK(mVideoDecoder != NULL);
-
- status_t err = mVideoDecoder->configure(
- format,
- mSurfaceTex == NULL
- ? NULL : new Surface(mSurfaceTex),
- NULL /* crypto */,
- 0 /* flags */);
- CHECK_EQ(err, (status_t)OK);
-
- err = mVideoDecoder->start();
- CHECK_EQ(err, (status_t)OK);
-
- err = mVideoDecoder->getInputBuffers(
- &mVideoDecoderInputBuffers);
- CHECK_EQ(err, (status_t)OK);
-
- scheduleVideoDecoderNotification();
}
void DirectRenderer::queueAccessUnit(
size_t trackIndex, const sp<ABuffer> &accessUnit) {
- if (trackIndex == 1) {
- // Ignore audio for now.
- return;
- }
+ CHECK_LT(trackIndex, 2u);
- if (mVideoDecoder == NULL) {
+ if (mDecoderContext[trackIndex] == NULL) {
+ CHECK_EQ(trackIndex, 0u);
+
sp<AMessage> format = new AMessage;
format->setString("mime", "video/avc");
format->setInt32("width", 640);
format->setInt32("height", 360);
- setFormat(0, format);
+ setFormat(trackIndex, format);
}
- mVideoAccessUnits.push_back(accessUnit);
- queueVideoDecoderInputBuffers();
+ mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
}
-void DirectRenderer::queueVideoDecoderInputBuffers() {
- if (mVideoDecoder == NULL) {
+void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ switch (what) {
+ case DecoderContext::kWhatOutputBufferReady:
+ {
+ size_t index;
+ CHECK(msg->findSize("index", &index));
+
+ int64_t timeUs;
+ CHECK(msg->findInt64("timeUs", &timeUs));
+
+ sp<ABuffer> buffer;
+ CHECK(msg->findBuffer("buffer", &buffer));
+
+ queueOutputBuffer(trackIndex, index, timeUs, buffer);
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+void DirectRenderer::queueOutputBuffer(
+ size_t trackIndex,
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+ if (trackIndex == 1) {
+ // Audio
+ mAudioRenderer->queueInputBuffer(index, timeUs, buffer);
return;
}
- bool submittedMore = false;
-
- while (!mVideoAccessUnits.empty()
- && !mVideoDecoderInputBuffersAvailable.empty()) {
- size_t index = *mVideoDecoderInputBuffersAvailable.begin();
-
- mVideoDecoderInputBuffersAvailable.erase(
- mVideoDecoderInputBuffersAvailable.begin());
-
- sp<ABuffer> srcBuffer = *mVideoAccessUnits.begin();
- mVideoAccessUnits.erase(mVideoAccessUnits.begin());
-
- const sp<ABuffer> &dstBuffer =
- mVideoDecoderInputBuffers.itemAt(index);
-
- memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
-
- int64_t timeUs;
- CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
-
- status_t err = mVideoDecoder->queueInputBuffer(
- index,
- 0 /* offset */,
- srcBuffer->size(),
- timeUs,
- 0 /* flags */);
- CHECK_EQ(err, (status_t)OK);
-
- submittedMore = true;
- }
-
- if (submittedMore) {
- scheduleVideoDecoderNotification();
- }
-}
-
-void DirectRenderer::onVideoDecoderNotify() {
- mVideoDecoderNotificationPending = false;
-
- for (;;) {
- size_t index;
- status_t err = mVideoDecoder->dequeueInputBuffer(&index);
-
- if (err == OK) {
- mVideoDecoderInputBuffersAvailable.push_back(index);
- } else if (err == -EAGAIN) {
- break;
- } else {
- TRESPASS();
- }
- }
-
- queueVideoDecoderInputBuffers();
-
- for (;;) {
- size_t index;
- size_t offset;
- size_t size;
- int64_t timeUs;
- uint32_t flags;
- status_t err = mVideoDecoder->dequeueOutputBuffer(
- &index,
- &offset,
- &size,
- &timeUs,
- &flags);
-
- if (err == OK) {
- queueOutputBuffer(index, timeUs);
- } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
- // We don't care.
- } else if (err == INFO_FORMAT_CHANGED) {
- // We don't care.
- } else if (err == -EAGAIN) {
- break;
- } else {
- TRESPASS();
- }
- }
-
- scheduleVideoDecoderNotification();
-}
-
-void DirectRenderer::queueOutputBuffer(size_t index, int64_t timeUs) {
-#if 1
OutputInfo info;
info.mIndex = index;
- info.mTimeUs = timeUs + mTimeOffsetUs;
- mOutputBuffers.push_back(info);
+ info.mTimeUs = timeUs;
+ info.mBuffer = buffer;
+ mVideoOutputBuffers.push_back(info);
- scheduleRenderIfNecessary();
-#else
- mLatencySum += ALooper::GetNowUs() - (timeUs + mTimeOffsetUs);
- ++mLatencyCount;
-
- status_t err = mVideoDecoder->renderOutputBufferAndRelease(index);
- CHECK_EQ(err, (status_t)OK);
-#endif
+ scheduleVideoRenderIfNecessary();
}
-void DirectRenderer::scheduleRenderIfNecessary() {
- if (mRenderPending || mOutputBuffers.empty()) {
+void DirectRenderer::scheduleVideoRenderIfNecessary() {
+ if (mVideoRenderPending || mVideoOutputBuffers.empty()) {
return;
}
- mRenderPending = true;
+ mVideoRenderPending = true;
- int64_t timeUs = (*mOutputBuffers.begin()).mTimeUs;
+ int64_t timeUs = (*mVideoOutputBuffers.begin()).mTimeUs;
int64_t nowUs = ALooper::GetNowUs();
int64_t delayUs = timeUs - nowUs;
- (new AMessage(kWhatRender, id()))->post(delayUs);
+ (new AMessage(kWhatRenderVideo, id()))->post(delayUs);
}
-void DirectRenderer::onRender() {
- mRenderPending = false;
+void DirectRenderer::onRenderVideo() {
+ mVideoRenderPending = false;
int64_t nowUs = ALooper::GetNowUs();
- while (!mOutputBuffers.empty()) {
- const OutputInfo &info = *mOutputBuffers.begin();
+ while (!mVideoOutputBuffers.empty()) {
+ const OutputInfo &info = *mVideoOutputBuffers.begin();
if (info.mTimeUs > nowUs) {
break;
@@ -301,25 +635,14 @@
mLatencySum += nowUs - info.mTimeUs;
++mLatencyCount;
- status_t err = mVideoDecoder->renderOutputBufferAndRelease(info.mIndex);
+ status_t err =
+ mDecoderContext[0]->renderOutputBufferAndRelease(info.mIndex);
CHECK_EQ(err, (status_t)OK);
- mOutputBuffers.erase(mOutputBuffers.begin());
+ mVideoOutputBuffers.erase(mVideoOutputBuffers.begin());
}
- scheduleRenderIfNecessary();
-}
-
-void DirectRenderer::scheduleVideoDecoderNotification() {
- if (mVideoDecoderNotificationPending) {
- return;
- }
-
- sp<AMessage> notify =
- new AMessage(kWhatVideoDecoderNotify, id());
-
- mVideoDecoder->requestActivityNotification(notify);
- mVideoDecoderNotificationPending = true;
+ scheduleVideoRenderIfNecessary();
}
} // namespace android
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.h b/media/libstagefright/wifi-display/sink/DirectRenderer.h
index 44be8f8..92c176a 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.h
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.h
@@ -23,21 +23,17 @@
namespace android {
struct ABuffer;
+struct AudioTrack;
struct IGraphicBufferProducer;
struct MediaCodec;
-// An experimental renderer that only supports video and decodes video data
-// as soon as it arrives using a MediaCodec instance, rendering it without
-// delay. Primarily meant to finetune packet loss discovery and minimize
-// latency.
+// Renders audio and video data queued by calls to "queueAccessUnit".
struct DirectRenderer : public AHandler {
DirectRenderer(const sp<IGraphicBufferProducer> &bufferProducer);
void setFormat(size_t trackIndex, const sp<AMessage> &format);
void queueAccessUnit(size_t trackIndex, const sp<ABuffer> &accessUnit);
- void setTimeOffset(int64_t offset);
-
int64_t getAvgLatenessUs();
protected:
@@ -45,30 +41,28 @@
virtual ~DirectRenderer();
private:
+ struct DecoderContext;
+ struct AudioRenderer;
+
enum {
- kWhatVideoDecoderNotify,
- kWhatRender,
+ kWhatDecoderNotify,
+ kWhatRenderVideo,
};
struct OutputInfo {
size_t mIndex;
int64_t mTimeUs;
+ sp<ABuffer> mBuffer;
};
sp<IGraphicBufferProducer> mSurfaceTex;
- sp<ALooper> mVideoDecoderLooper;
- sp<MediaCodec> mVideoDecoder;
- Vector<sp<ABuffer> > mVideoDecoderInputBuffers;
- List<size_t> mVideoDecoderInputBuffersAvailable;
- bool mVideoDecoderNotificationPending;
+ sp<DecoderContext> mDecoderContext[2];
+ List<OutputInfo> mVideoOutputBuffers;
- List<sp<ABuffer> > mVideoAccessUnits;
+ bool mVideoRenderPending;
- List<OutputInfo> mOutputBuffers;
- bool mRenderPending;
-
- int64_t mTimeOffsetUs;
+ sp<AudioRenderer> mAudioRenderer;
int64_t mLatencySum;
size_t mLatencyCount;
@@ -76,14 +70,14 @@
int32_t mNumFramesLate;
int32_t mNumFrames;
- void onVideoDecoderNotify();
- void onRender();
+ void onDecoderNotify(const sp<AMessage> &msg);
- void queueVideoDecoderInputBuffers();
- void scheduleVideoDecoderNotification();
- void scheduleRenderIfNecessary();
+ void queueOutputBuffer(
+ size_t trackIndex,
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
- void queueOutputBuffer(size_t index, int64_t timeUs);
+ void scheduleVideoRenderIfNecessary();
+ void onRenderVideo();
DISALLOW_EVIL_CONSTRUCTORS(DirectRenderer);
};
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
index d635c3a..62021c0 100644
--- a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
@@ -337,13 +337,18 @@
ALOGI("Assuming %lld ms of latency.", latencyUs / 1000ll);
}
- // We are the timesync _client_,
- // client time = server time - time offset.
- mRenderer->setTimeOffset(-mTimeOffsetUs + mTargetLatencyUs);
-
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
+ int64_t timeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+ // We are the timesync _client_,
+ // client time = server time - time offset.
+ timeUs += mTargetLatencyUs - mTimeOffsetUs;
+
+ accessUnit->meta()->setInt64("timeUs", timeUs);
+
size_t trackIndex;
CHECK(msg->findSize("trackIndex", &trackIndex));
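
The rewritten timestamp math above folds the timesync offset and the target
latency directly into each access unit before it reaches the renderer. A
hedged numeric sketch (illustrative values, not taken from the code):

    // Suppose the server clock runs 2 s ahead of ours and we aim for
    // 100 ms of end-to-end latency.
    int64_t timeUs = 7000000ll;           // as stamped by the server
    int64_t mTimeOffsetUs = 2000000ll;    // server time - client time
    int64_t mTargetLatencyUs = 100000ll;
    timeUs += mTargetLatencyUs - mTimeOffsetUs;
    // timeUs == 5100000: the unit's time on the client clock plus the
    // latency pad, which DirectRenderer compares to ALooper::GetNowUs().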