Use LinearBlock buffer for thumbnail extraction
For the thumbnail extraction use case, if the codec is a Codec2 (c2.*) codec, use a LinearBlock buffer to allocate a single input buffer.
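
When block model is enabled, the decoder is configured with
CONFIGURE_FLAG_USE_BLOCK_MODEL and runs in MediaCodec's asynchronous
callback mode: a single access unit is copied into a C2LinearBlock,
queued with the EOS flag, and the extraction thread waits until one
output frame has been handled.
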
Bug: 329521645
Test: MediaMetadataRetrieverTest
Merged-In: I365db0a58b2dee4a36f1bb183ef21d11c4600916
Change-Id: I365db0a58b2dee4a36f1bb183ef21d11c4600916
diff --git a/media/aconfig/codec_fwk.aconfig b/media/aconfig/codec_fwk.aconfig
index d662585..362e98e 100644
--- a/media/aconfig/codec_fwk.aconfig
+++ b/media/aconfig/codec_fwk.aconfig
@@ -133,3 +133,10 @@
description: "Feature flag to track teamfood population"
bug: "328770262"
}
+
+flag {
+ name: "thumbnail_block_model"
+ namespace: "codec_fwk"
+ description: "Feature flag for using block model decoder in thumbnail generation"
+ bug: "329521645"
+}
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 46703bb..893b442 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -18,19 +18,14 @@
#define LOG_TAG "FrameDecoder"
#define ATRACE_TAG ATRACE_TAG_VIDEO
#include "include/FrameDecoder.h"
-#include "include/FrameCaptureLayer.h"
-#include "include/HevcUtils.h"
+#include <android_media_codec.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <gui/Surface.h>
#include <inttypes.h>
-#include <mediadrm/ICrypto.h>
#include <media/IMediaSource.h>
#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/avc_utils.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/CodecBase.h>
#include <media/stagefright/ColorConverter.h>
#include <media/stagefright/FrameCaptureProcessor.h>
#include <media/stagefright/MediaBuffer.h>
@@ -39,13 +34,24 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <mediadrm/ICrypto.h>
#include <private/media/VideoFrame.h>
#include <utils/Log.h>
#include <utils/Trace.h>
+#include "include/FrameCaptureLayer.h"
+#include "include/HevcUtils.h"
+
+#include <C2Buffer.h>
+#include <Codec2BufferUtils.h>
namespace android {
static const int64_t kBufferTimeOutUs = 10000LL; // 10 msec
+static const int64_t kAsyncBufferTimeOutUs = 2000000LL; // 2000 msec
static const size_t kRetryCount = 100; // must be >0
static const int64_t kDefaultSampleDurationUs = 33333LL; // 33ms
// For codec, 0 is the highest importance; higher the number lesser important.
@@ -232,6 +238,104 @@
return false;
}
+AsyncCodecHandler::AsyncCodecHandler(const wp<FrameDecoder>& frameDecoder) {
+ mFrameDecoder = frameDecoder;
+}
+
+void AsyncCodecHandler::onMessageReceived(const sp<AMessage>& msg) {
+ switch (msg->what()) {
+ case FrameDecoder::kWhatCallbackNotify:
+ int32_t callbackId;
+ if (!msg->findInt32("callbackID", &callbackId)) {
+ ALOGE("kWhatCallbackNotify: callbackID is expected.");
+ break;
+ }
+ switch (callbackId) {
+ case MediaCodec::CB_INPUT_AVAILABLE: {
+ int32_t index;
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ ALOGD("CB_INPUT_AVAILABLE received, index is %d", index);
+ sp<FrameDecoder> frameDecoder = mFrameDecoder.promote();
+ if (frameDecoder != nullptr) {
+ frameDecoder->handleInputBufferAsync(index);
+ }
+ break;
+ }
+ case MediaCodec::CB_OUTPUT_AVAILABLE: {
+ int32_t index;
+ int64_t timeUs;
+ CHECK(msg->findInt32("index", &index));
+ CHECK(msg->findInt64("timeUs", &timeUs));
+ ALOGD("CB_OUTPUT_AVAILABLE received, index is %d", index);
+ sp<FrameDecoder> frameDecoder = mFrameDecoder.promote();
+ if (frameDecoder != nullptr) {
+ frameDecoder->handleOutputBufferAsync(index, timeUs);
+ }
+ break;
+ }
+ case MediaCodec::CB_OUTPUT_FORMAT_CHANGED: {
+ ALOGD("CB_OUTPUT_FORMAT_CHANGED received");
+ sp<AMessage> format;
+ if (!msg->findMessage("format", &format) || format == nullptr) {
+ ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+ break;
+ }
+ sp<FrameDecoder> frameDecoder = mFrameDecoder.promote();
+ if (frameDecoder != nullptr) {
+ frameDecoder->handleOutputFormatChangeAsync(format);
+ }
+ break;
+ }
+ case MediaCodec::CB_ERROR: {
+ status_t err;
+ int32_t actionCode;
+ AString detail;
+ if (!msg->findInt32("err", &err)) {
+ ALOGE("CB_ERROR: err is expected.");
+ break;
+ }
+ if (!msg->findInt32("actionCode", &actionCode)) {
+ ALOGE("CB_ERROR: actionCode is expected.");
+ break;
+ }
+ msg->findString("detail", &detail);
+ ALOGE("Codec reported error(0x%x/%s), actionCode(%d), detail(%s)", err,
+ StrMediaError(err).c_str(), actionCode, detail.c_str());
+ break;
+ }
+ default:
+ ALOGE("kWhatCallbackNotify: callbackID(%d) is unexpected.", callbackId);
+ break;
+ }
+ break;
+ default:
+ ALOGE("unexpected message received: %s", msg->debugString().c_str());
+ break;
+ }
+}
+
+void InputBufferIndexQueue::enqueue(int32_t index) {
+ std::scoped_lock<std::mutex> lock(mMutex);
+ mQueue.push(index);
+ mCondition.notify_one();
+}
+
+bool InputBufferIndexQueue::dequeue(int32_t* index, int32_t timeOutUs) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ bool hasAvailableIndex = mCondition.wait_for(lock, std::chrono::microseconds(timeOutUs),
+ [this] { return !mQueue.empty(); });
+ if (hasAvailableIndex) {
+ *index = mQueue.front();
+ mQueue.pop();
+ return true;
+ } else {
+ return false;
+ }
+}
+
//static
sp<IMemory> FrameDecoder::getMetadataOnly(
const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail, uint32_t bitDepth) {
@@ -281,6 +385,7 @@
const sp<MetaData> &trackMeta,
const sp<IMediaSource> &source)
: mComponentName(componentName),
+ mUseBlockModel(false),
mTrackMeta(trackMeta),
mSource(source),
mDstFormat(OMX_COLOR_Format16bitRGB565),
@@ -290,6 +395,10 @@
}
FrameDecoder::~FrameDecoder() {
+ if (mHandler != NULL) {
+ mAsyncLooper->stop();
+ mAsyncLooper->unregisterHandler(mHandler->id());
+ }
if (mDecoder != NULL) {
mDecoder->release();
mSource->stop();
@@ -333,8 +442,18 @@
return (decoder.get() == NULL) ? NO_MEMORY : err;
}
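+
+    // Block model requires MediaCodec's asynchronous API, so register a
+    // callback handler on a dedicated looper before configure().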
+ if (mUseBlockModel) {
+ mAsyncLooper = new ALooper;
+ mAsyncLooper->start();
+ mHandler = new AsyncCodecHandler(wp<FrameDecoder>(this));
+ mAsyncLooper->registerHandler(mHandler);
+ sp<AMessage> callbackMsg = new AMessage(kWhatCallbackNotify, mHandler);
+ decoder->setCallback(callbackMsg);
+ }
+
err = decoder->configure(
- videoFormat, mSurface, NULL /* crypto */, 0 /* flags */);
+ videoFormat, mSurface, NULL /* crypto */,
+ mUseBlockModel ? MediaCodec::CONFIGURE_FLAG_USE_BLOCK_MODEL : 0 /* flags */);
if (err != OK) {
ALOGW("configure returned error %d (%s)", err, asString(err));
decoder->release();
@@ -362,10 +481,18 @@
sp<IMemory> FrameDecoder::extractFrame(FrameRect *rect) {
ScopedTrace trace(ATRACE_TAG, "FrameDecoder::ExtractFrame");
status_t err = onExtractRect(rect);
- if (err == OK) {
+ if (err != OK) {
+ ALOGE("onExtractRect error %d", err);
+ return NULL;
+ }
+
+ if (!mUseBlockModel) {
err = extractInternal();
+ } else {
+ err = extractInternalUsingBlockModel();
}
if (err != OK) {
+        ALOGE("frame extraction error %d", err);
return NULL;
}
@@ -380,6 +507,7 @@
ALOGE("decoder is not initialized");
return NO_INIT;
}
+
do {
size_t index;
int64_t ptsUs = 0LL;
@@ -433,7 +561,8 @@
(const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
mediaBuffer->range_length());
- onInputReceived(codecBuffer, mediaBuffer->meta_data(), mFirstSample, &flags);
+ onInputReceived(codecBuffer->data(), codecBuffer->size(), mediaBuffer->meta_data(),
+ mFirstSample, &flags);
mFirstSample = false;
}
@@ -487,11 +616,14 @@
ALOGE("failed to get output buffer %zu", index);
break;
}
+ uint8_t* frameData = videoFrameBuffer->data();
+ sp<ABuffer> imageData;
+ videoFrameBuffer->meta()->findBuffer("image-data", &imageData);
if (mSurface != nullptr) {
mDecoder->renderOutputBufferAndRelease(index);
- err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+ err = onOutputReceived(frameData, imageData, mOutputFormat, ptsUs, &done);
} else {
- err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+ err = onOutputReceived(frameData, imageData, mOutputFormat, ptsUs, &done);
mDecoder->releaseOutputBuffer(index);
}
} else {
@@ -510,6 +642,73 @@
return err;
}
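+
+// Block-model path: copies a single access unit into a C2LinearBlock, queues
+// it with the EOS flag, and then waits for the async output callback.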
+status_t FrameDecoder::extractInternalUsingBlockModel() {
+ status_t err = OK;
+ MediaBufferBase* mediaBuffer = NULL;
+ int64_t ptsUs = 0LL;
+ uint32_t flags = 0;
+ int32_t index;
+ mHandleOutputBufferAsyncDone = false;
+
+ err = mSource->read(&mediaBuffer, &mReadOptions);
+ mReadOptions.clearSeekTo();
+    if (err != OK) {
+        ALOGW("Input Error: err=%d", err);
+        if (mediaBuffer != NULL) {
+            mediaBuffer->release();
+        }
+        return err;
+    }
+
+ size_t inputSize = mediaBuffer->range_length();
+    std::shared_ptr<C2LinearBlock> block =
+            MediaCodec::FetchLinearBlock(inputSize, {std::string{mComponentName.c_str()}});
+    if (block == nullptr) {
+        ALOGE("Fatal error: failed to fetch a linear block");
+        mediaBuffer->release();
+        return NO_MEMORY;
+    }
+    C2WriteView view{block->map().get()};
+    if (view.error() != C2_OK) {
+ ALOGE("Fatal error: failed to allocate and map a block");
+ mediaBuffer->release();
+ return NO_MEMORY;
+ }
+ if (inputSize > view.capacity()) {
+ ALOGE("Fatal error: allocated block is too small "
+ "(input size %zu; block cap %u)",
+ inputSize, view.capacity());
+ mediaBuffer->release();
+ return BAD_VALUE;
+ }
+ CHECK(mediaBuffer->meta_data().findInt64(kKeyTime, &ptsUs));
+ memcpy(view.base(), (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
+ inputSize);
+ std::shared_ptr<C2Buffer> c2Buffer =
+ C2Buffer::CreateLinearBuffer(block->share(0, inputSize, C2Fence{}));
+ onInputReceived(view.base(), inputSize, mediaBuffer->meta_data(), true /* firstSample */,
+ &flags);
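+    // A thumbnail needs only one decoded frame, so signal EOS along with this
+    // single input.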
+ flags |= MediaCodec::BUFFER_FLAG_EOS;
+ mediaBuffer->release();
+
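+    // Describe the single access unit (flags, size, presentation time) that
+    // accompanies the block in queueBuffer().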
+ std::vector<AccessUnitInfo> infoVec;
+ infoVec.emplace_back(flags, inputSize, ptsUs);
+ sp<BufferInfosWrapper> infos = new BufferInfosWrapper{std::move(infoVec)};
+
+ if (!mInputBufferIndexQueue.dequeue(&index, kAsyncBufferTimeOutUs)) {
+ ALOGE("No available input buffer index for async mode.");
+ return TIMED_OUT;
+ }
+
+ AString errorDetailMsg;
+ ALOGD("QueueLinearBlock: index=%d size=%zu ts=%" PRId64 " us flags=%x",
+          index, inputSize, ptsUs, flags);
+ err = mDecoder->queueBuffer(index, c2Buffer, infos, nullptr, &errorDetailMsg);
+ if (err != OK) {
+ ALOGE("failed to queueBuffer (err %d): %s", err, errorDetailMsg.c_str());
+ return err;
+ }
+
+ // wait for handleOutputBufferAsync() to finish
+ std::unique_lock _lk(mMutex);
+ mOutputFramePending.wait_for(_lk, std::chrono::microseconds(kAsyncBufferTimeOutUs),
+ [this] { return mHandleOutputBufferAsyncDone; });
+ return mHandleOutputBufferAsyncDone ? OK : TIMED_OUT;
+}
+
//////////////////////////////////////////////////////////////////////
VideoFrameDecoder::VideoFrameDecoder(
@@ -525,6 +724,81 @@
mDefaultSampleDurationUs(0) {
}
+status_t FrameDecoder::handleOutputFormatChangeAsync(sp<AMessage> format) {
+ // Here format is MediaCodec's internal copy of output format.
+ // Make a copy since the client might modify it.
+ mOutputFormat = format->dup();
+    ALOGD("received output format in async mode: %s", mOutputFormat->debugString().c_str());
+ return OK;
+}
+
+status_t FrameDecoder::handleInputBufferAsync(int32_t index) {
+ mInputBufferIndexQueue.enqueue(index);
+ return OK;
+}
+
+status_t FrameDecoder::handleOutputBufferAsync(int32_t index, int64_t timeUs) {
+ if (mHandleOutputBufferAsyncDone) {
+ // we have already processed an output buffer, skip others
+ return OK;
+ }
+
+ status_t err = OK;
+ sp<MediaCodecBuffer> videoFrameBuffer;
+ err = mDecoder->getOutputBuffer(index, &videoFrameBuffer);
+ if (err != OK || videoFrameBuffer == nullptr) {
+ ALOGE("failed to get output buffer %d", index);
+ return err;
+ }
+
+ bool onOutputReceivedDone = false;
+ if (mSurface != nullptr) {
+ mDecoder->renderOutputBufferAndRelease(index);
+        // frameData and imgObj will be fetched by captureSurface() inside onOutputReceived(),
+        // so explicitly pass null here.
+ err = onOutputReceived(nullptr, nullptr, mOutputFormat, timeUs, &onOutputReceivedDone);
+ } else {
+ // get stride and frame data for block model buffer
+ std::shared_ptr<C2Buffer> c2buffer = videoFrameBuffer->asC2Buffer();
+ if (!c2buffer
+ || c2buffer->data().type() != C2BufferData::GRAPHIC
+ || c2buffer->data().graphicBlocks().size() == 0u) {
+            ALOGE("received a block model output buffer that is not a graphic C2Buffer");
+ return ERROR_MALFORMED;
+ }
+
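+        // Map the graphic block and derive the MediaImage2 layout that the
+        // color converter needs.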
+        std::unique_ptr<const C2GraphicView> view = std::make_unique<const C2GraphicView>(
+                c2buffer->data().graphicBlocks()[0].map().get());
+ GraphicView2MediaImageConverter converter(*view, mOutputFormat, false /* copy */);
+ if (converter.initCheck() != OK) {
+ ALOGE("Converter init failed: %d", converter.initCheck());
+ return NO_INIT;
+ }
+
+ uint8_t* frameData = converter.wrap()->data();
+ sp<ABuffer> imageData = converter.imageData();
+ if (imageData != nullptr) {
+ mOutputFormat->setBuffer("image-data", imageData);
+ MediaImage2 *img = (MediaImage2*) imageData->data();
+ if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
+ int32_t stride = img->mPlane[0].mRowInc;
+ mOutputFormat->setInt32(KEY_STRIDE, stride);
+ ALOGD("updating stride = %d", stride);
+ }
+ }
+
+ err = onOutputReceived(frameData, imageData, mOutputFormat, timeUs, &onOutputReceivedDone);
+ mDecoder->releaseOutputBuffer(index);
+ }
+
+ if (err == OK && onOutputReceivedDone) {
+ std::lock_guard _lm(mMutex);
+ mHandleOutputBufferAsyncDone = true;
+ mOutputFramePending.notify_one();
+ }
+ return err;
+}
+
sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
int64_t frameTimeUs, int seekMode,
MediaSource::ReadOptions *options,
@@ -575,8 +849,13 @@
bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
|| (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
if (!isSeekingClosest) {
- videoFormat->setInt32("android._num-input-buffers", 1);
- videoFormat->setInt32("android._num-output-buffers", 1);
+ if (mComponentName.startsWithIgnoreCase("c2.")) {
+ mUseBlockModel = android::media::codec::provider_->thumbnail_block_model();
+ } else {
+ // OMX Codec
+ videoFormat->setInt32("android._num-input-buffers", 1);
+ videoFormat->setInt32("android._num-output-buffers", 1);
+ }
}
if (isHDR(videoFormat)) {
@@ -601,9 +880,8 @@
return videoFormat;
}
-status_t VideoFrameDecoder::onInputReceived(
- const sp<MediaCodecBuffer> &codecBuffer,
- MetaDataBase &sampleMeta, bool firstSample, uint32_t *flags) {
+status_t VideoFrameDecoder::onInputReceived(uint8_t* data, size_t size, MetaDataBase& sampleMeta,
+ bool firstSample, uint32_t* flags) {
bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
|| (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
@@ -612,10 +890,7 @@
ALOGV("Seeking closest: targetTimeUs=%lld", (long long)mTargetTimeUs);
}
- if (!isSeekingClosest
- && ((mIsAvc && IsIDR(codecBuffer->data(), codecBuffer->size()))
- || (mIsHevc && IsIDR(
- codecBuffer->data(), codecBuffer->size())))) {
+ if (!isSeekingClosest && ((mIsAvc && IsIDR(data, size)) || (mIsHevc && IsIDR(data, size)))) {
// Only need to decode one IDR frame, unless we're seeking with CLOSEST
// option, in which case we need to actually decode to targetTimeUs.
*flags |= MediaCodec::BUFFER_FLAG_EOS;
@@ -630,7 +905,8 @@
}
status_t VideoFrameDecoder::onOutputReceived(
- const sp<MediaCodecBuffer> &videoFrameBuffer,
+ uint8_t* frameData,
+ sp<ABuffer> imgObj,
const sp<AMessage> &outputFormat,
int64_t timeUs, bool *done) {
int64_t durationUs = mDefaultSampleDurationUs;
@@ -703,7 +979,6 @@
}
mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
-
setFrame(frameMem);
}
@@ -712,7 +987,7 @@
if (mCaptureLayer != nullptr) {
return captureSurface();
}
- ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
+ ColorConverter colorConverter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
uint32_t standard, range, transfer;
if (!outputFormat->findInt32("color-standard", (int32_t*)&standard)) {
@@ -724,22 +999,25 @@
if (!outputFormat->findInt32("color-transfer", (int32_t*)&transfer)) {
transfer = 0;
}
- sp<ABuffer> imgObj;
- if (videoFrameBuffer->meta()->findBuffer("image-data", &imgObj)) {
+
+ if (imgObj != nullptr) {
MediaImage2 *imageData = nullptr;
imageData = (MediaImage2 *)(imgObj.get()->data());
if (imageData != nullptr) {
- converter.setSrcMediaImage2(*imageData);
+ colorConverter.setSrcMediaImage2(*imageData);
}
}
if (srcFormat == COLOR_FormatYUV420Flexible && imgObj.get() == nullptr) {
return ERROR_UNSUPPORTED;
}
- converter.setSrcColorSpace(standard, range, transfer);
- if (converter.isValid()) {
+ colorConverter.setSrcColorSpace(standard, range, transfer);
+ if (colorConverter.isValid()) {
ScopedTrace trace(ATRACE_TAG, "FrameDecoder::ColorConverter");
- converter.convert(
- (const uint8_t *)videoFrameBuffer->data(),
+ if (frameData == nullptr) {
+ ALOGD("frameData is null for ColorConverter");
+ }
+ colorConverter.convert(
+ (const uint8_t *)frameData,
width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
mFrame->getFlattenedData(),
@@ -955,7 +1233,8 @@
}
status_t MediaImageDecoder::onOutputReceived(
- const sp<MediaCodecBuffer> &videoFrameBuffer,
+ uint8_t* frameData,
+ sp<ABuffer> imgObj,
const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
if (outputFormat == NULL) {
return ERROR_MALFORMED;
@@ -1008,8 +1287,8 @@
if (!outputFormat->findInt32("color-transfer", (int32_t*)&transfer)) {
transfer = 0;
}
- sp<ABuffer> imgObj;
- if (videoFrameBuffer->meta()->findBuffer("image-data", &imgObj)) {
+
+ if (imgObj != nullptr) {
MediaImage2 *imageData = nullptr;
imageData = (MediaImage2 *)(imgObj.get()->data());
if (imageData != nullptr) {
@@ -1058,7 +1337,7 @@
if (converter.isValid()) {
converter.convert(
- (const uint8_t *)videoFrameBuffer->data(),
+ (const uint8_t *)frameData,
width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
mFrame->getFlattenedData(),
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index e417324..94c201f 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -18,12 +18,15 @@
#define FRAME_DECODER_H_
#include <memory>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
#include <vector>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/MediaSource.h>
#include <media/openmax/OMX_Video.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AString.h>
#include <ui/GraphicTypes.h>
namespace android {
@@ -34,11 +37,23 @@
class MediaCodecBuffer;
class Surface;
class VideoFrame;
+struct AsyncCodecHandler;
struct FrameRect {
int32_t left, top, right, bottom;
};
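+
+// Thread-safe blocking queue that hands input buffer indices from the codec
+// callback thread to the extraction thread.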
+struct InputBufferIndexQueue {
+public:
+ void enqueue(int32_t index);
+ bool dequeue(int32_t* index, int32_t timeOutUs);
+
+private:
+ std::queue<int32_t> mQueue;
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+};
+
struct FrameDecoder : public RefBase {
FrameDecoder(
const AString &componentName,
@@ -53,7 +68,19 @@
const sp<MetaData> &trackMeta, int colorFormat,
bool thumbnail = false, uint32_t bitDepth = 0);
+ status_t handleInputBufferAsync(int32_t index);
+ status_t handleOutputBufferAsync(int32_t index, int64_t timeUs);
+ status_t handleOutputFormatChangeAsync(sp<AMessage> format);
+
+ enum {
+ kWhatCallbackNotify,
+ };
+
protected:
+ AString mComponentName;
+ sp<AMessage> mOutputFormat;
+ bool mUseBlockModel;
+
virtual ~FrameDecoder();
virtual sp<AMessage> onGetFormatAndSeekOptions(
@@ -64,14 +91,12 @@
virtual status_t onExtractRect(FrameRect *rect) = 0;
- virtual status_t onInputReceived(
- const sp<MediaCodecBuffer> &codecBuffer,
- MetaDataBase &sampleMeta,
- bool firstSample,
- uint32_t *flags) = 0;
+ virtual status_t onInputReceived(uint8_t* data, size_t size, MetaDataBase& sampleMeta,
+ bool firstSample, uint32_t* flags) = 0;
virtual status_t onOutputReceived(
- const sp<MediaCodecBuffer> &videoFrameBuffer,
+ uint8_t* data,
+ sp<ABuffer> imgObj,
const sp<AMessage> &outputFormat,
int64_t timeUs,
bool *done) = 0;
@@ -83,7 +108,6 @@
void setFrame(const sp<IMemory> &frameMem) { mFrameMemory = frameMem; }
private:
- AString mComponentName;
sp<MetaData> mTrackMeta;
sp<IMediaSource> mSource;
OMX_COLOR_FORMATTYPE mDstFormat;
@@ -92,17 +116,32 @@
sp<IMemory> mFrameMemory;
MediaSource::ReadOptions mReadOptions;
sp<MediaCodec> mDecoder;
- sp<AMessage> mOutputFormat;
+ sp<AsyncCodecHandler> mHandler;
+ sp<ALooper> mAsyncLooper;
bool mHaveMoreInputs;
bool mFirstSample;
+ bool mHandleOutputBufferAsyncDone;
sp<Surface> mSurface;
+ std::mutex mMutex;
+ std::condition_variable mOutputFramePending;
+ InputBufferIndexQueue mInputBufferIndexQueue;
status_t extractInternal();
+ status_t extractInternalUsingBlockModel();
DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
};
struct FrameCaptureLayer;
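+
+// AHandler that forwards MediaCodec's async callback messages to a weakly
+// referenced FrameDecoder.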
+struct AsyncCodecHandler : public AHandler {
+public:
+ explicit AsyncCodecHandler(const wp<FrameDecoder>& frameDecoder);
+ virtual void onMessageReceived(const sp<AMessage>& msg);
+
+private:
+ wp<FrameDecoder> mFrameDecoder;
+};
+
struct VideoFrameDecoder : public FrameDecoder {
VideoFrameDecoder(
const AString &componentName,
@@ -121,14 +160,12 @@
return (rect == NULL) ? OK : ERROR_UNSUPPORTED;
}
- virtual status_t onInputReceived(
- const sp<MediaCodecBuffer> &codecBuffer,
- MetaDataBase &sampleMeta,
- bool firstSample,
- uint32_t *flags) override;
+ virtual status_t onInputReceived(uint8_t* data, size_t size, MetaDataBase& sampleMeta,
+ bool firstSample, uint32_t* flags) override;
virtual status_t onOutputReceived(
- const sp<MediaCodecBuffer> &videoFrameBuffer,
+ uint8_t* data,
+ sp<ABuffer> imgObj,
const sp<AMessage> &outputFormat,
int64_t timeUs,
bool *done) override;
@@ -162,14 +199,13 @@
virtual status_t onExtractRect(FrameRect *rect) override;
- virtual status_t onInputReceived(
- const sp<MediaCodecBuffer> &codecBuffer __unused,
- MetaDataBase &sampleMeta __unused,
- bool firstSample __unused,
- uint32_t *flags __unused) override { return OK; }
+    virtual status_t onInputReceived(uint8_t* data __unused, size_t size __unused,
+                                     MetaDataBase& sampleMeta __unused, bool firstSample __unused,
+                                     uint32_t* flags __unused) override { return OK; }
virtual status_t onOutputReceived(
- const sp<MediaCodecBuffer> &videoFrameBuffer,
+ uint8_t* data,
+ sp<ABuffer> imgObj,
const sp<AMessage> &outputFormat,
int64_t timeUs,
bool *done) override;