Merge "AudioFlinger: Use actual MonoPipe depth for sleep computation" into pi-dev
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 8b8824f..712f118 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -37,9 +37,11 @@
// will calculate frame buffer size if |hasData| is set to true.
VideoFrame(uint32_t width, uint32_t height,
uint32_t displayWidth, uint32_t displayHeight,
+ uint32_t tileWidth, uint32_t tileHeight,
uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
mWidth(width), mHeight(height),
mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
+ mTileWidth(tileWidth), mTileHeight(tileHeight),
mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
mSize(hasData ? (bpp * width * height) : 0),
mIccSize(iccSize), mReserved(0) {
@@ -74,6 +76,8 @@
uint32_t mHeight; // Decoded image height before rotation
uint32_t mDisplayWidth; // Display width before rotation
uint32_t mDisplayHeight; // Display height before rotation
+ uint32_t mTileWidth; // Tile width (0 if image doesn't have grid)
+ uint32_t mTileHeight; // Tile height (0 if image doesn't have grid)
int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
uint32_t mBytesPerPixel; // Number of bytes per pixel
uint32_t mRowBytes; // Number of bytes per row before rotation
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 8dae251..01f014f 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -271,17 +271,43 @@
/////////////////////////////////////////////////////////////////////////
+struct HeifDecoderImpl::DecodeThread : public Thread {
+ explicit DecodeThread(HeifDecoderImpl *decoder) : mDecoder(decoder) {}
+
+private:
+ HeifDecoderImpl* mDecoder;
+
+ bool threadLoop();
+
+ DISALLOW_EVIL_CONSTRUCTORS(DecodeThread);
+};
+
+bool HeifDecoderImpl::DecodeThread::threadLoop() {
+ return mDecoder->decodeAsync();
+}
+
+/////////////////////////////////////////////////////////////////////////
+
HeifDecoderImpl::HeifDecoderImpl() :
// output color format should always be set via setOutputColor(), in case
// it's not, default to HAL_PIXEL_FORMAT_RGB_565.
mOutputColor(HAL_PIXEL_FORMAT_RGB_565),
mCurScanline(0),
+ mWidth(0),
+ mHeight(0),
mFrameDecoded(false),
mHasImage(false),
- mHasVideo(false) {
+ mHasVideo(false),
+ mAvailableLines(0),
+ mNumSlices(1),
+ mSliceHeight(0),
+ mAsyncDecodeDone(false) {
}
HeifDecoderImpl::~HeifDecoderImpl() {
+ if (mThread != nullptr) {
+ mThread->join();
+ }
}
bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
@@ -310,22 +336,23 @@
mHasImage = hasImage && !strcasecmp(hasImage, "yes");
mHasVideo = hasVideo && !strcasecmp(hasVideo, "yes");
+ sp<IMemory> sharedMem;
if (mHasImage) {
// image index < 0 to retrieve primary image
- mFrameMemory = mRetriever->getImageAtIndex(
+ sharedMem = mRetriever->getImageAtIndex(
-1, mOutputColor, true /*metaOnly*/);
} else if (mHasVideo) {
- mFrameMemory = mRetriever->getFrameAtTime(0,
+ sharedMem = mRetriever->getFrameAtTime(0,
MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
mOutputColor, true /*metaOnly*/);
}
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+ if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
ALOGE("getFrameAtTime: videoFrame is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
ALOGV("Meta dimension %dx%d, display %dx%d, angle %d, iccSize %d",
videoFrame->mWidth,
@@ -344,6 +371,14 @@
videoFrame->mIccSize,
videoFrame->getFlattenedIccData());
}
+ mWidth = videoFrame->mWidth;
+ mHeight = videoFrame->mHeight;
+ if (mHasImage && videoFrame->mTileHeight >= 512 && mWidth >= 3000 && mHeight >= 2000) {
+ // Try decoding in slices only if the image has tiles and is big enough.
+ mSliceHeight = videoFrame->mTileHeight;
+ mNumSlices = (videoFrame->mHeight + mSliceHeight - 1) / mSliceHeight;
+ ALOGV("mSliceHeight %u, mNumSlices %zu", mSliceHeight, mNumSlices);
+ }
return true;
}
@@ -376,6 +411,36 @@
return false;
}
+bool HeifDecoderImpl::decodeAsync() {
+ for (size_t i = 1; i < mNumSlices; i++) {
+ ALOGV("decodeAsync(): decoding slice %zu", i);
+ size_t top = i * mSliceHeight;
+ size_t bottom = (i + 1) * mSliceHeight;
+ if (bottom > mHeight) {
+ bottom = mHeight;
+ }
+ sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+ -1, mOutputColor, 0, top, mWidth, bottom);
+ {
+ Mutex::Autolock autolock(mLock);
+
+ if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ mAsyncDecodeDone = true;
+ mScanlineReady.signal();
+ break;
+ }
+ mFrameMemory = frameMemory;
+ mAvailableLines = bottom;
+ ALOGV("decodeAsync(): available lines %zu", mAvailableLines);
+ mScanlineReady.signal();
+ }
+ }
+ // Aggressively clear to avoid holding on to resources
+ mRetriever.clear();
+ mDataSource.clear();
+ return false;
+}
+
bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
// reset scanline pointer
mCurScanline = 0;
@@ -384,6 +449,47 @@
return true;
}
+ // See if we want to decode in slices to allow client to start
+ // scanline processing in parallel with decode. If this fails
+ // we fall back to decoding the full frame.
+ if (mHasImage && mNumSlices > 1) {
+ // get first slice and metadata
+ sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+ -1, mOutputColor, 0, 0, mWidth, mSliceHeight);
+
+ if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ ALOGE("decode: metadata is a nullptr");
+ return false;
+ }
+
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->pointer());
+
+ if (frameInfo != nullptr) {
+ frameInfo->set(
+ videoFrame->mWidth,
+ videoFrame->mHeight,
+ videoFrame->mRotationAngle,
+ videoFrame->mBytesPerPixel,
+ videoFrame->mIccSize,
+ videoFrame->getFlattenedIccData());
+ }
+
+ mFrameMemory = frameMemory;
+ mAvailableLines = mSliceHeight;
+ mThread = new DecodeThread(this);
+ if (mThread->run("HeifDecode", ANDROID_PRIORITY_FOREGROUND) == OK) {
+ mFrameDecoded = true;
+ return true;
+ }
+
+ // Fallback to decode without slicing
+ mThread.clear();
+ mNumSlices = 1;
+ mSliceHeight = 0;
+ mAvailableLines = 0;
+ mFrameMemory.clear();
+ }
+
if (mHasImage) {
// image index < 0 to retrieve primary image
mFrameMemory = mRetriever->getImageAtIndex(-1, mOutputColor);
@@ -393,14 +499,14 @@
}
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
- ALOGE("getFrameAtTime: videoFrame is a nullptr");
+ ALOGE("decode: videoFrame is a nullptr");
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
if (videoFrame->mSize == 0 ||
mFrameMemory->size() < videoFrame->getFlattenedSize()) {
- ALOGE("getFrameAtTime: videoFrame size is invalid");
+ ALOGE("decode: videoFrame size is invalid");
return false;
}
@@ -424,36 +530,45 @@
}
mFrameDecoded = true;
- // Aggressive clear to avoid holding on to resources
+ // Aggressively clear to avoid holding on to resources
mRetriever.clear();
mDataSource.clear();
return true;
}
-bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
- if (mCurScanline >= videoFrame->mHeight) {
- ALOGE("no more scanline available");
- return false;
- }
uint8_t* src = videoFrame->getFlattenedData() + videoFrame->mRowBytes * mCurScanline++;
memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mWidth);
return true;
}
-size_t HeifDecoderImpl::skipScanlines(size_t count) {
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
- return 0;
+bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+ if (mCurScanline >= mHeight) {
+ ALOGE("no more scanline available");
+ return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ if (mNumSlices > 1) {
+ Mutex::Autolock autolock(mLock);
+
+ while (!mAsyncDecodeDone && mCurScanline >= mAvailableLines) {
+ mScanlineReady.wait(mLock);
+ }
+ return (mCurScanline < mAvailableLines) ? getScanlineInner(dst) : false;
+ }
+
+ return getScanlineInner(dst);
+}
+
+size_t HeifDecoderImpl::skipScanlines(size_t count) {
uint32_t oldScanline = mCurScanline;
mCurScanline += count;
- if (mCurScanline > videoFrame->mHeight) {
- mCurScanline = videoFrame->mHeight;
+ if (mCurScanline > mHeight) {
+ mCurScanline = mHeight;
}
return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
}
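The slice bookkeeping in init() and decodeAsync() above reduces to ceiling division over the tile
height, with the last slice clamped to the image height. A standalone sketch of that arithmetic;
the 3024-pixel height and 512-pixel slice height are illustrative numbers, not taken from the change:

    #include <cstdio>
    #include <cstdint>

    // Mirrors mNumSlices = ceil(height / sliceHeight) and the per-slice [top, bottom) bounds.
    int main() {
        uint32_t height = 3024, sliceHeight = 512;                    // illustrative values
        size_t numSlices = (height + sliceHeight - 1) / sliceHeight;  // ceil(3024/512) = 6
        for (size_t i = 0; i < numSlices; i++) {
            size_t top = i * sliceHeight;
            size_t bottom = (i + 1) * sliceHeight;
            if (bottom > height) bottom = height;                     // last slice: rows 2560..3023
            printf("slice %zu: rows [%zu, %zu)\n", i, top, bottom);
        }
        return 0;
    }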
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index 406c2c1..528ee3b 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -19,6 +19,8 @@
#include "include/HeifDecoderAPI.h"
#include <system/graphics.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
#include <utils/RefBase.h>
namespace android {
@@ -49,14 +51,30 @@
size_t skipScanlines(size_t count) override;
private:
+ struct DecodeThread;
+
sp<IDataSource> mDataSource;
sp<MediaMetadataRetriever> mRetriever;
sp<IMemory> mFrameMemory;
android_pixel_format_t mOutputColor;
size_t mCurScanline;
+ uint32_t mWidth;
+ uint32_t mHeight;
bool mFrameDecoded;
bool mHasImage;
bool mHasVideo;
+
+ // Slice decoding only
+ Mutex mLock;
+ Condition mScanlineReady;
+ sp<DecodeThread> mThread;
+ size_t mAvailableLines;
+ size_t mNumSlices;
+ uint32_t mSliceHeight;
+ bool mAsyncDecodeDone;
+
+ bool decodeAsync();
+ bool getScanlineInner(uint8_t* dst);
};
} // namespace android
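The new mLock/mScanlineReady/mAvailableLines/mAsyncDecodeDone members implement a single-producer,
single-consumer handshake between the decode thread and getScanline(). A distilled sketch of that
pattern on android::Mutex and Condition; the struct and names are simplified stand-ins, not the
actual class:

    #include <utils/Mutex.h>
    #include <utils/Condition.h>

    using namespace android;

    // Producer publishes how many rows are decoded; consumer waits until its row is available.
    struct SliceProgress {
        Mutex lock;
        Condition ready;
        size_t availableLines = 0;
        bool done = false;

        void publish(size_t lines) {          // producer: decode thread
            Mutex::Autolock autolock(lock);
            availableLines = lines;
            ready.signal();
        }

        bool waitForLine(size_t y) {           // consumer: getScanline()
            Mutex::Autolock autolock(lock);
            while (!done && y >= availableLines) {
                ready.wait(lock);
            }
            return y < availableLines;
        }

        void finish() {                        // producer: decoding ended (success or failure)
            Mutex::Autolock autolock(lock);
            done = true;
            ready.signal();
        }
    };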
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 214117b..590ba1a 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -69,6 +69,7 @@
SET_DATA_SOURCE_CALLBACK,
GET_FRAME_AT_TIME,
GET_IMAGE_AT_INDEX,
+ GET_IMAGE_RECT_AT_INDEX,
GET_FRAME_AT_INDEX,
EXTRACT_ALBUM_ART,
EXTRACT_METADATA,
@@ -187,6 +188,30 @@
return interface_cast<IMemory>(reply.readStrongBinder());
}
+ sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom)
+ {
+ ALOGV("getImageRectAtIndex: index %d, colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+ data.writeInt32(index);
+ data.writeInt32(colorFormat);
+ data.writeInt32(left);
+ data.writeInt32(top);
+ data.writeInt32(right);
+ data.writeInt32(bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ sendSchedPolicy(data);
+#endif
+ remote()->transact(GET_IMAGE_RECT_AT_INDEX, data, &reply);
+ status_t ret = reply.readInt32();
+ if (ret != NO_ERROR) {
+ return NULL;
+ }
+ return interface_cast<IMemory>(reply.readStrongBinder());
+ }
+
status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly)
{
@@ -375,6 +400,34 @@
#endif
return NO_ERROR;
} break;
+
+ case GET_IMAGE_RECT_AT_INDEX: {
+ CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+ int index = data.readInt32();
+ int colorFormat = data.readInt32();
+ int left = data.readInt32();
+ int top = data.readInt32();
+ int right = data.readInt32();
+ int bottom = data.readInt32();
+ ALOGV("getImageRectAtIndex: index(%d), colorFormat(%d), rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ setSchedPolicy(data);
+#endif
+ sp<IMemory> bitmap = getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+ if (bitmap != 0) { // Don't send NULL across the binder interface
+ reply->writeInt32(NO_ERROR);
+ reply->writeStrongBinder(IInterface::asBinder(bitmap));
+ } else {
+ reply->writeInt32(UNKNOWN_ERROR);
+ }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ restoreSchedPolicy();
+#endif
+ return NO_ERROR;
+ } break;
+
case GET_FRAME_AT_INDEX: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
int frameIndex = data.readInt32();
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index 1a04552..c6f422d 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -46,6 +46,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) = 0;
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 992e230..98d300f 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -47,6 +47,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) = 0;
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> >* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
@@ -54,27 +56,6 @@
virtual const char* extractMetadata(int keyCode) = 0;
};
-// MediaMetadataRetrieverInterface
-class MediaMetadataRetrieverInterface : public MediaMetadataRetrieverBase
-{
-public:
- MediaMetadataRetrieverInterface() {}
-
- virtual ~MediaMetadataRetrieverInterface() {}
- virtual sp<IMemory> getFrameAtTime(
- int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
- { return NULL; }
- virtual sp<IMemory> getImageAtIndex(
- int /*index*/, int /*colorFormat*/, bool /*metaOnly*/, bool /*thumbnail*/)
- { return NULL; }
- virtual status_t getFrameAtIndex(
- std::vector<sp<IMemory> >* /*frames*/,
- int /*frameIndex*/, int /*numFrames*/, int /*colorFormat*/, bool /*metaOnly*/)
- { return ERROR_UNSUPPORTED; }
- virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
- virtual const char* extractMetadata(int /*keyCode*/) { return NULL; }
-};
-
}; // namespace android
#endif // ANDROID_MEDIAMETADATARETRIEVERINTERFACE_H
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index 4cdeeb7..cdef637 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -91,6 +91,8 @@
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
sp<IMemory> getImageAtIndex(int index,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
+ sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index c10a907..e61b04d 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -166,6 +166,19 @@
return mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
}
+sp<IMemory> MediaMetadataRetriever::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Mutex::Autolock _l(mLock);
+ if (mRetriever == 0) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ return mRetriever->getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+}
+
status_t MediaMetadataRetriever::getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
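From the client side, the new MediaMetadataRetriever::getImageRectAtIndex() is meant to be called
repeatedly with successive row bands of the primary image. A minimal sketch of that loop, assuming
the retriever has already been set up with setDataSource(); the 512-line band height and the RGB565
format are illustrative choices:

    #include <media/mediametadataretriever.h>
    #include <binder/IMemory.h>
    #include <system/graphics.h>
    #include <algorithm>

    using namespace android;

    // Fetch the primary image one horizontal band at a time (index < 0 selects the primary image,
    // as elsewhere in this change). Each returned IMemory wraps a flattened VideoFrame.
    sp<IMemory> fetchSlices(const sp<MediaMetadataRetriever>& retriever,
                            uint32_t width, uint32_t height, uint32_t sliceHeight) {
        sp<IMemory> last;
        for (uint32_t top = 0; top < height; top += sliceHeight) {
            uint32_t bottom = std::min(top + sliceHeight, height);
            last = retriever->getImageRectAtIndex(
                    -1, HAL_PIXEL_FORMAT_RGB_565, 0, top, width, bottom);
            if (last == nullptr || last->pointer() == nullptr) {
                return nullptr;   // mirror the null checks used by HeifDecoderImpl
            }
            // VideoFrame* frame = static_cast<VideoFrame*>(last->pointer()); ...
        }
        return last;
    }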
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index fa41c06..40b17bf 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -213,7 +213,7 @@
sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) {
- ALOGV("getFrameAtTime: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
index, colorFormat, metaOnly, thumbnail);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
@@ -229,6 +229,25 @@
return frame;
}
+sp<IMemory> MetadataRetrieverClient::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d), rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Mutex::Autolock lock(mLock);
+ Mutex::Autolock glock(sLock);
+ if (mRetriever == NULL) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ sp<IMemory> frame = mRetriever->getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+ if (frame == NULL) {
+ ALOGE("failed to extract image");
+ return NULL;
+ }
+ return frame;
+}
+
status_t MetadataRetrieverClient::getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index 63ba44a..272d093 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -54,6 +54,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail);
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index e2605ca..29a219f 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -43,7 +43,8 @@
static const size_t kRetryCount = 50; // must be >0
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
- int32_t width, int32_t height, int32_t dstBpp, bool metaOnly = false) {
+ int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+ int32_t dstBpp, bool metaOnly = false) {
int32_t rotationAngle;
if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0; // By default, no rotation
@@ -74,7 +75,7 @@
}
VideoFrame frame(width, height, displayWidth, displayHeight,
- rotationAngle, dstBpp, !metaOnly, iccSize);
+ tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
size_t size = frame.getFlattenedSize();
sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
@@ -155,7 +156,7 @@
return NULL;
}
- int32_t width, height;
+ int32_t width, height, tileWidth = 0, tileHeight = 0;
if (thumbnail) {
if (!findThumbnailInfo(trackMeta, &width, &height)) {
return NULL;
@@ -163,8 +164,14 @@
} else {
CHECK(trackMeta->findInt32(kKeyWidth, &width));
CHECK(trackMeta->findInt32(kKeyHeight, &height));
+
+ int32_t gridRows, gridCols;
+ if (!findGridInfo(trackMeta, &tileWidth, &tileHeight, &gridRows, &gridCols)) {
+ tileWidth = tileHeight = 0;
+ }
}
- return allocVideoFrame(trackMeta, width, height, dstBpp, true /*metaOnly*/);
+ return allocVideoFrame(trackMeta,
+ width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
}
FrameDecoder::FrameDecoder(
@@ -237,8 +244,11 @@
return OK;
}
-sp<IMemory> FrameDecoder::extractFrame() {
- status_t err = extractInternal();
+sp<IMemory> FrameDecoder::extractFrame(FrameRect *rect) {
+ status_t err = onExtractRect(rect);
+ if (err == OK) {
+ err = extractInternal();
+ }
if (err != OK) {
return NULL;
}
@@ -503,6 +513,8 @@
trackMeta(),
(crop_right - crop_left + 1),
(crop_bottom - crop_top + 1),
+ 0,
+ 0,
dstBpp());
addFrame(frameMem);
VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
@@ -541,7 +553,10 @@
mHeight(0),
mGridRows(1),
mGridCols(1),
- mTilesDecoded(0) {
+ mTileWidth(0),
+ mTileHeight(0),
+ mTilesDecoded(0),
+ mTargetTiles(0) {
}
sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
@@ -585,10 +600,12 @@
overrideMeta = new MetaData(*(trackMeta()));
overrideMeta->setInt32(kKeyWidth, tileWidth);
overrideMeta->setInt32(kKeyHeight, tileHeight);
+ mTileWidth = tileWidth;
+ mTileHeight = tileHeight;
mGridCols = gridCols;
mGridRows = gridRows;
} else {
- ALOGE("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
+ ALOGW("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
}
}
@@ -596,6 +613,7 @@
overrideMeta = trackMeta();
}
}
+ mTargetTiles = mGridCols * mGridRows;
sp<AMessage> videoFormat;
if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
@@ -614,6 +632,45 @@
return videoFormat;
}
+status_t ImageDecoder::onExtractRect(FrameRect *rect) {
+ // TODO:
+ // This callback is for verifying whether we can decode the rect,
+ // and if so, set up the internal variables for decoding.
+ // Currently, rect decoding is restricted to sequentially decoding one
+ // row of tiles at a time. We can't decode arbitrary rects, as the image
+ // track doesn't yet support seeking by tiles. So all we do here is to
+ // verify the rect against what we expect.
+ // When seeking by tile is supported, this code should be updated to
+ // set the seek parameters.
+ if (rect == NULL) {
+ if (mTilesDecoded > 0) {
+ return ERROR_UNSUPPORTED;
+ }
+ mTargetTiles = mGridRows * mGridCols;
+ return OK;
+ }
+
+ if (mTileWidth <= 0 || mTileHeight <=0) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ int32_t row = mTilesDecoded / mGridCols;
+ int32_t expectedTop = row * mTileHeight;
+ int32_t expectedBot = (row + 1) * mTileHeight;
+ if (expectedBot > mHeight) {
+ expectedBot = mHeight;
+ }
+ if (rect->left != 0 || rect->top != expectedTop
+ || rect->right != mWidth || rect->bottom != expectedBot) {
+ ALOGE("currently only support sequential decoding of slices");
+ return ERROR_UNSUPPORTED;
+ }
+
+ // advance one row
+ mTargetTiles = mTilesDecoded + mGridCols;
+ return OK;
+}
+
status_t ImageDecoder::onOutputReceived(
const sp<MediaCodecBuffer> &videoFrameBuffer,
const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
@@ -626,7 +683,8 @@
CHECK(outputFormat->findInt32("height", &height));
if (mFrame == NULL) {
- sp<IMemory> frameMem = allocVideoFrame(trackMeta(), mWidth, mHeight, dstBpp());
+ sp<IMemory> frameMem = allocVideoFrame(
+ trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
mFrame = static_cast<VideoFrame*>(frameMem->pointer());
addFrame(frameMem);
@@ -638,8 +696,6 @@
ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
int32_t dstLeft, dstTop, dstRight, dstBottom;
- int32_t numTiles = mGridRows * mGridCols;
-
dstLeft = mTilesDecoded % mGridCols * width;
dstTop = mTilesDecoded / mGridCols * height;
dstRight = dstLeft + width - 1;
@@ -663,7 +719,7 @@
dstBottom = dstTop + crop_bottom;
}
- *done = (++mTilesDecoded >= numTiles);
+ *done = (++mTilesDecoded >= mTargetTiles);
if (converter.isValid()) {
converter.convert(
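The rect accepted by ImageDecoder::onExtractRect() is fully determined by how many tiles have
already been decoded: each call must ask for the next full row of tiles, clamped to the image
height. A small sketch of that expectation; the grid and tile sizes are illustrative, not taken
from the change:

    #include <cstdio>
    #include <cstdint>

    // Mirrors onExtractRect(): row = mTilesDecoded / mGridCols, and the only acceptable rect
    // spans that row of tiles across the full image width.
    int main() {
        int32_t width = 4096, height = 2976;                          // e.g. an 8x6 grid of 512x512 tiles
        int32_t tileHeight = 512;
        int32_t gridRows = (height + tileHeight - 1) / tileHeight;    // 6
        for (int32_t row = 0; row < gridRows; row++) {
            int32_t expectedTop = row * tileHeight;
            int32_t expectedBot = (row + 1) * tileHeight;
            if (expectedBot > height) expectedBot = height;           // last row: 2560..2975
            printf("call %d must pass rect {0, %d, %d, %d}\n", row, expectedTop, width, expectedBot);
        }
        return 0;
    }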
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index e6c318c..e80ec3b 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -40,7 +40,8 @@
StagefrightMetadataRetriever::StagefrightMetadataRetriever()
: mParsedMetaData(false),
- mAlbumArt(NULL) {
+ mAlbumArt(NULL),
+ mLastImageIndex(-1) {
ALOGV("StagefrightMetadataRetriever()");
}
@@ -126,10 +127,30 @@
sp<IMemory> StagefrightMetadataRetriever::getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) {
-
ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
index, colorFormat, metaOnly, thumbnail);
+ return getImageInternal(index, colorFormat, metaOnly, thumbnail, NULL);
+}
+
+sp<IMemory> StagefrightMetadataRetriever::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+
+ FrameRect rect = {left, top, right, bottom};
+
+ if (mImageDecoder != NULL && index == mLastImageIndex) {
+ return mImageDecoder->extractFrame(&rect);
+ }
+
+ return getImageInternal(
+ index, colorFormat, false /*metaOnly*/, false /*thumbnail*/, &rect);
+}
+
+sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
+ int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
+
if (mExtractor.get() == NULL) {
ALOGE("no extractor.");
return NULL;
@@ -192,12 +213,17 @@
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
- ImageDecoder decoder(componentName, trackMeta, source);
+ sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
int64_t frameTimeUs = thumbnail ? -1 : 0;
- if (decoder.init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
- sp<IMemory> frame = decoder.extractFrame();
+ if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
+ sp<IMemory> frame = decoder->extractFrame(rect);
if (frame != NULL) {
+ if (rect != NULL) {
+ // keep the decoder if slice decoding
+ mImageDecoder = decoder;
+ mLastImageIndex = index;
+ }
return frame;
}
}
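Caching mImageDecoder together with mLastImageIndex is what turns repeated getImageRectAtIndex()
calls into incremental decoding: only the first call builds a decoder, and later calls with the
same index reuse it so that each call decodes just the next row of tiles. The intended call
pattern, shown here as an illustrative comment sketch (index, format, and row bounds are
placeholders):

    // sp<IMemory> s0 = retriever->getImageRectAtIndex(-1, fmt, 0,    0, w,  512);
    //     // no cached decoder yet: getImageInternal() creates an ImageDecoder,
    //     // stores it in mImageDecoder, and remembers mLastImageIndex = -1
    // sp<IMemory> s1 = retriever->getImageRectAtIndex(-1, fmt, 0,  512, w, 1024);
    //     // same index: the cached decoder is reused and decodes only the next tile row
    // sp<IMemory> s2 = retriever->getImageRectAtIndex(-1, fmt, 0, 1024, w, 1536);
    //     // ...and so on, one row of tiles per call, in order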
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index 3d4ea39..dc58c15 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -34,7 +34,11 @@
class VideoFrame;
struct MediaCodec;
-struct FrameDecoder {
+struct FrameRect {
+ int32_t left, top, right, bottom;
+};
+
+struct FrameDecoder : public RefBase {
FrameDecoder(
const AString &componentName,
const sp<MetaData> &trackMeta,
@@ -43,7 +47,7 @@
status_t init(
int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
- sp<IMemory> extractFrame();
+ sp<IMemory> extractFrame(FrameRect *rect = NULL);
status_t extractFrames(std::vector<sp<IMemory> >* frames);
@@ -59,6 +63,8 @@
int seekMode,
MediaSource::ReadOptions *options) = 0;
+ virtual status_t onExtractRect(FrameRect *rect) = 0;
+
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer,
MetaDataBase &sampleMeta,
@@ -110,6 +116,11 @@
int seekMode,
MediaSource::ReadOptions *options) override;
+ virtual status_t onExtractRect(FrameRect *rect) override {
+ // Rect extraction for sequences is not supported for now.
+ return (rect == NULL) ? OK : ERROR_UNSUPPORTED;
+ }
+
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer,
MetaDataBase &sampleMeta,
@@ -143,6 +154,8 @@
int seekMode,
MediaSource::ReadOptions *options) override;
+ virtual status_t onExtractRect(FrameRect *rect) override;
+
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer __unused,
MetaDataBase &sampleMeta __unused,
@@ -161,7 +174,10 @@
int32_t mHeight;
int32_t mGridRows;
int32_t mGridCols;
+ int32_t mTileWidth;
+ int32_t mTileHeight;
int32_t mTilesDecoded;
+ int32_t mTargetTiles;
};
} // namespace android
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 209f850..f78e125 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -27,8 +27,10 @@
class DataSource;
class MediaExtractor;
+struct ImageDecoder;
+struct FrameRect;
-struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
+struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
StagefrightMetadataRetriever();
virtual ~StagefrightMetadataRetriever();
@@ -44,6 +46,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail);
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> >* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);
@@ -59,6 +63,8 @@
KeyedVector<int, String8> mMetaData;
MediaAlbumArt *mAlbumArt;
+ sp<ImageDecoder> mImageDecoder;
+ int mLastImageIndex;
void parseMetaData();
// Delete album art and clear metadata.
void clearMetadata();
@@ -66,6 +72,8 @@
status_t getFrameInternal(
int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
+ virtual sp<IMemory> getImageInternal(
+ int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);