Merge "audiopolicy: restore normal binder call watchdog timeout" into pi-dev
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 06e8487..cf08610 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -68,10 +68,12 @@
template<typename T>
std::string toBase64StringNoPad(const T* data, size_t size) {
- if (size == 0) {
+ // Note that the base 64 conversion only works with arrays of single-byte
+ // values. If the source is empty or is not an array of single-byte values,
+ // return empty string.
+ if (size == 0 || sizeof(data[0]) != 1) {
return "";
}
- CHECK(sizeof(data[0] == 1));
android::AString outputString;
encodeBase64(data, size, &outputString);
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 8b8824f..712f118 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -37,9 +37,11 @@
// will calculate frame buffer size if |hasData| is set to true.
VideoFrame(uint32_t width, uint32_t height,
uint32_t displayWidth, uint32_t displayHeight,
+ uint32_t tileWidth, uint32_t tileHeight,
uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
mWidth(width), mHeight(height),
mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
+ mTileWidth(tileWidth), mTileHeight(tileHeight),
mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
mSize(hasData ? (bpp * width * height) : 0),
mIccSize(iccSize), mReserved(0) {
@@ -74,6 +76,8 @@
uint32_t mHeight; // Decoded image height before rotation
uint32_t mDisplayWidth; // Display width before rotation
uint32_t mDisplayHeight; // Display height before rotation
+ uint32_t mTileWidth; // Tile width (0 if image doesn't have grid)
+ uint32_t mTileHeight; // Tile height (0 if image doesn't have grid)
int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
uint32_t mBytesPerPixel; // Number of bytes per pixel
uint32_t mRowBytes; // Number of bytes per row before rotation
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a8369c2..f9df5b1 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -99,6 +99,11 @@
static constexpr char kAudioRecordLatency[] = "android.media.audiorecord.latency";
static constexpr char kAudioRecordSampleRate[] = "android.media.audiorecord.samplerate";
static constexpr char kAudioRecordChannelCount[] = "android.media.audiorecord.channels";
+ static constexpr char kAudioRecordCreated[] = "android.media.audiorecord.createdMs";
+ static constexpr char kAudioRecordDuration[] = "android.media.audiorecord.durationMs";
+ static constexpr char kAudioRecordCount[] = "android.media.audiorecord.n";
+ static constexpr char kAudioRecordError[] = "android.media.audiorecord.errcode";
+ static constexpr char kAudioRecordErrorFunction[] = "android.media.audiorecord.errfunc";
// constructor guarantees mAnalyticsItem is valid
@@ -109,6 +114,24 @@
audioFormatTypeString(record->mFormat).c_str());
mAnalyticsItem->setCString(kAudioRecordSource,
audioSourceString(record->mAttributes.source).c_str());
+
+ // log total duration recording, including anything currently running [and count].
+ nsecs_t active = 0;
+ if (mStartedNs != 0) {
+ active = systemTime() - mStartedNs;
+ }
+ mAnalyticsItem->setInt64(kAudioRecordDuration, (mDurationNs + active) / (1000 * 1000));
+ mAnalyticsItem->setInt32(kAudioRecordCount, mCount);
+
+ // XXX I don't know that this adds a lot of value, long term
+ if (mCreatedNs != 0) {
+ mAnalyticsItem->setInt64(kAudioRecordCreated, mCreatedNs / (1000 * 1000));
+ }
+
+ if (mLastError != NO_ERROR) {
+ mAnalyticsItem->setInt32(kAudioRecordError, mLastError);
+ mAnalyticsItem->setCString(kAudioRecordErrorFunction, mLastErrorFunc.c_str());
+ }
}
// hand the user a snapshot of the metrics.
@@ -354,6 +377,9 @@
exit:
mStatus = status;
+ if (status != NO_ERROR) {
+ mMediaMetrics.markError(status, __FUNCTION__);
+ }
return status;
}
@@ -412,8 +438,14 @@
get_sched_policy(0, &mPreviousSchedulingGroup);
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
+
+ // we've successfully started, log that time
+ mMediaMetrics.logStart(systemTime());
}
+ if (status != NO_ERROR) {
+ mMediaMetrics.markError(status, __FUNCTION__);
+ }
return status;
}
@@ -438,6 +470,9 @@
setpriority(PRIO_PROCESS, 0, mPreviousPriority);
set_sched_policy(0, mPreviousSchedulingGroup);
}
+
+    // we've successfully stopped, log that time
+ mMediaMetrics.logStop(systemTime());
}
bool AudioRecord::stopped() const
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index c07c397..cf446a5 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -704,7 +704,10 @@
private:
class MediaMetrics {
public:
- MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiorecord")) {
+ MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiorecord")),
+ mCreatedNs(systemTime(SYSTEM_TIME_REALTIME)),
+ mStartedNs(0), mDurationNs(0), mCount(0),
+ mLastError(NO_ERROR) {
}
~MediaMetrics() {
// mAnalyticsItem alloc failure will be flagged in the constructor
@@ -715,8 +718,20 @@
}
void gather(const AudioRecord *record);
MediaAnalyticsItem *dup() { return mAnalyticsItem->dup(); }
+
+ void logStart(nsecs_t when) { mStartedNs = when; mCount++; }
+ void logStop(nsecs_t when) { mDurationNs += (when-mStartedNs); mStartedNs = 0;}
+ void markError(status_t errcode, const char *func)
+ { mLastError = errcode; mLastErrorFunc = func;}
private:
std::unique_ptr<MediaAnalyticsItem> mAnalyticsItem;
+ nsecs_t mCreatedNs; // XXX: perhaps not worth it in production
+ nsecs_t mStartedNs;
+ nsecs_t mDurationNs;
+ int32_t mCount;
+
+ status_t mLastError;
+ std::string mLastErrorFunc;
};
MediaMetrics mMediaMetrics;
};
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 93ed5f2..045c2c3 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -204,15 +204,14 @@
// channel masks have changed, does this track need a downmixer?
// update to try using our desired format (if we aren't already using it)
- const audio_format_t prevDownmixerFormat = track->mDownmixRequiresFormat;
const status_t status = track->prepareForDownmix();
ALOGE_IF(status != OK,
"prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
status, track->channelMask, track->mMixerChannelMask);
- if (prevDownmixerFormat != track->mDownmixRequiresFormat) {
- track->prepareForReformat(); // because of downmixer, track format may change!
- }
+ // always do reformat since channel mask changed,
+ // do it after downmix since track format may change!
+ track->prepareForReformat();
if (track->mResampler.get() != nullptr && mixerChannelCountChanged) {
// resampler channels may have changed.
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 8dae251..01f014f 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -271,17 +271,43 @@
/////////////////////////////////////////////////////////////////////////
+struct HeifDecoderImpl::DecodeThread : public Thread {
+ explicit DecodeThread(HeifDecoderImpl *decoder) : mDecoder(decoder) {}
+
+private:
+ HeifDecoderImpl* mDecoder;
+
+ bool threadLoop();
+
+ DISALLOW_EVIL_CONSTRUCTORS(DecodeThread);
+};
+
+bool HeifDecoderImpl::DecodeThread::threadLoop() {
+ return mDecoder->decodeAsync();
+}
+
+/////////////////////////////////////////////////////////////////////////
+
HeifDecoderImpl::HeifDecoderImpl() :
// output color format should always be set via setOutputColor(), in case
// it's not, default to HAL_PIXEL_FORMAT_RGB_565.
mOutputColor(HAL_PIXEL_FORMAT_RGB_565),
mCurScanline(0),
+ mWidth(0),
+ mHeight(0),
mFrameDecoded(false),
mHasImage(false),
- mHasVideo(false) {
+ mHasVideo(false),
+ mAvailableLines(0),
+ mNumSlices(1),
+ mSliceHeight(0),
+ mAsyncDecodeDone(false) {
}
HeifDecoderImpl::~HeifDecoderImpl() {
+ if (mThread != nullptr) {
+ mThread->join();
+ }
}
bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
@@ -310,22 +336,23 @@
mHasImage = hasImage && !strcasecmp(hasImage, "yes");
mHasVideo = hasVideo && !strcasecmp(hasVideo, "yes");
+ sp<IMemory> sharedMem;
if (mHasImage) {
// image index < 0 to retrieve primary image
- mFrameMemory = mRetriever->getImageAtIndex(
+ sharedMem = mRetriever->getImageAtIndex(
-1, mOutputColor, true /*metaOnly*/);
} else if (mHasVideo) {
- mFrameMemory = mRetriever->getFrameAtTime(0,
+ sharedMem = mRetriever->getFrameAtTime(0,
MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
mOutputColor, true /*metaOnly*/);
}
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+ if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
ALOGE("getFrameAtTime: videoFrame is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
ALOGV("Meta dimension %dx%d, display %dx%d, angle %d, iccSize %d",
videoFrame->mWidth,
@@ -344,6 +371,14 @@
videoFrame->mIccSize,
videoFrame->getFlattenedIccData());
}
+ mWidth = videoFrame->mWidth;
+ mHeight = videoFrame->mHeight;
+ if (mHasImage && videoFrame->mTileHeight >= 512 && mWidth >= 3000 && mHeight >= 2000 ) {
+ // Try decoding in slices only if the image has tiles and is big enough.
+ mSliceHeight = videoFrame->mTileHeight;
+ mNumSlices = (videoFrame->mHeight + mSliceHeight - 1) / mSliceHeight;
+ ALOGV("mSliceHeight %u, mNumSlices %zu", mSliceHeight, mNumSlices);
+ }
return true;
}
@@ -376,6 +411,36 @@
return false;
}
+bool HeifDecoderImpl::decodeAsync() {
+ for (size_t i = 1; i < mNumSlices; i++) {
+ ALOGV("decodeAsync(): decoding slice %zu", i);
+ size_t top = i * mSliceHeight;
+ size_t bottom = (i + 1) * mSliceHeight;
+ if (bottom > mHeight) {
+ bottom = mHeight;
+ }
+ sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+ -1, mOutputColor, 0, top, mWidth, bottom);
+ {
+ Mutex::Autolock autolock(mLock);
+
+ if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ mAsyncDecodeDone = true;
+ mScanlineReady.signal();
+ break;
+ }
+ mFrameMemory = frameMemory;
+ mAvailableLines = bottom;
+ ALOGV("decodeAsync(): available lines %zu", mAvailableLines);
+ mScanlineReady.signal();
+ }
+ }
+    // Aggressively clear to avoid holding on to resources
+ mRetriever.clear();
+ mDataSource.clear();
+ return false;
+}
+
bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
// reset scanline pointer
mCurScanline = 0;
@@ -384,6 +449,47 @@
return true;
}
+ // See if we want to decode in slices to allow client to start
+ // scanline processing in parallel with decode. If this fails
+ // we fallback to decoding the full frame.
+ if (mHasImage && mNumSlices > 1) {
+ // get first slice and metadata
+ sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+ -1, mOutputColor, 0, 0, mWidth, mSliceHeight);
+
+ if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ ALOGE("decode: metadata is a nullptr");
+ return false;
+ }
+
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->pointer());
+
+ if (frameInfo != nullptr) {
+ frameInfo->set(
+ videoFrame->mWidth,
+ videoFrame->mHeight,
+ videoFrame->mRotationAngle,
+ videoFrame->mBytesPerPixel,
+ videoFrame->mIccSize,
+ videoFrame->getFlattenedIccData());
+ }
+
+ mFrameMemory = frameMemory;
+ mAvailableLines = mSliceHeight;
+ mThread = new DecodeThread(this);
+ if (mThread->run("HeifDecode", ANDROID_PRIORITY_FOREGROUND) == OK) {
+ mFrameDecoded = true;
+ return true;
+ }
+
+ // Fallback to decode without slicing
+ mThread.clear();
+ mNumSlices = 1;
+ mSliceHeight = 0;
+ mAvailableLines = 0;
+ mFrameMemory.clear();
+ }
+
if (mHasImage) {
// image index < 0 to retrieve primary image
mFrameMemory = mRetriever->getImageAtIndex(-1, mOutputColor);
@@ -393,14 +499,14 @@
}
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
- ALOGE("getFrameAtTime: videoFrame is a nullptr");
+ ALOGE("decode: videoFrame is a nullptr");
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
if (videoFrame->mSize == 0 ||
mFrameMemory->size() < videoFrame->getFlattenedSize()) {
- ALOGE("getFrameAtTime: videoFrame size is invalid");
+ ALOGE("decode: videoFrame size is invalid");
return false;
}
@@ -424,36 +530,45 @@
}
mFrameDecoded = true;
- // Aggressive clear to avoid holding on to resources
+ // Aggressively clear to avoid holding on to resources
mRetriever.clear();
mDataSource.clear();
return true;
}
-bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
- if (mCurScanline >= videoFrame->mHeight) {
- ALOGE("no more scanline available");
- return false;
- }
uint8_t* src = videoFrame->getFlattenedData() + videoFrame->mRowBytes * mCurScanline++;
memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mWidth);
return true;
}
-size_t HeifDecoderImpl::skipScanlines(size_t count) {
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
- return 0;
+bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+ if (mCurScanline >= mHeight) {
+ ALOGE("no more scanline available");
+ return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ if (mNumSlices > 1) {
+ Mutex::Autolock autolock(mLock);
+
+ while (!mAsyncDecodeDone && mCurScanline >= mAvailableLines) {
+ mScanlineReady.wait(mLock);
+ }
+ return (mCurScanline < mAvailableLines) ? getScanlineInner(dst) : false;
+ }
+
+ return getScanlineInner(dst);
+}
+
+size_t HeifDecoderImpl::skipScanlines(size_t count) {
uint32_t oldScanline = mCurScanline;
mCurScanline += count;
- if (mCurScanline > videoFrame->mHeight) {
- mCurScanline = videoFrame->mHeight;
+ if (mCurScanline > mHeight) {
+ mCurScanline = mHeight;
}
return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
}
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index 406c2c1..528ee3b 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -19,6 +19,8 @@
#include "include/HeifDecoderAPI.h"
#include <system/graphics.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
#include <utils/RefBase.h>
namespace android {
@@ -49,14 +51,30 @@
size_t skipScanlines(size_t count) override;
private:
+ struct DecodeThread;
+
sp<IDataSource> mDataSource;
sp<MediaMetadataRetriever> mRetriever;
sp<IMemory> mFrameMemory;
android_pixel_format_t mOutputColor;
size_t mCurScanline;
+ uint32_t mWidth;
+ uint32_t mHeight;
bool mFrameDecoded;
bool mHasImage;
bool mHasVideo;
+
+ // Slice decoding only
+ Mutex mLock;
+ Condition mScanlineReady;
+ sp<DecodeThread> mThread;
+ size_t mAvailableLines;
+ size_t mNumSlices;
+ uint32_t mSliceHeight;
+ bool mAsyncDecodeDone;
+
+ bool decodeAsync();
+ bool getScanlineInner(uint8_t* dst);
};
} // namespace android
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 214117b..590ba1a 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -69,6 +69,7 @@
SET_DATA_SOURCE_CALLBACK,
GET_FRAME_AT_TIME,
GET_IMAGE_AT_INDEX,
+ GET_IMAGE_RECT_AT_INDEX,
GET_FRAME_AT_INDEX,
EXTRACT_ALBUM_ART,
EXTRACT_METADATA,
@@ -187,6 +188,30 @@
return interface_cast<IMemory>(reply.readStrongBinder());
}
+ sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom)
+ {
+ ALOGV("getImageRectAtIndex: index %d, colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+ data.writeInt32(index);
+ data.writeInt32(colorFormat);
+ data.writeInt32(left);
+ data.writeInt32(top);
+ data.writeInt32(right);
+ data.writeInt32(bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ sendSchedPolicy(data);
+#endif
+ remote()->transact(GET_IMAGE_RECT_AT_INDEX, data, &reply);
+ status_t ret = reply.readInt32();
+ if (ret != NO_ERROR) {
+ return NULL;
+ }
+ return interface_cast<IMemory>(reply.readStrongBinder());
+ }
+
status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly)
{
@@ -375,6 +400,34 @@
#endif
return NO_ERROR;
} break;
+
+ case GET_IMAGE_RECT_AT_INDEX: {
+ CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+ int index = data.readInt32();
+ int colorFormat = data.readInt32();
+ int left = data.readInt32();
+ int top = data.readInt32();
+ int right = data.readInt32();
+ int bottom = data.readInt32();
+ ALOGV("getImageRectAtIndex: index(%d), colorFormat(%d), rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ setSchedPolicy(data);
+#endif
+ sp<IMemory> bitmap = getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+ if (bitmap != 0) { // Don't send NULL across the binder interface
+ reply->writeInt32(NO_ERROR);
+ reply->writeStrongBinder(IInterface::asBinder(bitmap));
+ } else {
+ reply->writeInt32(UNKNOWN_ERROR);
+ }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ restoreSchedPolicy();
+#endif
+ return NO_ERROR;
+ } break;
+
case GET_FRAME_AT_INDEX: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
int frameIndex = data.readInt32();
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index 1a04552..c6f422d 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -46,6 +46,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) = 0;
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 992e230..98d300f 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -47,6 +47,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) = 0;
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> >* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
@@ -54,27 +56,6 @@
virtual const char* extractMetadata(int keyCode) = 0;
};
-// MediaMetadataRetrieverInterface
-class MediaMetadataRetrieverInterface : public MediaMetadataRetrieverBase
-{
-public:
- MediaMetadataRetrieverInterface() {}
-
- virtual ~MediaMetadataRetrieverInterface() {}
- virtual sp<IMemory> getFrameAtTime(
- int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
- { return NULL; }
- virtual sp<IMemory> getImageAtIndex(
- int /*index*/, int /*colorFormat*/, bool /*metaOnly*/, bool /*thumbnail*/)
- { return NULL; }
- virtual status_t getFrameAtIndex(
- std::vector<sp<IMemory> >* /*frames*/,
- int /*frameIndex*/, int /*numFrames*/, int /*colorFormat*/, bool /*metaOnly*/)
- { return ERROR_UNSUPPORTED; }
- virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
- virtual const char* extractMetadata(int /*keyCode*/) { return NULL; }
-};
-
}; // namespace android
#endif // ANDROID_MEDIAMETADATARETRIEVERINTERFACE_H
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index 4cdeeb7..cdef637 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -91,6 +91,8 @@
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
sp<IMemory> getImageAtIndex(int index,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
+ sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index c10a907..e61b04d 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -166,6 +166,19 @@
return mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
}
+sp<IMemory> MediaMetadataRetriever::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Mutex::Autolock _l(mLock);
+ if (mRetriever == 0) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ return mRetriever->getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+}
+
status_t MediaMetadataRetriever::getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 672b832..40b17bf 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -48,7 +48,6 @@
{
ALOGV("MetadataRetrieverClient constructor pid(%d)", pid);
mPid = pid;
- mThumbnail = NULL;
mAlbumArt = NULL;
mRetriever = NULL;
}
@@ -77,7 +76,6 @@
ALOGV("disconnect from pid %d", mPid);
Mutex::Autolock lock(mLock);
mRetriever.clear();
- mThumbnail.clear();
mAlbumArt.clear();
IPCThreadState::self()->flushCommands();
}
@@ -201,7 +199,6 @@
(long long)timeUs, option, colorFormat, metaOnly);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
- mThumbnail.clear();
if (mRetriever == NULL) {
ALOGE("retriever is not initialized");
return NULL;
@@ -216,11 +213,10 @@
sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) {
- ALOGV("getFrameAtTime: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
index, colorFormat, metaOnly, thumbnail);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
- mThumbnail.clear();
if (mRetriever == NULL) {
ALOGE("retriever is not initialized");
return NULL;
@@ -233,6 +229,25 @@
return frame;
}
+sp<IMemory> MetadataRetrieverClient::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d), rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Mutex::Autolock lock(mLock);
+ Mutex::Autolock glock(sLock);
+ if (mRetriever == NULL) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ sp<IMemory> frame = mRetriever->getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+ if (frame == NULL) {
+ ALOGE("failed to extract image");
+ return NULL;
+ }
+ return frame;
+}
+
status_t MetadataRetrieverClient::getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index e774c8f..272d093 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -54,6 +54,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail);
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);
@@ -73,9 +75,8 @@
sp<MediaMetadataRetrieverBase> mRetriever;
pid_t mPid;
- // Keep the shared memory copy of album art and capture frame (for thumbnail)
+ // Keep the shared memory copy of album art
sp<IMemory> mAlbumArt;
- sp<IMemory> mThumbnail;
};
}; // namespace android
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index 2486b76..35a43d8 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -26,7 +26,7 @@
PipeReader::PipeReader(Pipe& pipe) :
NBAIO_Source(pipe.mFormat),
- mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, true /*flush*/),
+ mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, false /*flush*/),
mFramesOverrun(0),
mOverruns(0)
{
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 09a8be5..29a219f 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -42,9 +42,9 @@
static const int64_t kBufferTimeOutUs = 10000ll; // 10 msec
static const size_t kRetryCount = 50; // must be >0
-//static
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
- int32_t width, int32_t height, int32_t dstBpp, bool metaOnly = false) {
+ int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+ int32_t dstBpp, bool metaOnly = false) {
int32_t rotationAngle;
if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0; // By default, no rotation
@@ -75,7 +75,7 @@
}
VideoFrame frame(width, height, displayWidth, displayHeight,
- rotationAngle, dstBpp, !metaOnly, iccSize);
+ tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
size_t size = frame.getFlattenedSize();
sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
@@ -94,7 +94,6 @@
return frameMem;
}
-//static
bool findThumbnailInfo(
const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
@@ -107,30 +106,15 @@
type ?: &dummyType, data ?: &dummyData, size ?: &dummySize);
}
-//static
-sp<IMemory> FrameDecoder::getMetadataOnly(
- const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
- OMX_COLOR_FORMATTYPE dstFormat;
- int32_t dstBpp;
- if (!getDstColorFormat(
- (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
- return NULL;
- }
-
- int32_t width, height;
- if (thumbnail) {
- if (!findThumbnailInfo(trackMeta, &width, &height)) {
- return NULL;
- }
- } else {
- CHECK(trackMeta->findInt32(kKeyWidth, &width));
- CHECK(trackMeta->findInt32(kKeyHeight, &height));
- }
- return allocVideoFrame(trackMeta, width, height, dstBpp, true /*metaOnly*/);
+bool findGridInfo(const sp<MetaData> &trackMeta,
+ int32_t *tileWidth, int32_t *tileHeight, int32_t *gridRows, int32_t *gridCols) {
+ return trackMeta->findInt32(kKeyTileWidth, tileWidth) && (*tileWidth > 0)
+ && trackMeta->findInt32(kKeyTileHeight, tileHeight) && (*tileHeight > 0)
+ && trackMeta->findInt32(kKeyGridRows, gridRows) && (*gridRows > 0)
+ && trackMeta->findInt32(kKeyGridCols, gridCols) && (*gridCols > 0);
}
-//static
-bool FrameDecoder::getDstColorFormat(
+bool getDstColorFormat(
android_pixel_format_t colorFormat,
OMX_COLOR_FORMATTYPE *dstFormat,
int32_t *dstBpp) {
@@ -162,46 +146,63 @@
return false;
}
-sp<IMemory> FrameDecoder::extractFrame(
- int64_t frameTimeUs, int option, int colorFormat) {
+//static
+sp<IMemory> FrameDecoder::getMetadataOnly(
+ const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
+ OMX_COLOR_FORMATTYPE dstFormat;
+ int32_t dstBpp;
if (!getDstColorFormat(
- (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
+ (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
return NULL;
}
- status_t err = extractInternal(frameTimeUs, 1, option);
- if (err != OK) {
- return NULL;
- }
+ int32_t width, height, tileWidth = 0, tileHeight = 0;
+ if (thumbnail) {
+ if (!findThumbnailInfo(trackMeta, &width, &height)) {
+ return NULL;
+ }
+ } else {
+ CHECK(trackMeta->findInt32(kKeyWidth, &width));
+ CHECK(trackMeta->findInt32(kKeyHeight, &height));
- return mFrames.size() > 0 ? mFrames[0] : NULL;
+ int32_t gridRows, gridCols;
+ if (!findGridInfo(trackMeta, &tileWidth, &tileHeight, &gridRows, &gridCols)) {
+ tileWidth = tileHeight = 0;
+ }
+ }
+ return allocVideoFrame(trackMeta,
+ width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
}
-status_t FrameDecoder::extractFrames(
- int64_t frameTimeUs, size_t numFrames, int option, int colorFormat,
- std::vector<sp<IMemory> >* frames) {
+FrameDecoder::FrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source)
+ : mComponentName(componentName),
+ mTrackMeta(trackMeta),
+ mSource(source),
+ mDstFormat(OMX_COLOR_Format16bitRGB565),
+ mDstBpp(2),
+ mHaveMoreInputs(true),
+ mFirstSample(true) {
+}
+
+FrameDecoder::~FrameDecoder() {
+ if (mDecoder != NULL) {
+ mDecoder->release();
+ mSource->stop();
+ }
+}
+
+status_t FrameDecoder::init(
+ int64_t frameTimeUs, size_t numFrames, int option, int colorFormat) {
if (!getDstColorFormat(
(android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
return ERROR_UNSUPPORTED;
}
- status_t err = extractInternal(frameTimeUs, numFrames, option);
- if (err != OK) {
- return err;
- }
-
- for (size_t i = 0; i < mFrames.size(); i++) {
- frames->push_back(mFrames[i]);
- }
- return OK;
-}
-
-status_t FrameDecoder::extractInternal(
- int64_t frameTimeUs, size_t numFrames, int option) {
-
- MediaSource::ReadOptions options;
sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
- frameTimeUs, numFrames, option, &options);
+ frameTimeUs, numFrames, option, &mReadOptions);
if (videoFormat == NULL) {
ALOGE("video format or seek mode not supported");
return ERROR_UNSUPPORTED;
@@ -217,7 +218,8 @@
return (decoder.get() == NULL) ? NO_MEMORY : err;
}
- err = decoder->configure(videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+ err = decoder->configure(
+ videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
if (err != OK) {
ALOGW("configure returned error %d (%s)", err, asString(err));
decoder->release();
@@ -237,43 +239,49 @@
decoder->release();
return err;
}
+ mDecoder = decoder;
- Vector<sp<MediaCodecBuffer> > inputBuffers;
- err = decoder->getInputBuffers(&inputBuffers);
+ return OK;
+}
+
+sp<IMemory> FrameDecoder::extractFrame(FrameRect *rect) {
+ status_t err = onExtractRect(rect);
+ if (err == OK) {
+ err = extractInternal();
+ }
if (err != OK) {
- ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
- decoder->release();
- mSource->stop();
+ return NULL;
+ }
+
+ return mFrames.size() > 0 ? mFrames[0] : NULL;
+}
+
+status_t FrameDecoder::extractFrames(std::vector<sp<IMemory> >* frames) {
+ status_t err = extractInternal();
+ if (err != OK) {
return err;
}
- Vector<sp<MediaCodecBuffer> > outputBuffers;
- err = decoder->getOutputBuffers(&outputBuffers);
- if (err != OK) {
- ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
- decoder->release();
- mSource->stop();
- return err;
+ for (size_t i = 0; i < mFrames.size(); i++) {
+ frames->push_back(mFrames[i]);
}
+ return OK;
+}
- sp<AMessage> outputFormat = NULL;
- bool haveMoreInputs = true;
- size_t index, offset, size;
- int64_t timeUs;
- size_t retriesLeft = kRetryCount;
+status_t FrameDecoder::extractInternal() {
+ status_t err = OK;
bool done = false;
- bool firstSample = true;
+ size_t retriesLeft = kRetryCount;
do {
- size_t inputIndex = -1;
+ size_t index;
int64_t ptsUs = 0ll;
uint32_t flags = 0;
- sp<MediaCodecBuffer> codecBuffer = NULL;
// Queue as many inputs as we possibly can, then block on dequeuing
// outputs. After getting each output, come back and queue the inputs
// again to keep the decoder busy.
- while (haveMoreInputs) {
- err = decoder->dequeueInputBuffer(&inputIndex, 0);
+ while (mHaveMoreInputs) {
+ err = mDecoder->dequeueInputBuffer(&index, 0);
if (err != OK) {
ALOGV("Timed out waiting for input");
if (retriesLeft) {
@@ -281,16 +289,21 @@
}
break;
}
- codecBuffer = inputBuffers[inputIndex];
+ sp<MediaCodecBuffer> codecBuffer;
+ err = mDecoder->getInputBuffer(index, &codecBuffer);
+ if (err != OK) {
+ ALOGE("failed to get input buffer %zu", index);
+ break;
+ }
MediaBufferBase *mediaBuffer = NULL;
- err = mSource->read(&mediaBuffer, &options);
- options.clearSeekTo();
+ err = mSource->read(&mediaBuffer, &mReadOptions);
+ mReadOptions.clearSeekTo();
if (err != OK) {
ALOGW("Input Error or EOS");
- haveMoreInputs = false;
- if (!firstSample && err == ERROR_END_OF_STREAM) {
+ mHaveMoreInputs = false;
+ if (!mFirstSample && err == ERROR_END_OF_STREAM) {
err = OK;
}
break;
@@ -299,7 +312,7 @@
if (mediaBuffer->range_length() > codecBuffer->capacity()) {
ALOGE("buffer size (%zu) too large for codec input size (%zu)",
mediaBuffer->range_length(), codecBuffer->capacity());
- haveMoreInputs = false;
+ mHaveMoreInputs = false;
err = BAD_VALUE;
} else {
codecBuffer->setRange(0, mediaBuffer->range_length());
@@ -309,45 +322,46 @@
(const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
mediaBuffer->range_length());
- onInputReceived(codecBuffer, mediaBuffer->meta_data(), firstSample, &flags);
- firstSample = false;
+ onInputReceived(codecBuffer, mediaBuffer->meta_data(), mFirstSample, &flags);
+ mFirstSample = false;
}
mediaBuffer->release();
- if (haveMoreInputs && inputIndex < inputBuffers.size()) {
+ if (mHaveMoreInputs) {
ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
codecBuffer->size(), ptsUs, flags);
- err = decoder->queueInputBuffer(
- inputIndex,
+ err = mDecoder->queueInputBuffer(
+ index,
codecBuffer->offset(),
codecBuffer->size(),
ptsUs,
flags);
if (flags & MediaCodec::BUFFER_FLAG_EOS) {
- haveMoreInputs = false;
+ mHaveMoreInputs = false;
}
}
}
while (err == OK) {
+ size_t offset, size;
// wait for a decoded buffer
- err = decoder->dequeueOutputBuffer(
+ err = mDecoder->dequeueOutputBuffer(
&index,
&offset,
&size,
- &timeUs,
+ &ptsUs,
&flags,
kBufferTimeOutUs);
if (err == INFO_FORMAT_CHANGED) {
ALOGV("Received format change");
- err = decoder->getOutputFormat(&outputFormat);
+ err = mDecoder->getOutputFormat(&mOutputFormat);
} else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
ALOGV("Output buffers changed");
- err = decoder->getOutputBuffers(&outputBuffers);
+ err = OK;
} else {
if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
@@ -355,12 +369,15 @@
} else if (err == OK) {
// If we're seeking with CLOSEST option and obtained a valid targetTimeUs
// from the extractor, decode to the specified frame. Otherwise we're done.
- ALOGV("Received an output buffer, timeUs=%lld", (long long)timeUs);
- sp<MediaCodecBuffer> videoFrameBuffer = outputBuffers.itemAt(index);
-
- err = onOutputReceived(videoFrameBuffer, outputFormat, timeUs, &done);
-
- decoder->releaseOutputBuffer(index);
+ ALOGV("Received an output buffer, timeUs=%lld", (long long)ptsUs);
+ sp<MediaCodecBuffer> videoFrameBuffer;
+ err = mDecoder->getOutputBuffer(index, &videoFrameBuffer);
+ if (err != OK) {
+ ALOGE("failed to get output buffer %zu", index);
+ break;
+ }
+ err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+ mDecoder->releaseOutputBuffer(index);
} else {
ALOGW("Received error %d (%s) instead of output", err, asString(err));
done = true;
@@ -370,9 +387,6 @@
}
} while (err == OK && !done);
- mSource->stop();
- decoder->release();
-
if (err != OK) {
ALOGE("failed to get video frame (err %d)", err);
}
@@ -380,6 +394,20 @@
return err;
}
+//////////////////////////////////////////////////////////////////////
+
+VideoFrameDecoder::VideoFrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source)
+ : FrameDecoder(componentName, trackMeta, source),
+ mIsAvcOrHevc(false),
+ mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
+ mTargetTimeUs(-1ll),
+ mNumFrames(0),
+ mNumFramesDecoded(0) {
+}
+
sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
int64_t frameTimeUs, size_t numFrames, int seekMode, MediaSource::ReadOptions *options) {
mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
@@ -485,6 +513,8 @@
trackMeta(),
(crop_right - crop_left + 1),
(crop_bottom - crop_top + 1),
+ 0,
+ 0,
dstBpp());
addFrame(frameMem);
VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
@@ -511,33 +541,50 @@
return ERROR_UNSUPPORTED;
}
+////////////////////////////////////////////////////////////////////////
+
+ImageDecoder::ImageDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source)
+ : FrameDecoder(componentName, trackMeta, source),
+ mFrame(NULL),
+ mWidth(0),
+ mHeight(0),
+ mGridRows(1),
+ mGridCols(1),
+ mTileWidth(0),
+ mTileHeight(0),
+ mTilesDecoded(0),
+ mTargetTiles(0) {
+}
+
sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
int64_t frameTimeUs, size_t /*numFrames*/,
int /*seekMode*/, MediaSource::ReadOptions *options) {
sp<MetaData> overrideMeta;
- mThumbnail = false;
if (frameTimeUs < 0) {
uint32_t type;
const void *data;
size_t size;
- int32_t thumbWidth, thumbHeight;
// if we have a stand-alone thumbnail, set up the override meta,
// and set seekTo time to -1.
- if (!findThumbnailInfo(trackMeta(),
- &thumbWidth, &thumbHeight, &type, &data, &size)) {
+ if (!findThumbnailInfo(trackMeta(), &mWidth, &mHeight, &type, &data, &size)) {
ALOGE("Thumbnail not available");
return NULL;
}
overrideMeta = new MetaData(*(trackMeta()));
overrideMeta->remove(kKeyDisplayWidth);
overrideMeta->remove(kKeyDisplayHeight);
- overrideMeta->setInt32(kKeyWidth, thumbWidth);
- overrideMeta->setInt32(kKeyHeight, thumbHeight);
+ overrideMeta->setInt32(kKeyWidth, mWidth);
+ overrideMeta->setInt32(kKeyHeight, mHeight);
overrideMeta->setData(kKeyHVCC, type, data, size);
options->setSeekTo(-1);
- mThumbnail = true;
} else {
+ CHECK(trackMeta()->findInt32(kKeyWidth, &mWidth));
+ CHECK(trackMeta()->findInt32(kKeyHeight, &mHeight));
+
options->setSeekTo(frameTimeUs);
}
@@ -545,32 +592,28 @@
if (overrideMeta == NULL) {
// check if we're dealing with a tiled heif
int32_t tileWidth, tileHeight, gridRows, gridCols;
- if (trackMeta()->findInt32(kKeyTileWidth, &tileWidth) && tileWidth > 0
- && trackMeta()->findInt32(kKeyTileHeight, &tileHeight) && tileHeight > 0
- && trackMeta()->findInt32(kKeyGridRows, &gridRows) && gridRows > 0
- && trackMeta()->findInt32(kKeyGridCols, &gridCols) && gridCols > 0) {
- int32_t width, height;
- CHECK(trackMeta()->findInt32(kKeyWidth, &width));
- CHECK(trackMeta()->findInt32(kKeyHeight, &height));
-
- if (width <= tileWidth * gridCols && height <= tileHeight * gridRows) {
+ if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
+ if (mWidth <= tileWidth * gridCols && mHeight <= tileHeight * gridRows) {
ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
- gridCols, gridRows, tileWidth, tileHeight, width, height);
+ gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
overrideMeta = new MetaData(*(trackMeta()));
overrideMeta->setInt32(kKeyWidth, tileWidth);
overrideMeta->setInt32(kKeyHeight, tileHeight);
+ mTileWidth = tileWidth;
+ mTileHeight = tileHeight;
mGridCols = gridCols;
mGridRows = gridRows;
} else {
- ALOGE("bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
- gridCols, gridRows, tileWidth, tileHeight, width, height);
+ ALOGW("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
+ gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
}
}
if (overrideMeta == NULL) {
overrideMeta = trackMeta();
}
}
+ mTargetTiles = mGridCols * mGridRows;
sp<AMessage> videoFormat;
if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
@@ -589,6 +632,45 @@
return videoFormat;
}
+status_t ImageDecoder::onExtractRect(FrameRect *rect) {
+ // TODO:
+ // This callback is for verifying whether we can decode the rect,
+ // and if so, set up the internal variables for decoding.
+ // Currently, rect decoding is restricted to sequentially decoding one
+ // row of tiles at a time. We can't decode arbitrary rects, as the image
+ // track doesn't yet support seeking by tiles. So all we do here is to
+ // verify the rect against what we expect.
+ // When seeking by tile is supported, this code should be updated to
+ // set the seek parameters.
+ if (rect == NULL) {
+ if (mTilesDecoded > 0) {
+ return ERROR_UNSUPPORTED;
+ }
+ mTargetTiles = mGridRows * mGridCols;
+ return OK;
+ }
+
+ if (mTileWidth <= 0 || mTileHeight <=0) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ int32_t row = mTilesDecoded / mGridCols;
+ int32_t expectedTop = row * mTileHeight;
+ int32_t expectedBot = (row + 1) * mTileHeight;
+ if (expectedBot > mHeight) {
+ expectedBot = mHeight;
+ }
+ if (rect->left != 0 || rect->top != expectedTop
+ || rect->right != mWidth || rect->bottom != expectedBot) {
+ ALOGE("currently only support sequential decoding of slices");
+ return ERROR_UNSUPPORTED;
+ }
+
+ // advance one row
+ mTargetTiles = mTilesDecoded + mGridCols;
+ return OK;
+}
+
status_t ImageDecoder::onOutputReceived(
const sp<MediaCodecBuffer> &videoFrameBuffer,
const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
@@ -600,17 +682,9 @@
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
- int32_t imageWidth, imageHeight;
- if (mThumbnail) {
- CHECK(trackMeta()->findInt32(kKeyThumbnailWidth, &imageWidth));
- CHECK(trackMeta()->findInt32(kKeyThumbnailHeight, &imageHeight));
- } else {
- CHECK(trackMeta()->findInt32(kKeyWidth, &imageWidth));
- CHECK(trackMeta()->findInt32(kKeyHeight, &imageHeight));
- }
-
if (mFrame == NULL) {
- sp<IMemory> frameMem = allocVideoFrame(trackMeta(), imageWidth, imageHeight, dstBpp());
+ sp<IMemory> frameMem = allocVideoFrame(
+ trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
mFrame = static_cast<VideoFrame*>(frameMem->pointer());
addFrame(frameMem);
@@ -622,8 +696,6 @@
ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
int32_t dstLeft, dstTop, dstRight, dstBottom;
- int32_t numTiles = mGridRows * mGridCols;
-
dstLeft = mTilesDecoded % mGridCols * width;
dstTop = mTilesDecoded / mGridCols * height;
dstRight = dstLeft + width - 1;
@@ -638,16 +710,16 @@
// apply crop on bottom-right
// TODO: need to move this into the color converter itself.
- if (dstRight >= imageWidth) {
- crop_right = imageWidth - dstLeft - 1;
+ if (dstRight >= mWidth) {
+ crop_right = mWidth - dstLeft - 1;
dstRight = dstLeft + crop_right;
}
- if (dstBottom >= imageHeight) {
- crop_bottom = imageHeight - dstTop - 1;
+ if (dstBottom >= mHeight) {
+ crop_bottom = mHeight - dstTop - 1;
dstBottom = dstTop + crop_bottom;
}
- *done = (++mTilesDecoded >= numTiles);
+ *done = (++mTilesDecoded >= mTargetTiles);
if (converter.isValid()) {
converter.convert(
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index cd091a6..9a33168 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -287,7 +287,9 @@
//static
bool MediaCodecList::isSoftwareCodec(const AString &componentName) {
return componentName.startsWithIgnoreCase("OMX.google.")
- || !componentName.startsWithIgnoreCase("OMX.");
+ || componentName.startsWithIgnoreCase("c2.android.")
+ || (!componentName.startsWithIgnoreCase("OMX.")
+ && !componentName.startsWithIgnoreCase("c2."));
}
static int compareSoftwareCodecsFirst(const AString *name1, const AString *name2) {
@@ -298,7 +300,14 @@
return isSoftwareCodec2 - isSoftwareCodec1;
}
- // sort order 2: OMX codecs are first (lower)
+ // sort order 2: Codec 2.0 codecs are first (lower)
+ bool isC2_1 = name1->startsWithIgnoreCase("c2.");
+ bool isC2_2 = name2->startsWithIgnoreCase("c2.");
+ if (isC2_1 != isC2_2) {
+ return isC2_2 - isC2_1;
+ }
+
+ // sort order 3: OMX codecs are first (lower)
bool isOMX1 = name1->startsWithIgnoreCase("OMX.");
bool isOMX2 = name2->startsWithIgnoreCase("OMX.");
return isOMX2 - isOMX1;
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 6920e51..cac53f4 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -222,7 +222,7 @@
AString supportMultipleSecureCodecs = "true";
for (const auto& info : infos) {
AString name = info->getCodecName();
- if (name.startsWith("OMX.google.") ||
+ if (name.startsWith("OMX.google.") || name.startsWith("c2.android.") ||
// TODO: reenable below codecs once fixed
name == "OMX.Intel.VideoDecoder.VP9.hybrid") {
continue;
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 5417fef..e80ec3b 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -40,7 +40,8 @@
StagefrightMetadataRetriever::StagefrightMetadataRetriever()
: mParsedMetaData(false),
- mAlbumArt(NULL) {
+ mAlbumArt(NULL),
+ mLastImageIndex(-1) {
ALOGV("StagefrightMetadataRetriever()");
}
@@ -126,10 +127,30 @@
sp<IMemory> StagefrightMetadataRetriever::getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) {
-
ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
index, colorFormat, metaOnly, thumbnail);
+ return getImageInternal(index, colorFormat, metaOnly, thumbnail, NULL);
+}
+
+sp<IMemory> StagefrightMetadataRetriever::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+
+ FrameRect rect = {left, top, right, bottom};
+
+ if (mImageDecoder != NULL && index == mLastImageIndex) {
+ return mImageDecoder->extractFrame(&rect);
+ }
+
+ return getImageInternal(
+ index, colorFormat, false /*metaOnly*/, false /*thumbnail*/, &rect);
+}
+
+sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
+ int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
+
if (mExtractor.get() == NULL) {
ALOGE("no extractor.");
return NULL;
@@ -192,12 +213,19 @@
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
- ImageDecoder decoder(componentName, trackMeta, source);
- sp<IMemory> frame = decoder.extractFrame(
- thumbnail ? -1 : 0 /*frameTimeUs*/, 0 /*seekMode*/, colorFormat);
+ sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
+ int64_t frameTimeUs = thumbnail ? -1 : 0;
+ if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
+ sp<IMemory> frame = decoder->extractFrame(rect);
- if (frame != NULL) {
- return frame;
+ if (frame != NULL) {
+ if (rect != NULL) {
+ // keep the decoder if slice decoding
+ mImageDecoder = decoder;
+ mLastImageIndex = index;
+ }
+ return frame;
+ }
}
ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
}
@@ -307,16 +335,17 @@
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
VideoFrameDecoder decoder(componentName, trackMeta, source);
- if (outFrame != NULL) {
- *outFrame = decoder.extractFrame(timeUs, option, colorFormat);
- if (*outFrame != NULL) {
- return OK;
- }
- } else if (outFrames != NULL) {
- status_t err = decoder.extractFrames(
- timeUs, numFrames, option, colorFormat, outFrames);
- if (err == OK) {
- return OK;
+ if (decoder.init(timeUs, numFrames, option, colorFormat) == OK) {
+ if (outFrame != NULL) {
+ *outFrame = decoder.extractFrame();
+ if (*outFrame != NULL) {
+ return OK;
+ }
+ } else if (outFrames != NULL) {
+ status_t err = decoder.extractFrames(outFrames);
+ if (err == OK) {
+ return OK;
+ }
}
}
ALOGV("%s failed to extract frame, trying next decoder.", componentName.c_str());
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index b86f4ad..0b554a2 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -16,77 +16,77 @@
<Included>
<Decoders>
- <MediaCodec name="c2.google.mp3.decoder" type="audio/mpeg">
+ <MediaCodec name="c2.android.mp3.decoder" type="audio/mpeg">
<Limit name="channel-count" max="2" />
<Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<Limit name="bitrate" range="8000-320000" />
</MediaCodec>
- <MediaCodec name="c2.google.amrnb.decoder" type="audio/3gpp">
+ <MediaCodec name="c2.android.amrnb.decoder" type="audio/3gpp">
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000" />
<Limit name="bitrate" range="4750-12200" />
</MediaCodec>
- <MediaCodec name="c2.google.amrwb.decoder" type="audio/amr-wb">
+ <MediaCodec name="c2.android.amrwb.decoder" type="audio/amr-wb">
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="16000" />
<Limit name="bitrate" range="6600-23850" />
</MediaCodec>
- <MediaCodec name="c2.google.aac.decoder" type="audio/mp4a-latm">
+ <MediaCodec name="c2.android.aac.decoder" type="audio/mp4a-latm">
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="7350,8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<Limit name="bitrate" range="8000-960000" />
</MediaCodec>
- <MediaCodec name="c2.google.g711.alaw.decoder" type="audio/g711-alaw">
+ <MediaCodec name="c2.android.g711.alaw.decoder" type="audio/g711-alaw">
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000-48000" />
<Limit name="bitrate" range="64000" />
</MediaCodec>
- <MediaCodec name="c2.google.g711.mlaw.decoder" type="audio/g711-mlaw">
+ <MediaCodec name="c2.android.g711.mlaw.decoder" type="audio/g711-mlaw">
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000-48000" />
<Limit name="bitrate" range="64000" />
</MediaCodec>
- <MediaCodec name="c2.google.vorbis.decoder" type="audio/vorbis">
+ <MediaCodec name="c2.android.vorbis.decoder" type="audio/vorbis">
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="8000-96000" />
<Limit name="bitrate" range="32000-500000" />
</MediaCodec>
- <MediaCodec name="c2.google.opus.decoder" type="audio/opus">
+ <MediaCodec name="c2.android.opus.decoder" type="audio/opus">
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="48000" />
<Limit name="bitrate" range="6000-510000" />
</MediaCodec>
- <MediaCodec name="c2.google.raw.decoder" type="audio/raw">
+ <MediaCodec name="c2.android.raw.decoder" type="audio/raw">
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="8000-96000" />
<Limit name="bitrate" range="1-10000000" />
</MediaCodec>
- <MediaCodec name="c2.google.flac.decoder" type="audio/flac">
+ <MediaCodec name="c2.android.flac.decoder" type="audio/flac">
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="1-655350" />
<Limit name="bitrate" range="1-21000000" />
</MediaCodec>
</Decoders>
<Encoders>
- <MediaCodec name="c2.google.aac.encoder" type="audio/mp4a-latm">
+ <MediaCodec name="c2.android.aac.encoder" type="audio/mp4a-latm">
<Limit name="channel-count" max="6" />
<Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<!-- also may support 64000, 88200 and 96000 Hz -->
<Limit name="bitrate" range="8000-960000" />
</MediaCodec>
- <MediaCodec name="c2.google.amrnb.encoder" type="audio/3gpp">
+ <MediaCodec name="c2.android.amrnb.encoder" type="audio/3gpp">
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000" />
<Limit name="bitrate" range="4750-12200" />
<Feature name="bitrate-modes" value="CBR" />
</MediaCodec>
- <MediaCodec name="c2.google.amrwb.encoder" type="audio/amr-wb">
+ <MediaCodec name="c2.android.amrwb.encoder" type="audio/amr-wb">
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="16000" />
<Limit name="bitrate" range="6600-23850" />
<Feature name="bitrate-modes" value="CBR" />
</MediaCodec>
- <MediaCodec name="c2.google.flac.encoder" type="audio/flac">
+ <MediaCodec name="c2.android.flac.encoder" type="audio/flac">
<Limit name="channel-count" max="2" />
<Limit name="sample-rate" ranges="1-655350" />
<Limit name="bitrate" range="1-21000000" />
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 593463b..adb45b3 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -16,7 +16,7 @@
<Included>
<Decoders>
- <MediaCodec name="c2.google.mpeg4.decoder" type="video/mp4v-es">
+ <MediaCodec name="c2.android.mpeg4.decoder" type="video/mp4v-es">
<!-- profiles and levels: ProfileSimple : Level3 -->
<Limit name="size" min="2x2" max="352x288" />
<Limit name="alignment" value="2x2" />
@@ -25,7 +25,7 @@
<Limit name="bitrate" range="1-384000" />
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.google.h263.decoder" type="video/3gpp">
+ <MediaCodec name="c2.android.h263.decoder" type="video/3gpp">
<!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
<Limit name="size" min="2x2" max="352x288" />
@@ -33,7 +33,7 @@
<Limit name="bitrate" range="1-384000" />
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.google.avc.decoder" type="video/avc">
+ <MediaCodec name="c2.android.avc.decoder" type="video/avc">
<!-- profiles and levels: ProfileHigh : Level52 -->
<Limit name="size" min="2x2" max="4080x4080" />
<Limit name="alignment" value="2x2" />
@@ -43,7 +43,7 @@
<Limit name="bitrate" range="1-48000000" />
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.google.hevc.decoder" type="video/hevc">
+ <MediaCodec name="c2.android.hevc.decoder" type="video/hevc">
<!-- profiles and levels: ProfileMain : MainTierLevel51 -->
<Limit name="size" min="2x2" max="4096x4096" />
<Limit name="alignment" value="2x2" />
@@ -53,7 +53,7 @@
<Limit name="bitrate" range="1-10000000" />
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8">
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
@@ -62,7 +62,7 @@
<Limit name="bitrate" range="1-40000000" />
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9">
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
@@ -74,13 +74,13 @@
</Decoders>
<Encoders>
- <MediaCodec name="c2.google.h263.encoder" type="video/3gpp">
+ <MediaCodec name="c2.android.h263.encoder" type="video/3gpp">
<!-- profiles and levels: ProfileBaseline : Level45 -->
<Limit name="size" min="176x144" max="176x144" />
<Limit name="alignment" value="16x16" />
<Limit name="bitrate" range="1-128000" />
</MediaCodec>
- <MediaCodec name="c2.google.avc.encoder" type="video/avc">
+ <MediaCodec name="c2.android.avc.encoder" type="video/avc">
<!-- profiles and levels: ProfileBaseline : Level41 -->
<Limit name="size" min="16x16" max="2048x2048" />
<Limit name="alignment" value="2x2" />
@@ -90,7 +90,7 @@
<Limit name="bitrate" range="1-12000000" />
<Feature name="intra-refresh" />
</MediaCodec>
- <MediaCodec name="c2.google.mpeg4.encoder" type="video/mp4v-es">
+ <MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
<!-- profiles and levels: ProfileCore : Level2 -->
<Limit name="size" min="16x16" max="176x144" />
<Limit name="alignment" value="16x16" />
@@ -98,7 +98,7 @@
<Limit name="blocks-per-second" range="12-1485" />
<Limit name="bitrate" range="1-64000" />
</MediaCodec>
- <MediaCodec name="c2.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <MediaCodec name="c2.android.vp8.encoder" type="video/x-vnd.on2.vp8">
<!-- profiles and levels: ProfileMain : Level_Version0-3 -->
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
@@ -108,7 +108,7 @@
<Limit name="bitrate" range="1-40000000" />
<Feature name="bitrate-modes" value="VBR,CBR" />
</MediaCodec>
- <MediaCodec name="c2.google.vp9.encoder" type="video/x-vnd.on2.vp9">
+ <MediaCodec name="c2.android.vp9.encoder" type="video/x-vnd.on2.vp9">
<!-- profiles and levels: ProfileMain : Level_Version0-3 -->
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index f6d4727..dc58c15 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -32,32 +32,30 @@
class MediaCodecBuffer;
class IMediaSource;
class VideoFrame;
+struct MediaCodec;
-struct FrameDecoder {
+struct FrameRect {
+ int32_t left, top, right, bottom;
+};
+
+struct FrameDecoder : public RefBase {
FrameDecoder(
const AString &componentName,
const sp<MetaData> &trackMeta,
- const sp<IMediaSource> &source) :
- mComponentName(componentName),
- mTrackMeta(trackMeta),
- mSource(source),
- mDstFormat(OMX_COLOR_Format16bitRGB565),
- mDstBpp(2) {}
+ const sp<IMediaSource> &source);
- sp<IMemory> extractFrame(int64_t frameTimeUs, int option, int colorFormat);
+ status_t init(
+ int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
- status_t extractFrames(
- int64_t frameTimeUs,
- size_t numFrames,
- int option,
- int colorFormat,
- std::vector<sp<IMemory> >* frames);
+ sp<IMemory> extractFrame(FrameRect *rect = NULL);
+
+ status_t extractFrames(std::vector<sp<IMemory> >* frames);
static sp<IMemory> getMetadataOnly(
const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
protected:
- virtual ~FrameDecoder() {}
+ virtual ~FrameDecoder();
virtual sp<AMessage> onGetFormatAndSeekOptions(
int64_t frameTimeUs,
@@ -65,6 +63,8 @@
int seekMode,
MediaSource::ReadOptions *options) = 0;
+ virtual status_t onExtractRect(FrameRect *rect) = 0;
+
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer,
MetaDataBase &sampleMeta,
@@ -92,13 +92,13 @@
OMX_COLOR_FORMATTYPE mDstFormat;
int32_t mDstBpp;
std::vector<sp<IMemory> > mFrames;
+ MediaSource::ReadOptions mReadOptions;
+ sp<MediaCodec> mDecoder;
+ sp<AMessage> mOutputFormat;
+ bool mHaveMoreInputs;
+ bool mFirstSample;
- static bool getDstColorFormat(
- android_pixel_format_t colorFormat,
- OMX_COLOR_FORMATTYPE *dstFormat,
- int32_t *dstBpp);
-
- status_t extractInternal(int64_t frameTimeUs, size_t numFrames, int option);
+ status_t extractInternal();
DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
};
@@ -107,13 +107,7 @@
VideoFrameDecoder(
const AString &componentName,
const sp<MetaData> &trackMeta,
- const sp<IMediaSource> &source) :
- FrameDecoder(componentName, trackMeta, source),
- mIsAvcOrHevc(false),
- mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
- mTargetTimeUs(-1ll),
- mNumFrames(0),
- mNumFramesDecoded(0) {}
+ const sp<IMediaSource> &source);
protected:
virtual sp<AMessage> onGetFormatAndSeekOptions(
@@ -122,6 +116,11 @@
int seekMode,
MediaSource::ReadOptions *options) override;
+ virtual status_t onExtractRect(FrameRect *rect) override {
+ // Rect extraction for sequences is not supported for now.
+ return (rect == NULL) ? OK : ERROR_UNSUPPORTED;
+ }
+
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer,
MetaDataBase &sampleMeta,
@@ -146,10 +145,7 @@
ImageDecoder(
const AString &componentName,
const sp<MetaData> &trackMeta,
- const sp<IMediaSource> &source) :
- FrameDecoder(componentName, trackMeta, source),
- mFrame(NULL), mGridRows(1), mGridCols(1),
- mTilesDecoded(0), mThumbnail(false) {}
+ const sp<IMediaSource> &source);
protected:
virtual sp<AMessage> onGetFormatAndSeekOptions(
@@ -158,6 +154,8 @@
int seekMode,
MediaSource::ReadOptions *options) override;
+ virtual status_t onExtractRect(FrameRect *rect) override;
+
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer __unused,
MetaDataBase &sampleMeta __unused,
@@ -172,10 +170,14 @@
private:
VideoFrame *mFrame;
+ int32_t mWidth;
+ int32_t mHeight;
int32_t mGridRows;
int32_t mGridCols;
+ int32_t mTileWidth;
+ int32_t mTileHeight;
int32_t mTilesDecoded;
- bool mThumbnail;
+ int32_t mTargetTiles;
};
} // namespace android
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 209f850..f78e125 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -27,8 +27,10 @@
class DataSource;
class MediaExtractor;
+struct ImageDecoder;
+struct FrameRect;
-struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
+struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
StagefrightMetadataRetriever();
virtual ~StagefrightMetadataRetriever();
@@ -44,6 +46,8 @@
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail);
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> >* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);
@@ -59,6 +63,8 @@
KeyedVector<int, String8> mMetaData;
MediaAlbumArt *mAlbumArt;
+ sp<ImageDecoder> mImageDecoder;
+ int mLastImageIndex;
void parseMetaData();
// Delete album art and clear metadata.
void clearMetadata();
@@ -66,6 +72,8 @@
status_t getFrameInternal(
int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
+ virtual sp<IMemory> getImageInternal(
+ int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 3eb98f3..3e6942b 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -62,6 +62,7 @@
"libmedia_omx",
"libstagefright_foundation",
"libstagefright_xmlparser",
+ "libutils",
],
cflags: [
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
index baa7b81..5a46b26 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
@@ -22,6 +22,8 @@
#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
namespace android {
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 2b993ee..fc2dbbb 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -64,7 +64,7 @@
virtual bool isFastTrack() const { return (mFlags & AUDIO_INPUT_FLAG_FAST) != 0; }
- void setSilenced(bool silenced) { mSilenced = silenced; }
+ void setSilenced(bool silenced) { if (!isPatchTrack()) mSilenced = silenced; }
bool isSilenced() const { return mSilenced; }
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index c47aa01..dcad866 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -4169,16 +4169,31 @@
// buffer size, then write 0s to the output
if (mSleepTimeUs == 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
- if (mSleepTimeUs < kMinThreadSleepTimeUs) {
- mSleepTimeUs = kMinThreadSleepTimeUs;
- }
- // reduce sleep time in case of consecutive application underruns to avoid
- // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
- // duration we would end up writing less data than needed by the audio HAL if
- // the condition persists.
- if (sleepTimeShift < kMaxThreadSleepTimeShift) {
- sleepTimeShift++;
+ if (mPipeSink.get() != nullptr && mPipeSink == mNormalSink) {
+ // Using the Monopipe availableToWrite, we estimate the
+ // sleep time to retry for more data (before we underrun).
+ MonoPipe *monoPipe = static_cast<MonoPipe *>(mPipeSink.get());
+ const ssize_t availableToWrite = mPipeSink->availableToWrite();
+ const size_t pipeFrames = monoPipe->maxFrames();
+ const size_t framesLeft = pipeFrames - max(availableToWrite, 0);
+ // HAL_framecount <= framesDelay ~ framesLeft / 2 <= Normal_Mixer_framecount
+ const size_t framesDelay = std::min(
+ mNormalFrameCount, max(framesLeft / 2, mFrameCount));
+ ALOGV("pipeFrames:%zu framesLeft:%zu framesDelay:%zu",
+ pipeFrames, framesLeft, framesDelay);
+ mSleepTimeUs = framesDelay * MICROS_PER_SECOND / mSampleRate;
+ } else {
+ mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
+ if (mSleepTimeUs < kMinThreadSleepTimeUs) {
+ mSleepTimeUs = kMinThreadSleepTimeUs;
+ }
+ // reduce sleep time in case of consecutive application underruns to avoid
+ // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
+ // duration we would end up writing less data than needed by the audio HAL if
+ // the condition persists.
+ if (sleepTimeShift < kMaxThreadSleepTimeShift) {
+ sleepTimeShift++;
+ }
}
} else {
mSleepTimeUs = mIdleSleepTimeUs;
@@ -6614,11 +6629,16 @@
if (framesRead != OVERRUN) break;
}
- // since pipe is non-blocking, simulate blocking input by waiting for 1/2 of
- // buffer size or at least for 20ms.
- size_t sleepFrames = max(
- min(mPipeFramesP2, mRsmpInFramesP2) / 2, FMS_20 * mSampleRate / 1000);
- if (framesRead <= (ssize_t) sleepFrames) {
+ const ssize_t availableToRead = mPipeSource->availableToRead();
+ if (availableToRead >= 0) {
+ // PipeSource is the master clock. It is up to the AudioRecord client to keep up.
+ LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
+ "more frames to read than fifo size, %zd > %zu",
+ availableToRead, mPipeFramesP2);
+ const size_t pipeFramesFree = mPipeFramesP2 - availableToRead;
+ const size_t sleepFrames = min(pipeFramesFree, mRsmpInFramesP2) / 2;
+ ALOGVV("mPipeFramesP2:%zu mRsmpInFramesP2:%zu sleepFrames:%zu availableToRead:%zd",
+ mPipeFramesP2, mRsmpInFramesP2, sleepFrames, availableToRead);
sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
}
if (framesRead < 0) {
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index aff1239..49552a1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1663,7 +1663,8 @@
mFramesToDrop(0),
mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
mRecordBufferConverter(NULL),
- mFlags(flags)
+ mFlags(flags),
+ mSilenced(false)
{
if (mCblk == NULL) {
return;
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 65571f9..d29cae1 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -25,6 +25,7 @@
libserviceutility \
libaudiopolicymanager \
libmedia_helper \
+ libmediametrics \
libeffectsconfig
LOCAL_STATIC_LIBRARIES := \
@@ -60,6 +61,7 @@
audio_policy_criteria.conf \
LOCAL_C_INCLUDES += frameworks/av/services/audiopolicy/engineconfigurable/include
+LOCAL_C_INCLUDES += frameworks/av/include
LOCAL_SHARED_LIBRARIES += libaudiopolicyengineconfigurable
@@ -78,6 +80,7 @@
libaudiopolicycomponents
LOCAL_SHARED_LIBRARIES += libmedia_helper
+LOCAL_SHARED_LIBRARIES += libmediametrics
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
LOCAL_SHARED_LIBRARIES += libicuuc libxml2
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 7f09e9b..923c091 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -69,8 +69,12 @@
API_INPUT_CONCURRENCY_NONE = 0,
API_INPUT_CONCURRENCY_CALL = (1 << 0), // Concurrency with a call
API_INPUT_CONCURRENCY_CAPTURE = (1 << 1), // Concurrency with another capture
+ API_INPUT_CONCURRENCY_HOTWORD = (1 << 2), // Concurrency with a hotword
+ API_INPUT_CONCURRENCY_PREEMPT = (1 << 3), // preempted another capture client
+ // NB: PREEMPT is reported on a successful call; the other flags are reported on failing calls
+ API_INPUT_CONCURRENCY_LAST = (1 << 4),
- API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE),
+ API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_LAST - 1),
};
typedef uint32_t concurrency_type__mask_t;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 264e709..e1467b7 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1878,6 +1878,7 @@
if (mCallTxPatch != 0 &&
inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
ALOGW("startInput(%d) failed: call in progress", input);
+ *concurrency |= API_INPUT_CONCURRENCY_CALL;
return INVALID_OPERATION;
}
@@ -1920,17 +1921,20 @@
ALOGW("startInput(%d) failed for HOTWORD: "
"other input %d already started for HOTWORD",
input, activeDesc->mIoHandle);
+ *concurrency |= API_INPUT_CONCURRENCY_HOTWORD;
return INVALID_OPERATION;
}
} else {
ALOGV("startInput(%d) failed for HOTWORD: other input %d already started",
input, activeDesc->mIoHandle);
+ *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
return INVALID_OPERATION;
}
} else {
if (activeSource != AUDIO_SOURCE_HOTWORD) {
ALOGW("startInput(%d) failed: other input %d already started",
input, activeDesc->mIoHandle);
+ *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
return INVALID_OPERATION;
}
}
@@ -1955,6 +1959,7 @@
audio_session_t activeSession = activeSessions.keyAt(0);
audio_io_handle_t activeHandle = activeDesc->mIoHandle;
SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
+ *concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
sessions.add(activeSession);
inputDesc->setPreemptedSessions(sessions);
stopInput(activeHandle, activeSession);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index cf24c13..008d655 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -18,8 +18,11 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <media/MediaAnalyticsItem.h>
+
#include "AudioPolicyService.h"
#include "ServiceUtilities.h"
+#include "TypeConverter.h"
namespace android {
@@ -409,6 +412,35 @@
return NO_ERROR;
}
+// this is replicated from frameworks/av/media/libaudioclient/AudioRecord.cpp
+// XXX -- figure out how to put it into a common, shared location
+
+static std::string audioSourceString(audio_source_t value) {
+ std::string source;
+ if (SourceTypeConverter::toString(value, source)) {
+ return source;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+static std::string audioConcurrencyString(AudioPolicyInterface::concurrency_type__mask_t concurrency)
+{
+ char buffer[64]; // oversized
+ if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL) {
+ snprintf(buffer, sizeof(buffer), "%s%s%s%s",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL)? ",call":"",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE)? ",capture":"",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_HOTWORD)? ",hotword":"",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_PREEMPT)? ",preempt":"");
+ } else {
+ snprintf(buffer, sizeof(buffer), ",none");
+ }
+
+ return &buffer[1];
+}
+
status_t AudioPolicyService::startInput(audio_port_handle_t portId, bool *silenced)
{
if (mAudioPolicyManager == NULL) {
@@ -444,6 +476,57 @@
AutoCallerClear acc;
status = mAudioPolicyManager->startInput(
client->input, client->session, *silenced, &concurrency);
+
+ }
+
+ // XXX: log every startInput() (not just failures) while dogfooding; drop the "1 ||" afterwards.
+ if (1 || status != NO_ERROR) {
+
+ static constexpr char kAudioPolicy[] = "audiopolicy";
+
+ static constexpr char kAudioPolicyReason[] = "android.media.audiopolicy.reason";
+ static constexpr char kAudioPolicyStatus[] = "android.media.audiopolicy.status";
+ static constexpr char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
+ static constexpr char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
+ static constexpr char kAudioPolicyRqstSession[] = "android.media.audiopolicy.rqst.session";
+ static constexpr char kAudioPolicyActiveSrc[] = "android.media.audiopolicy.active.src";
+ static constexpr char kAudioPolicyActivePkg[] = "android.media.audiopolicy.active.pkg";
+ static constexpr char kAudioPolicyActiveSession[] = "android.media.audiopolicy.active.session";
+
+ MediaAnalyticsItem *item = new MediaAnalyticsItem(kAudioPolicy);
+ if (item != NULL) {
+
+ item->setCString(kAudioPolicyReason, audioConcurrencyString(concurrency).c_str());
+ item->setInt32(kAudioPolicyStatus, status);
+
+ item->setCString(kAudioPolicyRqstSrc, audioSourceString(client->attributes.source).c_str());
+ item->setCString(kAudioPolicyRqstPkg, std::string(String8(client->opPackageName).string()).c_str());
+ item->setInt32(kAudioPolicyRqstSession, client->session);
+
+ // figure out who is active
+ // NB: the other party may have released the microphone since the concurrency check,
+ // so this report is best-effort and may name a client that has just stopped.
+ // We hold mLock, so iterating over mAudioRecordClients here should be safe.
+ if (concurrency != AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE) {
+ int count = mAudioRecordClients.size();
+ for (int i = 0; i<count ; i++) {
+ if (portId == mAudioRecordClients.keyAt(i)) {
+ continue;
+ }
+ sp<AudioRecordClient> other = mAudioRecordClients.valueAt(i);
+ if (other->active) {
+ // keeps the last of the clients marked active
+ item->setCString(kAudioPolicyActiveSrc,
+ audioSourceString(other->attributes.source).c_str());
+ item->setCString(kAudioPolicyActivePkg, std::string(String8(other->opPackageName).string()).c_str());
+ item->setInt32(kAudioPolicyActiveSession, other->session);
+ }
+ }
+ }
+ item->selfrecord();
+ delete item;
+ item = NULL;
+ }
}
if (status == NO_ERROR) {
@@ -457,6 +540,8 @@
if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
//TODO: check concurrent capture permission
}
+
+ client->active = true;
} else {
finishRecording(client->opPackageName, client->uid);
}
@@ -477,6 +562,8 @@
}
sp<AudioRecordClient> client = mAudioRecordClients.valueAt(index);
+ client->active = false;
+
// finish the recording app op
finishRecording(client->opPackageName, client->uid);
AutoCallerClear acc;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 67b5e06..6b958a8 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -4325,9 +4325,9 @@
uint32_t numRequestProcessed = 0;
for (size_t i = 0; i < batchSize; i++) {
requests[i] = &mNextRequests.editItemAt(i).halRequest;
+ ATRACE_ASYNC_BEGIN("frame capture", mNextRequests[i].halRequest.frame_number);
}
- ATRACE_ASYNC_BEGIN("batch frame capture", mNextRequests[0].halRequest.frame_number);
res = mInterface->processBatchCaptureRequests(requests, &numRequestProcessed);
bool triggerRemoveFailed = false;
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 6d84a42..4b05395 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -481,6 +481,7 @@
static std::string allowedKeys[] =
{
+ "audiopolicy",
"audiorecord",
"audiotrack",
"codec",